diff --git a/copyrightstamp.txt b/copyrightstamp.txt index bb382b52d1fef554b0cb6cbb12894ee47fdcea42..64f20833f2e61b2137fa07071d6ecca0be30bd5c 100644 --- a/copyrightstamp.txt +++ b/copyrightstamp.txt @@ -17,6 +17,7 @@ Contributors: * Dominique Benielli <dominique.benielli_AT_univ-amu.fr> * Riikka Huusari <rikka.huusari_AT_univ-amu.fr> * Baptiste Bauvin <baptiste.bauvin_AT_univ-amu.fr> +* Hachem Kadri <hachem.kadri_AT_lis-lab.fr> Description: ----------- diff --git a/doc/doccumbo/conf.py b/doc/doccumbo/conf.py deleted file mode 100644 index e7414e8b56ea28a0e8e305937c8287c684c4d98c..0000000000000000000000000000000000000000 --- a/doc/doccumbo/conf.py +++ /dev/null @@ -1,185 +0,0 @@ -# -*- coding: utf-8 -*- - -# import os, sys -# -# MultiviewPlatform documentation build configuration file, created by -# sphinx-quickstart on Mon Jan 29 17:13:09 2018. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -import sys - -sys.path.insert(0, os.path.abspath('.')) -sys.path.insert(0, os.path.abspath('../../multiconfusion')) -sys.path.insert(0, os.path.abspath('../..')) -file_loc = os.path.split(__file__)[0] -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(file_loc), '.'))) -import multiconfusion -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. 
-# -# needs_sphinx = '1.0' - -add_module_names = False - -# sys.path.append(os.path.abspath('sphinxext')) - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = ['sphinx.ext.autodoc', -# 'sphinx.ext.doctest', -# 'sphinx.ext.intersphinx', -# 'sphinx.ext.todo', -# 'nbsphinx', - 'sphinx.ext.coverage', - 'sphinx.ext.imgmath', -# 'sphinx.ext.mathjax', -# 'sphinx.ext.ifconfig', -# 'sphinx.ext.viewcode', -# 'sphinx.ext.githubpages', - 'sphinx.ext.napoleon', - 'm2r',] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -# source_suffix = {'.rst': 'restructuredtext', '.md': 'markdown'} -# source_suffix = '.rst' -source_suffix = ['.rst', '.md'] - -# source_parsers = { -# '.md': CommonMarkParser, -# } - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'MultiConfusion' -copyright = u'2019, Dominique Benielli' -author = u'Dominique Benielli' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = u'0.0.0' -# The full version, including alpha/beta/rc tags. -release = u'0' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. 
-# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = [] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -# html_theme = 'sphinx_rtd_theme' -html_theme = 'classic' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# -# html_theme_options = {} - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = [] - -# -- Options for HTMLHelp output ------------------------------------------ - -# Output file base name for HTML help builder. -htmlhelp_basename = 'MultiConfusiondoc' - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # - # 'preamble': '', - - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'MultiConfusion.tex', u'MultiConfusion Documentation', - u'Dominique Benielli', 'manual'), -] - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. 
List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'confusion', u'MultiConfusion Documentation', - [author], 1) -] - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'MultiConfusion', u'MultiConfusion Documentation', - author, 'MultiConfusion', 'One line description of project.', - 'Miscellaneous'), -] - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'https://docs.python.org/': None} - -# def setup(app): -# app.add_config_value('recommonmark_config', { -# 'auto_toc_tree_section': 'Contents', -# }, True) -# app.add_transform(AutoStructify) diff --git a/doc/doccumbo/credits.rst b/doc/doccumbo/credits.rst deleted file mode 100644 index 9da33fd266b9dbb700fe69648fe05db610020c68..0000000000000000000000000000000000000000 --- a/doc/doccumbo/credits.rst +++ /dev/null @@ -1,59 +0,0 @@ -Credits -======= - -**multiconfusion* is developped by the -`development team <https://developpement.lis-lab.fr/>`_ of the -`LIS <http://www.lis-lab.fr/>`_. 
- -If you use **multiconfusion** in a scientific publication, please cite the -following paper:: - - @InProceedings{Koco:2011:BAM, - author={Ko\c{c}o, Sokol and Capponi, C{\'e}cile}, - editor={Gunopulos, Dimitrios and Hofmann, Thomas and Malerba, Donato - and Vazirgiannis, Michalis}, - title={A Boosting Approach to Multiview Classification with Cooperation}, - booktitle={Proceedings of the 2011 European Conference on Machine Learning - and Knowledge Discovery in Databases - Volume Part II}, - year={2011}, - location={Athens, Greece}, - publisher={Springer-Verlag}, - address={Berlin, Heidelberg}, - pages={209--228}, - numpages = {20}, - isbn={978-3-642-23783-6} - url={https://link.springer.com/chapter/10.1007/978-3-642-23783-6_14}, - keywords={boosting, classification, multiview learning, - supervised learning}, - } - -References ----------- - -* Sokol Koço, Cécile Capponi, - `"A boosting approach to multiview classification with cooperation" - <https://link.springer.com/chapter/10.1007/978-3-642-23783-6_14>`_, - Proceedings of the 2011 European Conference on Machine Learning (ECML), - Athens, Greece, pp.209-228, 2011, Springer-Verlag. - -* Sokol Koço, - `"Tackling the uneven views problem with cooperation based ensemble - learning methods" <http://www.theses.fr/en/2013AIXM4101>`_, - PhD Thesis, Aix-Marseille Université, 2013. - -Copyright ---------- - -Université d'Aix Marseille (AMU) - -Centre National de la Recherche Scientifique (CNRS) - -Université de Toulon (UTLN). - -Copyright © 2019-2020 AMU, CNRS, UTLN - -License -------- - -**multiconfusion** is free software: you can redistribute it and/or modify -it under the terms of the **GNU Lesser General Public License** as published by -the Free Software Foundation, either **version 3** of the License, or -(at your option) any later version. 
diff --git a/doc/doccumbo/index.rst b/doc/doccumbo/index.rst deleted file mode 100644 index 740f792823e5898d029d7ef34928005af7988048..0000000000000000000000000000000000000000 --- a/doc/doccumbo/index.rst +++ /dev/null @@ -1,29 +0,0 @@ -.. Multiconfusion documentation master file, created by - sphinx-quickstart on Mon Sep 2 12:12:08 2019. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to Mucumbo's documentation! -=================================== - -**multiconfusion** is a Python package implementing boost algorithms for -machine learning with multimodal with confusion matrix data. - -It is compatible with `scikit-learn <http://scikit-learn.org/>`_, a popular -package for machine learning in Python. - -.. toctree:: - :maxdepth: 2 - :caption: Contents: - - install_devel - reference/index - credits - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/doc/doccumbo/reference/index.rst b/doc/doccumbo/reference/index.rst deleted file mode 100644 index 6d289a7bfd35d3770ef6ca770e4ee56b10de2676..0000000000000000000000000000000000000000 --- a/doc/doccumbo/reference/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -Welcome to Multi-View Mu Cumbo's reference! -=========================================== - -.. toctree:: - :maxdepth: 2 - :caption: Contents: - - modules - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/doc/doccumbo/reference/modules.rst b/doc/doccumbo/reference/modules.rst deleted file mode 100644 index b6d9bd91ac735d04472dfff3d813c0f07d9b0aaa..0000000000000000000000000000000000000000 --- a/doc/doccumbo/reference/modules.rst +++ /dev/null @@ -1,12 +0,0 @@ -multiconfusion -============== - -.. toctree:: - :maxdepth: 3 - -.. 
automodule:: multiconfusion.cumbo - :members: - :inherited-members: - - datasets - diff --git a/doc/docmetricexamples/conf.py b/doc/docmetricexamples/conf.py deleted file mode 100644 index a45f0cf2f943497097cbe6fc091262596b280486..0000000000000000000000000000000000000000 --- a/doc/docmetricexamples/conf.py +++ /dev/null @@ -1,277 +0,0 @@ -# -*- coding: utf-8 -*- - -from datetime import date -import os -import sys -sys.path.insert(0, os.path.abspath('../metriclearning')) -sys.path.insert(0, os.path.abspath('../..')) -sys.path.insert(0, os.path.abspath(".")) -sys.path.append(os.path.join(os.path.dirname(__name__), '..')) -sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sphinxext')) -import metriclearning - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'sphinx.ext.imgmath', - 'numpydoc', - # 'sphinx_gallery.gen_gallery' -] - -# Add any paths that contain templates here, relative to this directory. -# templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. 
-project = 'metriclearning' -author = 'Dominique Benielli' -copyright = '2017-{}, LIS UMR 7020'.format(date.today().year) - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = metriclearning.__version__ -# The full version, including alpha/beta/rc tags. -release = metriclearning.__version__ - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. 
-html_theme = 'nature' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# "<project> v<release> documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. 
-# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a <link> tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = '{}doc'.format(project) - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - 'papersize': 'a4paper', - - # The font size ('10pt', '11pt' or '12pt'). - 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - - # Latex figure (float) alignment - 'figure_align': 'htbp'} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, '{}.tex'.format(project), '{} Documentation'.format(project), - author, 'manual')] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. 
-# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, project, '{} Documentation'.format(project), - [author], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, project, '{} Documentation'.format(project), author, project, - 'Multi-View Metric Learning in Vector-Valued Kernel Spaces for machine learning.', - 'Miscellaneous')] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = { - 'sklearn': ('http://scikit-learn.org/stable', None) -} - -numpydoc_show_class_members = False - -sphinx_gallery_conf = { - 'doc_module': (project,), - 'backreferences_dir': 'backreferences', - # path to your examples scripts - 'examples_dirs': '../examples', - # path where to save gallery generated examples - 'gallery_dirs': 'auto_examples'} - -# Generate the plots for the gallery -plot_gallery = 'True' diff --git a/doc/docmetricexamples/index.rst b/doc/docmetricexamples/index.rst deleted file mode 100644 index 71e56a03f0e56363349387590c87c4da2246e445..0000000000000000000000000000000000000000 --- a/doc/docmetricexamples/index.rst +++ /dev/null @@ -1,21 +0,0 @@ -.. metriclearning documentation master file, created by - sphinx-quickstart on Mon Sep 2 12:12:08 2019. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to metriclearning's documentation! -========================================== - -.. toctree:: - :maxdepth: 2 - :caption: Contents: - - reference/index - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/doc/docmetricexamples/reference/datasets.rst b/doc/docmetricexamples/reference/datasets.rst deleted file mode 100644 index 21d17f4f12eb38121011208e0b3d295ec38177fe..0000000000000000000000000000000000000000 --- a/doc/docmetricexamples/reference/datasets.rst +++ /dev/null @@ -1,14 +0,0 @@ -datasets -======== - -.. automodule:: metriclearning.datasets.base - :members: - :undoc-members: - :show-inheritance: - - - -.. 
automodule:: metriclearning.datasets.data_sample - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/docmetricexamples/reference/index.rst b/doc/docmetricexamples/reference/index.rst deleted file mode 100644 index f0c60bfb216b320b754b82c1f599bcf9c5edf2f5..0000000000000000000000000000000000000000 --- a/doc/docmetricexamples/reference/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -Welcome to Multi-View metriclearning's reference! -================================================= - -.. toctree:: - :maxdepth: 2 - :caption: Contents: - - modules - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/doc/docmetricexamples/reference/lpMKL.rst b/doc/docmetricexamples/reference/lpMKL.rst deleted file mode 100644 index 27a1e37642d6859285c923dd2df5b0b52d12caa2..0000000000000000000000000000000000000000 --- a/doc/docmetricexamples/reference/lpMKL.rst +++ /dev/null @@ -1,11 +0,0 @@ -lpMKL -===== - - .. automodule:: metriclearning.lpMKL - :members: MKL - :undoc-members: - :show-inheritance: - - - - diff --git a/doc/docmetricexamples/reference/mkernel.rst b/doc/docmetricexamples/reference/mkernel.rst deleted file mode 100644 index b3eaf1c1558f954e5c06d129290b94fe53341c5e..0000000000000000000000000000000000000000 --- a/doc/docmetricexamples/reference/mkernel.rst +++ /dev/null @@ -1,9 +0,0 @@ -abstract mkernel -================ - -.. automodule:: metriclearning.mkernel - :members: MKernel - :undoc-members: - :show-inheritance: - - diff --git a/doc/docmetricexamples/reference/modules.rst b/doc/docmetricexamples/reference/modules.rst deleted file mode 100644 index 6d61b3e57bb53e20298ffa6c6fa0d6a4d0c5a404..0000000000000000000000000000000000000000 --- a/doc/docmetricexamples/reference/modules.rst +++ /dev/null @@ -1,11 +0,0 @@ -metriclearning -============== - -.. 
toctree:: - :maxdepth: 3 - - mkernel - mvml - lpMKL - datasets - diff --git a/doc/docmetricexamples/reference/mvml.rst b/doc/docmetricexamples/reference/mvml.rst deleted file mode 100644 index 3dae7b8b56934c9dd59cff00f80a8f1d5b57152b..0000000000000000000000000000000000000000 --- a/doc/docmetricexamples/reference/mvml.rst +++ /dev/null @@ -1,11 +0,0 @@ -MVML of metriclearning -====================== - -.. automodule:: metriclearning.mvml - :members: MVML - :undoc-members: - :show-inheritance: - - - - diff --git a/doc/docmetricexamples/tes-demo.py b/doc/docmetricexamples/tes-demo.py deleted file mode 100644 index 2632e5ef0352bbdf82c76d3cc86d1dd17d1f5530..0000000000000000000000000000000000000000 --- a/doc/docmetricexamples/tes-demo.py +++ /dev/null @@ -1,97 +0,0 @@ -import numpy as np -from sklearn import datasets -from sklearn.metrics.pairwise import rbf_kernel -from metriclearning.mvml import MVML -from metriclearning.lpMKL import MKL -from metriclearning.datasets.data_sample import DataSample -from metriclearning.tests.datasets.get_dataset_path import get_dataset_path -import pickle -np.random.seed(4) - -# =========== create a simple dataset ============ - -n_tot = 200 -half = int(n_tot/2) -n_tr = 120 - -# create a bit more data than needed so that we can take "half" amount of samples for each class -X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False) -X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False) - -# make multi-view correspondence (select equal number of samples for both classes and order the data same way -# in both views) - -yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half]) -yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half]) - -X0 = X0[yinds0, :] -X1 = X1[yinds1, :] -Y = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1 - -n_tot = 200 -half = int(n_tot/2) -n_tr = 120 - -# create a bit more data than needed so that we can take "half" amount of 
samples for each class -X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False) -X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False) - -# make multi-view correspondence (select equal number of samples for both classes and order the data same way -# in both views) - -yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half]) -yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half]) - -X0 = X0[yinds0, :] -X1 = X1[yinds1, :] -Y = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1 - - -# shuffle -order = np.random.permutation(n_tot) -X0 = X0[order, :] -X1 = X1[order, :] -Y = Y[order] - -# make kernel dictionaries -kernel_dict = {} -test_kernel_dict = {} -kernel_dict[0] = rbf_kernel(X0[0:n_tr, :]) -kernel_dict[1] = rbf_kernel(X1[0:n_tr, :]) -test_kernel_dict[0] = rbf_kernel(X0[n_tr:n_tot, :], X0[0:n_tr, :]) -test_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :]) - -d= DataSample(kernel_dict) -a = d.data -# np.save(input_x, kernel_dict) -# np.save(input_y, Y) -# f = open(input_x, "wb") -# pickle.dump(input_x, f) -#input_x = get_dataset_path("input_x_dic.pkl") -#f = open(input_x, "r") -#dicoc = pickle.load(f) -# pickle.dump(kernel_dict, f) -#f.close() -# =========== use MVML in classifying the data ============ - -# demo on how the code is intended to be used; parameters are not cross-validated, just picked some -# mvml = MVML(kernel_dict, Y[0:n_tr], [0.1, 1], nystrom_param=0.2) - - -mvml = MVML( [0.1, 1], nystrom_param=0.2) -mvml.fit(a, Y[0:n_tr]) -print("x shape", mvml.X_.shape) -print("x shape int",mvml.X_.shapes_int) -dd = DataSample(test_kernel_dict) -X_test = dd.data -red1 = mvml.predict(X_test) - -mkl = MKL(lmbda=0.1) -mkl.fit(kernel_dict,Y[0:n_tr] ) - -mkl.predict(X_test) -#red1 = np.sign(mvml.predict_mvml(test_kernel_dict, g1, w1)) - - - - diff --git a/doc/docmumbo/api.rst b/doc/docmumbo/api.rst deleted file mode 100644 index 
5a9ad853bdbc2ee10d99c22e7fe6fb8c3de00b75..0000000000000000000000000000000000000000 --- a/doc/docmumbo/api.rst +++ /dev/null @@ -1,9 +0,0 @@ -API Documentation -================= - -multimodalboost.mumbo ---------------------- - -.. automodule:: multimodalboost.mumbo - :members: - :inherited-members: diff --git a/doc/docmumbo/auto_examples/auto_examples_jupyter.zip b/doc/docmumbo/auto_examples/auto_examples_jupyter.zip deleted file mode 100644 index a907b3133a5141e380b3d062d50de3f0672f99f0..0000000000000000000000000000000000000000 Binary files a/doc/docmumbo/auto_examples/auto_examples_jupyter.zip and /dev/null differ diff --git a/doc/docmumbo/auto_examples/auto_examples_python.zip b/doc/docmumbo/auto_examples/auto_examples_python.zip deleted file mode 100644 index 4fd29deea830691b773c143489fc1a246df84a67..0000000000000000000000000000000000000000 Binary files a/doc/docmumbo/auto_examples/auto_examples_python.zip and /dev/null differ diff --git a/doc/docmumbo/auto_examples/index.rst b/doc/docmumbo/auto_examples/index.rst deleted file mode 100644 index ba7c2afc25e39a5e29e400188661dd0c78744d79..0000000000000000000000000000000000000000 --- a/doc/docmumbo/auto_examples/index.rst +++ /dev/null @@ -1,83 +0,0 @@ -:orphan: - - - -.. _sphx_glr_auto_examples: - -.. _examples: - -Examples -======== - -The following toy examples illustrate how the MuMBo algorithm exploits -cooperation between views for classification. - - - -.. raw:: html - - <div class="sphx-glr-thumbcontainer" tooltip="In this toy example, we generate data from two classes, split between two two-dimensional views..."> - -.. only:: html - - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_2_views_2_classes_thumb.png - - :ref:`sphx_glr_auto_examples_plot_2_views_2_classes.py` - -.. raw:: html - - </div> - - -.. toctree:: - :hidden: - - /auto_examples/plot_2_views_2_classes - -.. 
raw:: html - - <div class="sphx-glr-thumbcontainer" tooltip="In this toy example, we generate data from three classes, split between three two-dimensional v..."> - -.. only:: html - - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_3_views_3_classes_thumb.png - - :ref:`sphx_glr_auto_examples_plot_3_views_3_classes.py` - -.. raw:: html - - </div> - - -.. toctree:: - :hidden: - - /auto_examples/plot_3_views_3_classes -.. raw:: html - - <div class="sphx-glr-clear"></div> - - - -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-gallery - - - .. container:: sphx-glr-download - - :download:`Download all examples in Python source code: auto_examples_python.zip <//home/dominique/projets/ANR-Lives/multimodal/multimodalboost/doc/auto_examples/auto_examples_python.zip>` - - - - .. container:: sphx-glr-download - - :download:`Download all examples in Jupyter notebooks: auto_examples_jupyter.zip <//home/dominique/projets/ANR-Lives/multimodal/multimodalboost/doc/auto_examples/auto_examples_jupyter.zip>` - - -.. only:: html - - .. 
rst-class:: sphx-glr-signature - - `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_ diff --git a/doc/docmumbo/auto_examples/plot_2_views_2_classes.ipynb b/doc/docmumbo/auto_examples/plot_2_views_2_classes.ipynb deleted file mode 100644 index 958f7f5803047b3a7c125914d74d7c1776469175..0000000000000000000000000000000000000000 --- a/doc/docmumbo/auto_examples/plot_2_views_2_classes.ipynb +++ /dev/null @@ -1,54 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n==========================\n2 views, 2 classes example\n==========================\n\nIn this toy example, we generate data from two classes, split between two\ntwo-dimensional views.\n\nFor each view, the data are generated so that half of the points of each class\nare well separated in the plane, while the other half of the points are not\nseparated and placed in the same area. We also insure that the points that are\nnot separated in one view are well separated in the other view.\n\nThus, in the figure representing the data, the points represented by crosses\n(x) are well separated in view 0 while they are not separated in view 1, while\nthe points represented by dots (.) are well separated in view 1 while they are\nnot separated in view 0. 
In this figure, the blue symbols represent points\nof class 0, while red symbols represent points of class 1.\n\nThe MuMBo algorithm take adavantage of the complementarity of the two views to\nrightly classify the points.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# Universit\u00e9 d'Aix Marseille (AMU) -\n# Centre National de la Recherche Scientifique (CNRS) -\n# Universit\u00e9 de Toulon (UTLN).\n# Copyright \u00a9 2017-2018 AMU, CNRS, UTLN\n#\n# This file is part of multimodalboost.\n#\n# multimodalboost is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# multimodalboost is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with multimodalboost. 
If not, see <http://www.gnu.org/licenses/>.\n#\n# Author: Florent JAILLET - Laboratoire d'Informatique et Syst\u00e8mes - UMR 7020\n\nimport numpy as np\nfrom multimodalboost.mumbo import MumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n \"\"\"Generate random data in a rectangle\"\"\"\n lim = np.array(lim)\n n_features = lim.shape[0]\n data = np.random.random((n_samples, n_features))\n data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 100\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [1., 2.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [1., 2.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1), axis=1)\n\ny = np.zeros(4*n_samples, dtype=np.int64)\ny[2*n_samples:] = 1\n\nviews_ind = np.array([0, 2, 4])\n\nn_estimators = 3\nclf = MumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 3 iterations, the MuMBo classifier reaches exact '\n 'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n print(' - iteration {}, score: {}'.format(ind + 1, score))\n\n\nprint('\\nThe resulting MuMBo classifier uses three sub-classifiers that are '\n 'wheighted\\nusing the following weights:\\n'\n ' estimator weights: {}'.format(clf.estimator_weights_))\n\nprint('\\nThe two first sub-classifiers use the data of view 0 to compute '\n 'their\\nclassification results, while the third one uses the data of '\n 'view 1:\\n'\n ' best views: {}'. 
format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n 'between the\\ntwo views.')\n\nfig = plt.figure(figsize=(10., 8.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(2):\n ax = plt.subplot(2, 1, ind_view + 1)\n ax.set_title('View {}'.format(ind_view))\n ind_feature = ind_view * 2\n styles = ('.b', 'xb', '.r', 'xr')\n labels = ('non-separated', 'separated')\n for ind in range(4):\n ind_class = ind // 2\n label = labels[(ind + ind_view) % 2]\n ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],\n X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],\n styles[ind],\n label='Class {} ({})'.format(ind_class, label))\n ax.legend()\n\nprint('\\nThe second figure displays the classification results for the '\n 'sub-classifiers\\non the learning sample data.\\n')\n\nstyles = ('.b', '.r')\nfig = plt.figure(figsize=(12., 7.))\nfig.suptitle('Classification results on the learning data for the '\n 'sub-classifiers', size=16)\nfor ind_estimator in range(n_estimators):\n best_view = clf.best_views_[ind_estimator]\n y_pred = clf.estimators_[ind_estimator].predict(\n X[:, 2*best_view:2*best_view+2])\n background_color = (1.0, 1.0, 0.9)\n for ind_view in range(2):\n ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)\n if ind_view == best_view:\n ax.set_facecolor(background_color)\n ax.set_title(\n 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n ind_feature = ind_view * 2\n for ind_class in range(2):\n ind_samples = (y_pred == ind_class)\n ax.plot(X[ind_samples, ind_feature],\n X[ind_samples, ind_feature + 1],\n styles[ind_class],\n label='Class {}'.format(ind_class))\n ax.legend(title='Predicted class:')\n\nplt.show()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": 
"text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file diff --git a/doc/docmumbo/auto_examples/plot_2_views_2_classes.py.md5 b/doc/docmumbo/auto_examples/plot_2_views_2_classes.py.md5 deleted file mode 100644 index 1172a53ec81cdd93bf191f217043413337587907..0000000000000000000000000000000000000000 --- a/doc/docmumbo/auto_examples/plot_2_views_2_classes.py.md5 +++ /dev/null @@ -1 +0,0 @@ -6bfb170b1af59e9ec2958eea239976e2 \ No newline at end of file diff --git a/doc/docmumbo/auto_examples/plot_2_views_2_classes_codeobj.pickle b/doc/docmumbo/auto_examples/plot_2_views_2_classes_codeobj.pickle deleted file mode 100644 index ced0469b4f5b5b29ca214bfe0fbde24ba4659c5b..0000000000000000000000000000000000000000 Binary files a/doc/docmumbo/auto_examples/plot_2_views_2_classes_codeobj.pickle and /dev/null differ diff --git a/doc/docmumbo/auto_examples/plot_3_views_3_classes.ipynb b/doc/docmumbo/auto_examples/plot_3_views_3_classes.ipynb deleted file mode 100644 index bf392e76c61972592842f67e3e412c698d1ac2a6..0000000000000000000000000000000000000000 --- a/doc/docmumbo/auto_examples/plot_3_views_3_classes.ipynb +++ /dev/null @@ -1,54 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n==========================\n3 views, 3 classes example\n==========================\n\nIn this toy example, we generate data from three classes, split between three\ntwo-dimensional views.\n\nFor each view, the data are generated so that the points for two classes are\nwell seperated, while the points for the third class are not seperated with\nthe two other classes. 
That means that, taken separately, none of the single\nviews allows for a good classification of the data.\n\nNevertheless, the MuMBo algorithm take adavantage of the complementarity of\nthe views to rightly classify the points.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# Universit\u00e9 d'Aix Marseille (AMU) -\n# Centre National de la Recherche Scientifique (CNRS) -\n# Universit\u00e9 de Toulon (UTLN).\n# Copyright \u00a9 2017-2018 AMU, CNRS, UTLN\n#\n# This file is part of multimodalboost.\n#\n# multimodalboost is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# multimodalboost is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with multimodalboost. 
If not, see <http://www.gnu.org/licenses/>.\n#\n# Author: Florent JAILLET - Laboratoire d'Informatique et Syst\u00e8mes - UMR 7020\n\nimport numpy as np\nfrom multimodalboost.mumbo import MumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n \"\"\"Generate random data in a rectangle\"\"\"\n lim = np.array(lim)\n n_features = lim.shape[0]\n data = np.random.random((n_samples, n_features))\n data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 300\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 2.], [0., 1.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nview_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1, view_2), axis=1)\n\ny = np.zeros(3*n_samples, dtype=np.int64)\ny[n_samples:2*n_samples] = 1\ny[2*n_samples:] = 2\n\nviews_ind = np.array([0, 2, 4, 6])\n\nn_estimators = 4\nclf = MumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 4 iterations, the MuMBo classifier reaches exact '\n 'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n print(' - iteration {}, score: {}'.format(ind + 1, score))\n\nprint('\\nThe resulting MuMBo classifier uses four sub-classifiers that are '\n 'wheighted\\nusing the following weights:\\n'\n ' estimator weights: {}'.format(clf.estimator_weights_))\n\nprint('\\nThe first sub-classifier uses the data of view 0 to compute '\n 'its classification\\nresults, the second and third sub-classifiers use '\n 'the data of view 1, while the\\nfourth one uses the 
data of '\n 'view 2:\\n'\n ' best views: {}'. format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n 'between the\\nthree views.')\n\nstyles = ('.b', '.r', '.g')\nfig = plt.figure(figsize=(12., 11.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(3):\n ax = plt.subplot(3, 1, ind_view + 1)\n ax.set_title('View {}'.format(ind_view))\n ind_feature = ind_view * 2\n for ind_class in range(3):\n ind_samples = (y == ind_class)\n ax.plot(X[ind_samples, ind_feature],\n X[ind_samples, ind_feature + 1],\n styles[ind_class],\n label='Class {}'.format(ind_class))\n ax.legend(loc='upper left', framealpha=0.9)\n\nprint('\\nThe second figure displays the classification results for the '\n 'sub-classifiers\\non the learning sample data.\\n')\n\nfig = plt.figure(figsize=(14., 11.))\nfig.suptitle('Classification results on the learning data for the '\n 'sub-classifiers', size=16)\nfor ind_estimator in range(n_estimators):\n best_view = clf.best_views_[ind_estimator]\n y_pred = clf.estimators_[ind_estimator].predict(\n X[:, 2*best_view:2*best_view+2])\n background_color = (1.0, 1.0, 0.9)\n for ind_view in range(3):\n ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)\n if ind_view == best_view:\n ax.set_facecolor(background_color)\n ax.set_title(\n 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n ind_feature = ind_view * 2\n for ind_class in range(3):\n ind_samples = (y_pred == ind_class)\n ax.plot(X[ind_samples, ind_feature],\n X[ind_samples, ind_feature + 1],\n styles[ind_class],\n label='Class {}'.format(ind_class))\n ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)\n\nplt.show()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": 
"python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file diff --git a/doc/docmumbo/auto_examples/plot_3_views_3_classes.py.md5 b/doc/docmumbo/auto_examples/plot_3_views_3_classes.py.md5 deleted file mode 100644 index e7abc15ad3bfab64532a40674ffb12da81e66730..0000000000000000000000000000000000000000 --- a/doc/docmumbo/auto_examples/plot_3_views_3_classes.py.md5 +++ /dev/null @@ -1 +0,0 @@ -2dba08fc2c1e223dbeac65eeda17b109 \ No newline at end of file diff --git a/doc/docmumbo/auto_examples/plot_3_views_3_classes_codeobj.pickle b/doc/docmumbo/auto_examples/plot_3_views_3_classes_codeobj.pickle deleted file mode 100644 index 6f889f01721cd550caa52cbdd98e6550763ee216..0000000000000000000000000000000000000000 Binary files a/doc/docmumbo/auto_examples/plot_3_views_3_classes_codeobj.pickle and /dev/null differ diff --git a/doc/docmumbo/auto_examples/sg_execution_times.rst b/doc/docmumbo/auto_examples/sg_execution_times.rst deleted file mode 100644 index 2ac4607d11c692c649e2db4396e59b36a9295086..0000000000000000000000000000000000000000 --- a/doc/docmumbo/auto_examples/sg_execution_times.rst +++ /dev/null @@ -1,14 +0,0 @@ - -:orphan: - -.. 
_sphx_glr_auto_examples_sg_execution_times: - -Computation times -================= -**00:02.026** total execution time for **auto_examples** files: - -+-----------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_3_views_3_classes.py` (``plot_3_views_3_classes.py``) | 00:01.293 | 0.0 MB | -+-----------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_2_views_2_classes.py` (``plot_2_views_2_classes.py``) | 00:00.733 | 0.0 MB | -+-----------------------------------------------------------------------------------------+-----------+--------+ diff --git a/doc/docmumbo/conf.py b/doc/docmumbo/conf.py deleted file mode 100644 index e989eef7fc0407bc9333f7a3ee22efaa4a9f4f07..0000000000000000000000000000000000000000 --- a/doc/docmumbo/conf.py +++ /dev/null @@ -1,270 +0,0 @@ -# -*- coding: utf-8 -*- - -from datetime import date -import os - -import multimodalboost - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'numpydoc', - 'sphinx_gallery.gen_gallery'] - -# Add any paths that contain templates here, relative to this directory. -# templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. 
-source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = 'multimodalboost' -author = 'Florent Jaillet' -copyright = '2017-{}, LIS UMR 7020'.format(date.today().year) - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = multimodalboost.__version__ -# The full version, including alpha/beta/rc tags. -release = multimodalboost.__version__ - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. 
-# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'nature' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# "<project> v<release> documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. 
-# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a <link> tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = '{}doc'.format(project) - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - 'papersize': 'a4paper', - - # The font size ('10pt', '11pt' or '12pt'). - 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - - # Latex figure (float) alignment - 'figure_align': 'htbp'} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, '{}.tex'.format(project), '{} Documentation'.format(project), - author, 'manual')] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. 
-# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, project, '{} Documentation'.format(project), - [author], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, project, '{} Documentation'.format(project), author, project, - 'Boost algorithms for machine learning with multimodal data.', - 'Miscellaneous')] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = { - 'sklearn': ('http://scikit-learn.org/stable', None) -} - -numpydoc_show_class_members = False - -sphinx_gallery_conf = { - 'doc_module': (project,), - 'backreferences_dir': 'backreferences', - # path to your examples scripts - 'examples_dirs': '../examples', - # path where to save gallery generated examples - 'gallery_dirs': 'auto_examples'} - -# Generate the plots for the gallery -plot_gallery = 'True' diff --git a/doc/docmumbo/index.rst b/doc/docmumbo/index.rst deleted file mode 100644 index 3687ae362c02d40859f0807f2feac8b9bbc0c264..0000000000000000000000000000000000000000 --- a/doc/docmumbo/index.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. project-template documentation master file, created by - sphinx-quickstart on Mon Jan 18 14:44:12 2016. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -multimodalboost -=============== - -**multimodalboost** is a Python package implementing boost algorithms for -machine learning with multimodal data. - -It is compatible with `scikit-learn <http://scikit-learn.org/>`_, a popular -package for machine learning in Python. - -Documentation -------------- - -:Release: |version| -:Date: |today| - -.. 
toctree:: - :maxdepth: 1 - - install_devel - api - auto_examples/index - credits - -Indices and tables ------------------- - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/doc/index.rst b/doc/index.rst index 6e5825639d13fb3884055b1dc02338107056d949..d21b6f50d0190a376ee1faac8a1cd976f661a675 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -24,7 +24,7 @@ Documentation reference/api tutorial/install_devel tutorial/auto_examples/index - + tutorial/auto_examples/sg_execution_times Indices and tables diff --git a/doc/tutorial/auto_examples/auto_examples_jupyter.zip b/doc/tutorial/auto_examples/auto_examples_jupyter.zip new file mode 100644 index 0000000000000000000000000000000000000000..414431be2fe9801b778c2f36079e5f7fbf1acf21 Binary files /dev/null and b/doc/tutorial/auto_examples/auto_examples_jupyter.zip differ diff --git a/doc/tutorial/auto_examples/auto_examples_python.zip b/doc/tutorial/auto_examples/auto_examples_python.zip new file mode 100644 index 0000000000000000000000000000000000000000..ac5d7f5242bf3a39fc161b786c227078135bab41 Binary files /dev/null and b/doc/tutorial/auto_examples/auto_examples_python.zip differ diff --git a/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.ipynb b/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..81b05f4a84a25e90794c84757ff28fcc48dcdf2e --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.ipynb @@ -0,0 +1,54 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n==================================\nMuCombo 2 views, 2 classes example\n==================================\n\nIn this toy example, we generate data from two classes, split between two\ntwo-dimensional views.\n\nFor each view, the 
data are generated so that half of the points of each class\nare well separated in the plane, while the other half of the points are not\nseparated and placed in the same area. We also insure that the points that are\nnot separated in one view are well separated in the other view.\n\nThus, in the figure representing the data, the points represented by crosses\n(x) are well separated in view 0 while they are not separated in view 1, while\nthe points represented by dots (.) are well separated in view 1 while they are\nnot separated in view 0. In this figure, the blue symbols represent points\nof class 0, while red symbols represent points of class 1.\n\nThe MuCuMBo algorithm take adavantage of the complementarity of the two views to\nrightly classify the points.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nfrom multimodal.boosting.cumbo import MuCumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n \"\"\"Generate random data in a rectangle\"\"\"\n lim = np.array(lim)\n n_features = lim.shape[0]\n data = np.random.random((n_samples, n_features))\n data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 100\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [1., 2.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [1., 2.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1), axis=1)\n\ny = np.zeros(4*n_samples, dtype=np.int64)\ny[2*n_samples:] = 1\n\nviews_ind = np.array([0, 2, 4])\n\nn_estimators = 3\nclf = 
MuCumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 3 iterations, the MuCuMBo classifier reaches exact '\n 'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n print(' - iteration {}, score: {}'.format(ind + 1, score))\n\n\nprint('\\nThe resulting MuCuMBo classifier uses three sub-classifiers that are '\n 'wheighted\\nusing the following weights:\\n'\n ' estimator weights: {}'.format(clf.estimator_weights_alpha_))\n\n# print('\\nThe two first sub-classifiers use the data of view 0 to compute '\n# 'their\\nclassification results, while the third one uses the data of '\n# 'view 1:\\n'\n# ' best views: {}'. format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n 'between the\\ntwo views.')\n\nfig = plt.figure(figsize=(10., 8.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(2):\n ax = plt.subplot(2, 1, ind_view + 1)\n ax.set_title('View {}'.format(ind_view))\n ind_feature = ind_view * 2\n styles = ('.b', 'xb', '.r', 'xr')\n labels = ('non-separated', 'separated')\n for ind in range(4):\n ind_class = ind // 2\n label = labels[(ind + ind_view) % 2]\n ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],\n X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],\n styles[ind],\n label='Class {} ({})'.format(ind_class, label))\n ax.legend()\n\nprint('\\nThe second figure displays the classification results for the '\n 'sub-classifiers\\non the learning sample data.\\n')\n\nstyles = ('.b', '.r')\n# fig = plt.figure(figsize=(12., 7.))\n# fig.suptitle('Classification results on the learning data for the '\n# 'sub-classifiers', size=16)\n# for ind_estimator in range(n_estimators):\n# best_view = clf.best_views_[ind_estimator]\n# y_pred = clf.estimators_[ind_estimator].predict(\n# X[:, 2*best_view:2*best_view+2])\n# background_color = (1.0, 1.0, 0.9)\n# for ind_view in range(2):\n# ax = plt.subplot(2, 3, 
ind_estimator + 3*ind_view + 1)\n# if ind_view == best_view:\n# ax.set_facecolor(background_color)\n# ax.set_title(\n# 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n# ind_feature = ind_view * 2\n# for ind_class in range(2):\n# ind_samples = (y_pred == ind_class)\n# ax.plot(X[ind_samples, ind_feature],\n# X[ind_samples, ind_feature + 1],\n# styles[ind_class],\n# label='Class {}'.format(ind_class))\n# ax.legend(title='Predicted class:')\n\nplt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/examples/cumbo_plot_2_views_2_classes.py b/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.py similarity index 94% rename from examples/cumbo_plot_2_views_2_classes.py rename to doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.py index 28760f5fdc8319f73014f13e111887b9591717df..c669f6919c71f964891940bdfa3c1276e7342992 100644 --- a/examples/cumbo_plot_2_views_2_classes.py +++ b/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- """ -========================== -2 views, 2 classes example -========================== +================================== +MuCombo 2 views, 2 classes example +================================== In this toy example, we generate data from two classes, split between two two-dimensional views. @@ -18,7 +18,7 @@ the points represented by dots (.) are well separated in view 1 while they are not separated in view 0. In this figure, the blue symbols represent points of class 0, while red symbols represent points of class 1. 
-The MuMBo algorithm take adavantage of the complementarity of the two views to +The MuCuMBo algorithm take adavantage of the complementarity of the two views to rightly classify the points. """ @@ -62,7 +62,7 @@ n_estimators = 3 clf = MuCumboClassifier(n_estimators=n_estimators) clf.fit(X, y, views_ind) -print('\nAfter 3 iterations, the MuMBo classifier reaches exact ' +print('\nAfter 3 iterations, the MuCuMBo classifier reaches exact ' 'classification for the\nlearning samples:') for ind, score in enumerate(clf.staged_score(X, y)): print(' - iteration {}, score: {}'.format(ind + 1, score)) diff --git a/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.rst b/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.rst new file mode 100644 index 0000000000000000000000000000000000000000..e5398624bc3b285454a31afa96443822cf100da7 --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.rst @@ -0,0 +1,170 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here <sphx_glr_download_tutorial_auto_examples_cumbo_cumbo_plot_2_views_2_classes.py>` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorial_auto_examples_cumbo_cumbo_plot_2_views_2_classes.py: + + +================================== +MuCombo 2 views, 2 classes example +================================== + +In this toy example, we generate data from two classes, split between two +two-dimensional views. + +For each view, the data are generated so that half of the points of each class +are well separated in the plane, while the other half of the points are not +separated and placed in the same area. We also insure that the points that are +not separated in one view are well separated in the other view. + +Thus, in the figure representing the data, the points represented by crosses +(x) are well separated in view 0 while they are not separated in view 1, while +the points represented by dots (.) 
are well separated in view 1 while they are +not separated in view 0. In this figure, the blue symbols represent points +of class 0, while red symbols represent points of class 1. + +The MuCuMBo algorithm take adavantage of the complementarity of the two views to +rightly classify the points. + + +.. code-block:: default + + + import numpy as np + from multimodal.boosting.cumbo import MuCumboClassifier + from matplotlib import pyplot as plt + + + def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + + seed = 12 + np.random.seed(seed) + + n_samples = 100 + + view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]))) + + view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + + X = np.concatenate((view_0, view_1), axis=1) + + y = np.zeros(4*n_samples, dtype=np.int64) + y[2*n_samples:] = 1 + + views_ind = np.array([0, 2, 4]) + + n_estimators = 3 + clf = MuCumboClassifier(n_estimators=n_estimators) + clf.fit(X, y, views_ind) + + print('\nAfter 3 iterations, the MuCuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') + for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + + + print('\nThe resulting MuCuMBo classifier uses three sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights: {}'.format(clf.estimator_weights_alpha_)) + + # print('\nThe two first sub-classifiers use the data of view 0 to compute ' + # 
'their\nclassification results, while the third one uses the data of ' + # 'view 1:\n' + # ' best views: {}'. format(clf.best_views_)) + + print('\nThe first figure displays the data, splitting the representation ' + 'between the\ntwo views.') + + fig = plt.figure(figsize=(10., 8.)) + fig.suptitle('Representation of the data', size=16) + for ind_view in range(2): + ax = plt.subplot(2, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + styles = ('.b', 'xb', '.r', 'xr') + labels = ('non-separated', 'separated') + for ind in range(4): + ind_class = ind // 2 + label = labels[(ind + ind_view) % 2] + ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature], + X[n_samples*ind:n_samples*(ind+1), ind_feature + 1], + styles[ind], + label='Class {} ({})'.format(ind_class, label)) + ax.legend() + + print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + + styles = ('.b', '.r') + # fig = plt.figure(figsize=(12., 7.)) + # fig.suptitle('Classification results on the learning data for the ' + # 'sub-classifiers', size=16) + # for ind_estimator in range(n_estimators): + # best_view = clf.best_views_[ind_estimator] + # y_pred = clf.estimators_[ind_estimator].predict( + # X[:, 2*best_view:2*best_view+2]) + # background_color = (1.0, 1.0, 0.9) + # for ind_view in range(2): + # ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1) + # if ind_view == best_view: + # ax.set_facecolor(background_color) + # ax.set_title( + # 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) + # ind_feature = ind_view * 2 + # for ind_class in range(2): + # ind_samples = (y_pred == ind_class) + # ax.plot(X[ind_samples, ind_feature], + # X[ind_samples, ind_feature + 1], + # styles[ind_class], + # label='Class {}'.format(ind_class)) + # ax.legend(title='Predicted class:') + + plt.show() + + +.. 
rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 0.000 seconds) + + +.. _sphx_glr_download_tutorial_auto_examples_cumbo_cumbo_plot_2_views_2_classes.py: + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: cumbo_plot_2_views_2_classes.py <cumbo_plot_2_views_2_classes.py>` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: cumbo_plot_2_views_2_classes.ipynb <cumbo_plot_2_views_2_classes.ipynb>` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_ diff --git a/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes_codeobj.pickle b/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes_codeobj.pickle new file mode 100644 index 0000000000000000000000000000000000000000..390f2a42106ab46aa0b53783421621e8c579b93c Binary files /dev/null and b/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes_codeobj.pickle differ diff --git a/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.ipynb b/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..a6981bbc01227435bf8910e464bf977a12095aff --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.ipynb @@ -0,0 +1,54 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n==================================\nMuCumbo 3 views, 3 classes example\n==================================\n\nIn this toy example, we generate data from three classes, split between three\ntwo-dimensional views.\n\nFor each view, the data are generated so that the points for 
two classes are\nwell seperated, while the points for the third class are not seperated with\nthe two other classes. That means that, taken separately, none of the single\nviews allows for a good classification of the data.\n\nNevertheless, the MuCuMbo algorithm take adavantage of the complementarity of\nthe views to rightly classify the points.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nfrom multimodal.boosting.cumbo import MuCumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n \"\"\"Generate random data in a rectangle\"\"\"\n lim = np.array(lim)\n n_features = lim.shape[0]\n data = np.random.random((n_samples, n_features))\n data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 300\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 2.], [0., 1.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nview_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1, view_2), axis=1)\n\ny = np.zeros(3*n_samples, dtype=np.int64)\ny[n_samples:2*n_samples] = 1\ny[2*n_samples:] = 2\n\nviews_ind = np.array([0, 2, 4, 6])\n\nn_estimators = 4\nclf = MuCumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 4 iterations, the MuCuMBo classifier reaches exact '\n 'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n print(' - iteration {}, score: {}'.format(ind + 1, score))\n\nprint('\\nThe resulting 
MuCuMBo classifier uses four sub-classifiers that are '\n 'wheighted\\nusing the following weights:\\n'\n ' estimator weights alpha: {}'.format(clf.estimator_weights_alpha_))\n\n# print('\\nThe first sub-classifier uses the data of view 0 to compute '\n# 'its classification\\nresults, the second and third sub-classifiers use '\n# 'the data of view 1, while the\\nfourth one uses the data of '\n# 'view 2:\\n'\n# ' best views: {}'. format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n 'between the\\nthree views.')\n\nstyles = ('.b', '.r', '.g')\nfig = plt.figure(figsize=(12., 11.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(3):\n ax = plt.subplot(3, 1, ind_view + 1)\n ax.set_title('View {}'.format(ind_view))\n ind_feature = ind_view * 2\n for ind_class in range(3):\n ind_samples = (y == ind_class)\n ax.plot(X[ind_samples, ind_feature],\n X[ind_samples, ind_feature + 1],\n styles[ind_class],\n label='Class {}'.format(ind_class))\n ax.legend(loc='upper left', framealpha=0.9)\n\nprint('\\nThe second figure displays the classification results for the '\n 'sub-classifiers\\non the learning sample data.\\n')\n\n# fig = plt.figure(figsize=(14., 11.))\n# fig.suptitle('Classification results on the learning data for the '\n# 'sub-classifiers', size=16)\n# for ind_estimator in range(n_estimators):\n# best_view = clf.best_views_[ind_estimator]\n# y_pred = clf.estimators_[ind_estimator].predict(\n# X[:, 2*best_view:2*best_view+2])\n# background_color = (1.0, 1.0, 0.9)\n# for ind_view in range(3):\n# ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)\n# if ind_view == best_view:\n# ax.set_facecolor(background_color)\n# ax.set_title(\n# 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n# ind_feature = ind_view * 2\n# for ind_class in range(3):\n# ind_samples = (y_pred == ind_class)\n# ax.plot(X[ind_samples, ind_feature],\n# X[ind_samples, ind_feature + 1],\n# 
styles[ind_class],\n# label='Class {}'.format(ind_class))\n# ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)\n\nplt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/examples/cumbo_plot_3_views_3_classes.py b/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.py similarity index 96% rename from examples/cumbo_plot_3_views_3_classes.py rename to doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.py index 6a03a103d15cea1025ba950587f847e407781984..058b2dc7e6562d83d19de511031f19f90b567926 100644 --- a/examples/cumbo_plot_3_views_3_classes.py +++ b/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- """ -========================== -3 views, 3 classes example -========================== +================================== +MuCumbo 3 views, 3 classes example +================================== In this toy example, we generate data from three classes, split between three two-dimensional views. @@ -12,7 +12,7 @@ well seperated, while the points for the third class are not seperated with the two other classes. That means that, taken separately, none of the single views allows for a good classification of the data. -Nevertheless, the MuMBo algorithm take adavantage of the complementarity of +Nevertheless, the MuCuMbo algorithm take adavantage of the complementarity of the views to rightly classify the points. 
""" diff --git a/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.rst b/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.rst new file mode 100644 index 0000000000000000000000000000000000000000..aa7cfabe9614612e0b800f27d7a7f33bf9eea58c --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.rst @@ -0,0 +1,164 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here <sphx_glr_download_tutorial_auto_examples_cumbo_cumbo_plot_3_views_3_classes.py>` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorial_auto_examples_cumbo_cumbo_plot_3_views_3_classes.py: + + +================================== +MuCumbo 3 views, 3 classes example +================================== + +In this toy example, we generate data from three classes, split between three +two-dimensional views. + +For each view, the data are generated so that the points for two classes are +well seperated, while the points for the third class are not seperated with +the two other classes. That means that, taken separately, none of the single +views allows for a good classification of the data. + +Nevertheless, the MuCuMbo algorithm take adavantage of the complementarity of +the views to rightly classify the points. + + +.. 
code-block:: default + + + import numpy as np + from multimodal.boosting.cumbo import MuCumboClassifier + from matplotlib import pyplot as plt + + + def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + + seed = 12 + np.random.seed(seed) + + n_samples = 300 + + view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]))) + + view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + + view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]))) + + X = np.concatenate((view_0, view_1, view_2), axis=1) + + y = np.zeros(3*n_samples, dtype=np.int64) + y[n_samples:2*n_samples] = 1 + y[2*n_samples:] = 2 + + views_ind = np.array([0, 2, 4, 6]) + + n_estimators = 4 + clf = MuCumboClassifier(n_estimators=n_estimators) + clf.fit(X, y, views_ind) + + print('\nAfter 4 iterations, the MuCuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') + for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + + print('\nThe resulting MuCuMBo classifier uses four sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights alpha: {}'.format(clf.estimator_weights_alpha_)) + + # print('\nThe first sub-classifier uses the data of view 0 to compute ' + # 'its classification\nresults, the second and third sub-classifiers use ' + # 'the data of view 1, while the\nfourth one uses the data of ' + # 'view 2:\n' + # ' best views: {}'. 
format(clf.best_views_)) + + print('\nThe first figure displays the data, splitting the representation ' + 'between the\nthree views.') + + styles = ('.b', '.r', '.g') + fig = plt.figure(figsize=(12., 11.)) + fig.suptitle('Representation of the data', size=16) + for ind_view in range(3): + ax = plt.subplot(3, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(3): + ind_samples = (y == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(loc='upper left', framealpha=0.9) + + print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + + # fig = plt.figure(figsize=(14., 11.)) + # fig.suptitle('Classification results on the learning data for the ' + # 'sub-classifiers', size=16) + # for ind_estimator in range(n_estimators): + # best_view = clf.best_views_[ind_estimator] + # y_pred = clf.estimators_[ind_estimator].predict( + # X[:, 2*best_view:2*best_view+2]) + # background_color = (1.0, 1.0, 0.9) + # for ind_view in range(3): + # ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1) + # if ind_view == best_view: + # ax.set_facecolor(background_color) + # ax.set_title( + # 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) + # ind_feature = ind_view * 2 + # for ind_class in range(3): + # ind_samples = (y_pred == ind_class) + # ax.plot(X[ind_samples, ind_feature], + # X[ind_samples, ind_feature + 1], + # styles[ind_class], + # label='Class {}'.format(ind_class)) + # ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9) + + plt.show() + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 0.000 seconds) + + +.. _sphx_glr_download_tutorial_auto_examples_cumbo_cumbo_plot_3_views_3_classes.py: + + +.. only :: html + + .. 
container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: cumbo_plot_3_views_3_classes.py <cumbo_plot_3_views_3_classes.py>` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: cumbo_plot_3_views_3_classes.ipynb <cumbo_plot_3_views_3_classes.ipynb>` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_ diff --git a/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes_codeobj.pickle b/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes_codeobj.pickle new file mode 100644 index 0000000000000000000000000000000000000000..390f2a42106ab46aa0b53783421621e8c579b93c Binary files /dev/null and b/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes_codeobj.pickle differ diff --git a/doc/docmumbo/auto_examples/images/sphx_glr_plot_2_views_2_classes_001.png b/doc/tutorial/auto_examples/cumbo/images/sphx_glr_plot_cumbo_2_views_2_classes_001.png similarity index 100% rename from doc/docmumbo/auto_examples/images/sphx_glr_plot_2_views_2_classes_001.png rename to doc/tutorial/auto_examples/cumbo/images/sphx_glr_plot_cumbo_2_views_2_classes_001.png diff --git a/doc/docmumbo/auto_examples/images/sphx_glr_plot_3_views_3_classes_001.png b/doc/tutorial/auto_examples/cumbo/images/sphx_glr_plot_cumbo_3_views_3_classes_001.png similarity index 100% rename from doc/docmumbo/auto_examples/images/sphx_glr_plot_3_views_3_classes_001.png rename to doc/tutorial/auto_examples/cumbo/images/sphx_glr_plot_cumbo_3_views_3_classes_001.png diff --git a/doc/tutorial/auto_examples/cumbo/images/thumb/sphx_glr_cumbo_plot_2_views_2_classes_thumb.png b/doc/tutorial/auto_examples/cumbo/images/thumb/sphx_glr_cumbo_plot_2_views_2_classes_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..233f8e605efca4bef384a7c603d53fdc385428bc Binary files /dev/null and 
b/doc/tutorial/auto_examples/cumbo/images/thumb/sphx_glr_cumbo_plot_2_views_2_classes_thumb.png differ diff --git a/doc/tutorial/auto_examples/cumbo/images/thumb/sphx_glr_cumbo_plot_3_views_3_classes_thumb.png b/doc/tutorial/auto_examples/cumbo/images/thumb/sphx_glr_cumbo_plot_3_views_3_classes_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..233f8e605efca4bef384a7c603d53fdc385428bc Binary files /dev/null and b/doc/tutorial/auto_examples/cumbo/images/thumb/sphx_glr_cumbo_plot_3_views_3_classes_thumb.png differ diff --git a/doc/docmumbo/auto_examples/images/thumb/sphx_glr_plot_2_views_2_classes_thumb.png b/doc/tutorial/auto_examples/cumbo/images/thumb/sphx_glr_plot_cumbo_2_views_2_classes_thumb.png similarity index 100% rename from doc/docmumbo/auto_examples/images/thumb/sphx_glr_plot_2_views_2_classes_thumb.png rename to doc/tutorial/auto_examples/cumbo/images/thumb/sphx_glr_plot_cumbo_2_views_2_classes_thumb.png diff --git a/doc/docmumbo/auto_examples/images/thumb/sphx_glr_plot_3_views_3_classes_thumb.png b/doc/tutorial/auto_examples/cumbo/images/thumb/sphx_glr_plot_cumbo_3_views_3_classes_thumb.png similarity index 100% rename from doc/docmumbo/auto_examples/images/thumb/sphx_glr_plot_3_views_3_classes_thumb.png rename to doc/tutorial/auto_examples/cumbo/images/thumb/sphx_glr_plot_cumbo_3_views_3_classes_thumb.png diff --git a/doc/tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes.ipynb b/doc/tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..81b05f4a84a25e90794c84757ff28fcc48dcdf2e --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes.ipynb @@ -0,0 +1,54 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + 
"\n==================================\nMuCombo 2 views, 2 classes example\n==================================\n\nIn this toy example, we generate data from two classes, split between two\ntwo-dimensional views.\n\nFor each view, the data are generated so that half of the points of each class\nare well separated in the plane, while the other half of the points are not\nseparated and placed in the same area. We also insure that the points that are\nnot separated in one view are well separated in the other view.\n\nThus, in the figure representing the data, the points represented by crosses\n(x) are well separated in view 0 while they are not separated in view 1, while\nthe points represented by dots (.) are well separated in view 1 while they are\nnot separated in view 0. In this figure, the blue symbols represent points\nof class 0, while red symbols represent points of class 1.\n\nThe MuCuMBo algorithm take adavantage of the complementarity of the two views to\nrightly classify the points.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nfrom multimodal.boosting.cumbo import MuCumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n \"\"\"Generate random data in a rectangle\"\"\"\n lim = np.array(lim)\n n_features = lim.shape[0]\n data = np.random.random((n_samples, n_features))\n data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 100\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [1., 2.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [1., 2.]]),\n 
generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1), axis=1)\n\ny = np.zeros(4*n_samples, dtype=np.int64)\ny[2*n_samples:] = 1\n\nviews_ind = np.array([0, 2, 4])\n\nn_estimators = 3\nclf = MuCumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 3 iterations, the MuCuMBo classifier reaches exact '\n 'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n print(' - iteration {}, score: {}'.format(ind + 1, score))\n\n\nprint('\\nThe resulting MuCuMBo classifier uses three sub-classifiers that are '\n 'wheighted\\nusing the following weights:\\n'\n ' estimator weights: {}'.format(clf.estimator_weights_alpha_))\n\n# print('\\nThe two first sub-classifiers use the data of view 0 to compute '\n# 'their\\nclassification results, while the third one uses the data of '\n# 'view 1:\\n'\n# ' best views: {}'. format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n 'between the\\ntwo views.')\n\nfig = plt.figure(figsize=(10., 8.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(2):\n ax = plt.subplot(2, 1, ind_view + 1)\n ax.set_title('View {}'.format(ind_view))\n ind_feature = ind_view * 2\n styles = ('.b', 'xb', '.r', 'xr')\n labels = ('non-separated', 'separated')\n for ind in range(4):\n ind_class = ind // 2\n label = labels[(ind + ind_view) % 2]\n ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],\n X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],\n styles[ind],\n label='Class {} ({})'.format(ind_class, label))\n ax.legend()\n\nprint('\\nThe second figure displays the classification results for the '\n 'sub-classifiers\\non the learning sample data.\\n')\n\nstyles = ('.b', '.r')\n# fig = plt.figure(figsize=(12., 7.))\n# fig.suptitle('Classification results on the learning data for the '\n# 'sub-classifiers', size=16)\n# for ind_estimator in range(n_estimators):\n# 
best_view = clf.best_views_[ind_estimator]\n# y_pred = clf.estimators_[ind_estimator].predict(\n# X[:, 2*best_view:2*best_view+2])\n# background_color = (1.0, 1.0, 0.9)\n# for ind_view in range(2):\n# ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)\n# if ind_view == best_view:\n# ax.set_facecolor(background_color)\n# ax.set_title(\n# 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n# ind_feature = ind_view * 2\n# for ind_class in range(2):\n# ind_samples = (y_pred == ind_class)\n# ax.plot(X[ind_samples, ind_feature],\n# X[ind_samples, ind_feature + 1],\n# styles[ind_class],\n# label='Class {}'.format(ind_class))\n# ax.legend(title='Predicted class:')\n\nplt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes.py b/doc/tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..c669f6919c71f964891940bdfa3c1276e7342992 --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +""" +================================== +MuCombo 2 views, 2 classes example +================================== + +In this toy example, we generate data from two classes, split between two +two-dimensional views. + +For each view, the data are generated so that half of the points of each class +are well separated in the plane, while the other half of the points are not +separated and placed in the same area. 
We also insure that the points that are +not separated in one view are well separated in the other view. + +Thus, in the figure representing the data, the points represented by crosses +(x) are well separated in view 0 while they are not separated in view 1, while +the points represented by dots (.) are well separated in view 1 while they are +not separated in view 0. In this figure, the blue symbols represent points +of class 0, while red symbols represent points of class 1. + +The MuCuMBo algorithm take adavantage of the complementarity of the two views to +rightly classify the points. +""" + +import numpy as np +from multimodal.boosting.cumbo import MuCumboClassifier +from matplotlib import pyplot as plt + + +def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + +seed = 12 +np.random.seed(seed) + +n_samples = 100 + +view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]))) + +view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + +X = np.concatenate((view_0, view_1), axis=1) + +y = np.zeros(4*n_samples, dtype=np.int64) +y[2*n_samples:] = 1 + +views_ind = np.array([0, 2, 4]) + +n_estimators = 3 +clf = MuCumboClassifier(n_estimators=n_estimators) +clf.fit(X, y, views_ind) + +print('\nAfter 3 iterations, the MuCuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') +for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + + +print('\nThe resulting MuCuMBo 
classifier uses three sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights: {}'.format(clf.estimator_weights_alpha_)) + +# print('\nThe two first sub-classifiers use the data of view 0 to compute ' +# 'their\nclassification results, while the third one uses the data of ' +# 'view 1:\n' +# ' best views: {}'. format(clf.best_views_)) + +print('\nThe first figure displays the data, splitting the representation ' + 'between the\ntwo views.') + +fig = plt.figure(figsize=(10., 8.)) +fig.suptitle('Representation of the data', size=16) +for ind_view in range(2): + ax = plt.subplot(2, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + styles = ('.b', 'xb', '.r', 'xr') + labels = ('non-separated', 'separated') + for ind in range(4): + ind_class = ind // 2 + label = labels[(ind + ind_view) % 2] + ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature], + X[n_samples*ind:n_samples*(ind+1), ind_feature + 1], + styles[ind], + label='Class {} ({})'.format(ind_class, label)) + ax.legend() + +print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + +styles = ('.b', '.r') +# fig = plt.figure(figsize=(12., 7.)) +# fig.suptitle('Classification results on the learning data for the ' +# 'sub-classifiers', size=16) +# for ind_estimator in range(n_estimators): +# best_view = clf.best_views_[ind_estimator] +# y_pred = clf.estimators_[ind_estimator].predict( +# X[:, 2*best_view:2*best_view+2]) +# background_color = (1.0, 1.0, 0.9) +# for ind_view in range(2): +# ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1) +# if ind_view == best_view: +# ax.set_facecolor(background_color) +# ax.set_title( +# 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) +# ind_feature = ind_view * 2 +# for ind_class in range(2): +# ind_samples = (y_pred == ind_class) +# ax.plot(X[ind_samples, ind_feature], +# X[ind_samples, ind_feature + 1], +# 
styles[ind_class], +# label='Class {}'.format(ind_class)) +# ax.legend(title='Predicted class:') + +plt.show() diff --git a/doc/tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes.py.md5 b/doc/tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes.py.md5 new file mode 100644 index 0000000000000000000000000000000000000000..06630ff19708de5a1323bf19987c05ff64028b9b --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes.py.md5 @@ -0,0 +1 @@ +5b1bad9179e6c052ce9a8198c8443c9a \ No newline at end of file diff --git a/doc/tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes.rst b/doc/tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes.rst new file mode 100644 index 0000000000000000000000000000000000000000..f47b92abcb3d2794aa65b4b003ed32f1ae74773d --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes.rst @@ -0,0 +1,208 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here <sphx_glr_download_tutorial_auto_examples_cumbo_plot_cumbo_2_views_2_classes.py>` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorial_auto_examples_cumbo_plot_cumbo_2_views_2_classes.py: + + +================================== +MuCombo 2 views, 2 classes example +================================== + +In this toy example, we generate data from two classes, split between two +two-dimensional views. + +For each view, the data are generated so that half of the points of each class +are well separated in the plane, while the other half of the points are not +separated and placed in the same area. We also insure that the points that are +not separated in one view are well separated in the other view. + +Thus, in the figure representing the data, the points represented by crosses +(x) are well separated in view 0 while they are not separated in view 1, while +the points represented by dots (.) are well separated in view 1 while they are +not separated in view 0. 
In this figure, the blue symbols represent points +of class 0, while red symbols represent points of class 1. + +The MuCuMBo algorithm take adavantage of the complementarity of the two views to +rightly classify the points. + + + +.. image:: /tutorial/auto_examples/cumbo/images/sphx_glr_plot_cumbo_2_views_2_classes_001.png + :class: sphx-glr-single-img + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + /home/dominique/.virtualenvs/env_multiv/local/lib/python3.6/site-packages/sklearn/ensemble/weight_boosting.py:29: DeprecationWarning: numpy.core.umath_tests is an internal NumPy module and should not be imported. It will be removed in a future NumPy release. + from numpy.core.umath_tests import inner1d + + After 3 iterations, the MuCuMBo classifier reaches exact classification for the + learning samples: + - iteration 1, score: 0.75 + - iteration 2, score: 0.75 + + The resulting MuCuMBo classifier uses three sub-classifiers that are wheighted + using the following weights: + estimator weights: [[0.54930614 0.54930614]] + + The first figure displays the data, splitting the representation between the + two views. + + The second figure displays the classification results for the sub-classifiers + on the learning sample data. + + + + + + + +| + + +.. 
code-block:: default + + + import numpy as np + from multimodal.boosting.cumbo import MuCumboClassifier + from matplotlib import pyplot as plt + + + def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + + seed = 12 + np.random.seed(seed) + + n_samples = 100 + + view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]))) + + view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + + X = np.concatenate((view_0, view_1), axis=1) + + y = np.zeros(4*n_samples, dtype=np.int64) + y[2*n_samples:] = 1 + + views_ind = np.array([0, 2, 4]) + + n_estimators = 3 + clf = MuCumboClassifier(n_estimators=n_estimators) + clf.fit(X, y, views_ind) + + print('\nAfter 3 iterations, the MuCuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') + for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + + + print('\nThe resulting MuCuMBo classifier uses three sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights: {}'.format(clf.estimator_weights_alpha_)) + + # print('\nThe two first sub-classifiers use the data of view 0 to compute ' + # 'their\nclassification results, while the third one uses the data of ' + # 'view 1:\n' + # ' best views: {}'. 
format(clf.best_views_)) + + print('\nThe first figure displays the data, splitting the representation ' + 'between the\ntwo views.') + + fig = plt.figure(figsize=(10., 8.)) + fig.suptitle('Representation of the data', size=16) + for ind_view in range(2): + ax = plt.subplot(2, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + styles = ('.b', 'xb', '.r', 'xr') + labels = ('non-separated', 'separated') + for ind in range(4): + ind_class = ind // 2 + label = labels[(ind + ind_view) % 2] + ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature], + X[n_samples*ind:n_samples*(ind+1), ind_feature + 1], + styles[ind], + label='Class {} ({})'.format(ind_class, label)) + ax.legend() + + print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + + styles = ('.b', '.r') + # fig = plt.figure(figsize=(12., 7.)) + # fig.suptitle('Classification results on the learning data for the ' + # 'sub-classifiers', size=16) + # for ind_estimator in range(n_estimators): + # best_view = clf.best_views_[ind_estimator] + # y_pred = clf.estimators_[ind_estimator].predict( + # X[:, 2*best_view:2*best_view+2]) + # background_color = (1.0, 1.0, 0.9) + # for ind_view in range(2): + # ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1) + # if ind_view == best_view: + # ax.set_facecolor(background_color) + # ax.set_title( + # 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) + # ind_feature = ind_view * 2 + # for ind_class in range(2): + # ind_samples = (y_pred == ind_class) + # ax.plot(X[ind_samples, ind_feature], + # X[ind_samples, ind_feature + 1], + # styles[ind_class], + # label='Class {}'.format(ind_class)) + # ax.legend(title='Predicted class:') + + plt.show() + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 0.616 seconds) + + +.. _sphx_glr_download_tutorial_auto_examples_cumbo_plot_cumbo_2_views_2_classes.py: + + +.. 
only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: plot_cumbo_2_views_2_classes.py <plot_cumbo_2_views_2_classes.py>` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: plot_cumbo_2_views_2_classes.ipynb <plot_cumbo_2_views_2_classes.ipynb>` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_ diff --git a/doc/tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes_codeobj.pickle b/doc/tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes_codeobj.pickle new file mode 100644 index 0000000000000000000000000000000000000000..c952702cb6c9aef634d994d47180af94e03b0775 Binary files /dev/null and b/doc/tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes_codeobj.pickle differ diff --git a/doc/tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes.ipynb b/doc/tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..a6981bbc01227435bf8910e464bf977a12095aff --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes.ipynb @@ -0,0 +1,54 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n==================================\nMuCumbo 3 views, 3 classes example\n==================================\n\nIn this toy example, we generate data from three classes, split between three\ntwo-dimensional views.\n\nFor each view, the data are generated so that the points for two classes are\nwell seperated, while the points for the third class are not seperated with\nthe two other classes. 
That means that, taken separately, none of the single\nviews allows for a good classification of the data.\n\nNevertheless, the MuCuMbo algorithm take adavantage of the complementarity of\nthe views to rightly classify the points.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nfrom multimodal.boosting.cumbo import MuCumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n \"\"\"Generate random data in a rectangle\"\"\"\n lim = np.array(lim)\n n_features = lim.shape[0]\n data = np.random.random((n_samples, n_features))\n data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 300\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 2.], [0., 1.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nview_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1, view_2), axis=1)\n\ny = np.zeros(3*n_samples, dtype=np.int64)\ny[n_samples:2*n_samples] = 1\ny[2*n_samples:] = 2\n\nviews_ind = np.array([0, 2, 4, 6])\n\nn_estimators = 4\nclf = MuCumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 4 iterations, the MuCuMBo classifier reaches exact '\n 'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n print(' - iteration {}, score: {}'.format(ind + 1, score))\n\nprint('\\nThe resulting MuCuMBo classifier uses four sub-classifiers that are '\n 'wheighted\\nusing the following weights:\\n'\n ' estimator 
weights alpha: {}'.format(clf.estimator_weights_alpha_))\n\n# print('\\nThe first sub-classifier uses the data of view 0 to compute '\n# 'its classification\\nresults, the second and third sub-classifiers use '\n# 'the data of view 1, while the\\nfourth one uses the data of '\n# 'view 2:\\n'\n# ' best views: {}'. format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n 'between the\\nthree views.')\n\nstyles = ('.b', '.r', '.g')\nfig = plt.figure(figsize=(12., 11.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(3):\n ax = plt.subplot(3, 1, ind_view + 1)\n ax.set_title('View {}'.format(ind_view))\n ind_feature = ind_view * 2\n for ind_class in range(3):\n ind_samples = (y == ind_class)\n ax.plot(X[ind_samples, ind_feature],\n X[ind_samples, ind_feature + 1],\n styles[ind_class],\n label='Class {}'.format(ind_class))\n ax.legend(loc='upper left', framealpha=0.9)\n\nprint('\\nThe second figure displays the classification results for the '\n 'sub-classifiers\\non the learning sample data.\\n')\n\n# fig = plt.figure(figsize=(14., 11.))\n# fig.suptitle('Classification results on the learning data for the '\n# 'sub-classifiers', size=16)\n# for ind_estimator in range(n_estimators):\n# best_view = clf.best_views_[ind_estimator]\n# y_pred = clf.estimators_[ind_estimator].predict(\n# X[:, 2*best_view:2*best_view+2])\n# background_color = (1.0, 1.0, 0.9)\n# for ind_view in range(3):\n# ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)\n# if ind_view == best_view:\n# ax.set_facecolor(background_color)\n# ax.set_title(\n# 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n# ind_feature = ind_view * 2\n# for ind_class in range(3):\n# ind_samples = (y_pred == ind_class)\n# ax.plot(X[ind_samples, ind_feature],\n# X[ind_samples, ind_feature + 1],\n# styles[ind_class],\n# label='Class {}'.format(ind_class))\n# ax.legend(title='Predicted class:', loc='upper left', 
framealpha=0.9)\n\nplt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes.py b/doc/tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..058b2dc7e6562d83d19de511031f19f90b567926 --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +""" +================================== +MuCumbo 3 views, 3 classes example +================================== + +In this toy example, we generate data from three classes, split between three +two-dimensional views. + +For each view, the data are generated so that the points for two classes are +well seperated, while the points for the third class are not seperated with +the two other classes. That means that, taken separately, none of the single +views allows for a good classification of the data. + +Nevertheless, the MuCuMbo algorithm take adavantage of the complementarity of +the views to rightly classify the points. 
+""" + +import numpy as np +from multimodal.boosting.cumbo import MuCumboClassifier +from matplotlib import pyplot as plt + + +def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + +seed = 12 +np.random.seed(seed) + +n_samples = 300 + +view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]))) + +view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + +view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]))) + +X = np.concatenate((view_0, view_1, view_2), axis=1) + +y = np.zeros(3*n_samples, dtype=np.int64) +y[n_samples:2*n_samples] = 1 +y[2*n_samples:] = 2 + +views_ind = np.array([0, 2, 4, 6]) + +n_estimators = 4 +clf = MuCumboClassifier(n_estimators=n_estimators) +clf.fit(X, y, views_ind) + +print('\nAfter 4 iterations, the MuCuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') +for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + +print('\nThe resulting MuCuMBo classifier uses four sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights alpha: {}'.format(clf.estimator_weights_alpha_)) + +# print('\nThe first sub-classifier uses the data of view 0 to compute ' +# 'its classification\nresults, the second and third sub-classifiers use ' +# 'the data of view 1, while the\nfourth one uses the data of ' +# 'view 2:\n' +# ' best views: {}'. 
format(clf.best_views_)) + +print('\nThe first figure displays the data, splitting the representation ' + 'between the\nthree views.') + +styles = ('.b', '.r', '.g') +fig = plt.figure(figsize=(12., 11.)) +fig.suptitle('Representation of the data', size=16) +for ind_view in range(3): + ax = plt.subplot(3, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(3): + ind_samples = (y == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(loc='upper left', framealpha=0.9) + +print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + +# fig = plt.figure(figsize=(14., 11.)) +# fig.suptitle('Classification results on the learning data for the ' +# 'sub-classifiers', size=16) +# for ind_estimator in range(n_estimators): +# best_view = clf.best_views_[ind_estimator] +# y_pred = clf.estimators_[ind_estimator].predict( +# X[:, 2*best_view:2*best_view+2]) +# background_color = (1.0, 1.0, 0.9) +# for ind_view in range(3): +# ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1) +# if ind_view == best_view: +# ax.set_facecolor(background_color) +# ax.set_title( +# 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) +# ind_feature = ind_view * 2 +# for ind_class in range(3): +# ind_samples = (y_pred == ind_class) +# ax.plot(X[ind_samples, ind_feature], +# X[ind_samples, ind_feature + 1], +# styles[ind_class], +# label='Class {}'.format(ind_class)) +# ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9) + +plt.show() diff --git a/doc/tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes.py.md5 b/doc/tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes.py.md5 new file mode 100644 index 0000000000000000000000000000000000000000..c3653c303ada0ff50065d5553e904beadff08509 --- /dev/null +++ 
b/doc/tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes.py.md5 @@ -0,0 +1 @@ +62ad2fbd5b04b88cb8fe2f86822f60b9 \ No newline at end of file diff --git a/doc/tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes.rst b/doc/tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes.rst new file mode 100644 index 0000000000000000000000000000000000000000..07d466987c9d5cfa3106642357ea4de904421aaf --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes.rst @@ -0,0 +1,201 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here <sphx_glr_download_tutorial_auto_examples_cumbo_plot_cumbo_3_views_3_classes.py>` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorial_auto_examples_cumbo_plot_cumbo_3_views_3_classes.py: + + +================================== +MuCumbo 3 views, 3 classes example +================================== + +In this toy example, we generate data from three classes, split between three +two-dimensional views. + +For each view, the data are generated so that the points for two classes are +well seperated, while the points for the third class are not seperated with +the two other classes. That means that, taken separately, none of the single +views allows for a good classification of the data. + +Nevertheless, the MuCuMbo algorithm take adavantage of the complementarity of +the views to rightly classify the points. + + + +.. image:: /tutorial/auto_examples/cumbo/images/sphx_glr_plot_cumbo_3_views_3_classes_001.png + :class: sphx-glr-single-img + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. 
code-block:: none + + + After 4 iterations, the MuCuMBo classifier reaches exact classification for the + learning samples: + - iteration 1, score: 0.6666666666666666 + - iteration 2, score: 0.6666666666666666 + - iteration 3, score: 1.0 + + The resulting MuCuMBo classifier uses four sub-classifiers that are wheighted + using the following weights: + estimator weights alpha: [[0.54930614 0.54930614 0.54930614]] + + The first figure displays the data, splitting the representation between the + three views. + + The second figure displays the classification results for the sub-classifiers + on the learning sample data. + + + + + + + +| + + +.. code-block:: default + + + import numpy as np + from multimodal.boosting.cumbo import MuCumboClassifier + from matplotlib import pyplot as plt + + + def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + + seed = 12 + np.random.seed(seed) + + n_samples = 300 + + view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]))) + + view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + + view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]))) + + X = np.concatenate((view_0, view_1, view_2), axis=1) + + y = np.zeros(3*n_samples, dtype=np.int64) + y[n_samples:2*n_samples] = 1 + y[2*n_samples:] = 2 + + views_ind = np.array([0, 2, 4, 6]) + + n_estimators = 4 + clf = MuCumboClassifier(n_estimators=n_estimators) + clf.fit(X, y, views_ind) + + print('\nAfter 4 iterations, the MuCuMBo classifier reaches 
exact ' + 'classification for the\nlearning samples:') + for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + + print('\nThe resulting MuCuMBo classifier uses four sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights alpha: {}'.format(clf.estimator_weights_alpha_)) + + # print('\nThe first sub-classifier uses the data of view 0 to compute ' + # 'its classification\nresults, the second and third sub-classifiers use ' + # 'the data of view 1, while the\nfourth one uses the data of ' + # 'view 2:\n' + # ' best views: {}'. format(clf.best_views_)) + + print('\nThe first figure displays the data, splitting the representation ' + 'between the\nthree views.') + + styles = ('.b', '.r', '.g') + fig = plt.figure(figsize=(12., 11.)) + fig.suptitle('Representation of the data', size=16) + for ind_view in range(3): + ax = plt.subplot(3, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(3): + ind_samples = (y == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(loc='upper left', framealpha=0.9) + + print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + + # fig = plt.figure(figsize=(14., 11.)) + # fig.suptitle('Classification results on the learning data for the ' + # 'sub-classifiers', size=16) + # for ind_estimator in range(n_estimators): + # best_view = clf.best_views_[ind_estimator] + # y_pred = clf.estimators_[ind_estimator].predict( + # X[:, 2*best_view:2*best_view+2]) + # background_color = (1.0, 1.0, 0.9) + # for ind_view in range(3): + # ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1) + # if ind_view == best_view: + # ax.set_facecolor(background_color) + # ax.set_title( + # 'Sub-classifier {} - View 
{}'.format(ind_estimator, ind_view)) + # ind_feature = ind_view * 2 + # for ind_class in range(3): + # ind_samples = (y_pred == ind_class) + # ax.plot(X[ind_samples, ind_feature], + # X[ind_samples, ind_feature + 1], + # styles[ind_class], + # label='Class {}'.format(ind_class)) + # ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9) + + plt.show() + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 0.497 seconds) + + +.. _sphx_glr_download_tutorial_auto_examples_cumbo_plot_cumbo_3_views_3_classes.py: + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: plot_cumbo_3_views_3_classes.py <plot_cumbo_3_views_3_classes.py>` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: plot_cumbo_3_views_3_classes.ipynb <plot_cumbo_3_views_3_classes.ipynb>` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_ diff --git a/doc/tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes_codeobj.pickle b/doc/tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes_codeobj.pickle new file mode 100644 index 0000000000000000000000000000000000000000..746fbf9ac929cb3135a4802a315dfa4898275a83 Binary files /dev/null and b/doc/tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes_codeobj.pickle differ diff --git a/doc/tutorial/auto_examples/cumbo/sg_execution_times.rst b/doc/tutorial/auto_examples/cumbo/sg_execution_times.rst new file mode 100644 index 0000000000000000000000000000000000000000..8d71d51ea6217a19f9d1624037b9c6130b9c3aa2 --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo/sg_execution_times.rst @@ -0,0 +1,14 @@ + +:orphan: + +.. 
_sphx_glr_tutorial_auto_examples_cumbo_sg_execution_times: + +Computation times +================= +**00:01.113** total execution time for **tutorial_auto_examples_cumbo** files: + ++--------------------------------------------------------------------------------------------------------------------+-----------+--------+ +| :ref:`sphx_glr_tutorial_auto_examples_cumbo_plot_cumbo_2_views_2_classes.py` (``plot_cumbo_2_views_2_classes.py``) | 00:00.616 | 0.0 MB | ++--------------------------------------------------------------------------------------------------------------------+-----------+--------+ +| :ref:`sphx_glr_tutorial_auto_examples_cumbo_plot_cumbo_3_views_3_classes.py` (``plot_cumbo_3_views_3_classes.py``) | 00:00.497 | 0.0 MB | ++--------------------------------------------------------------------------------------------------------------------+-----------+--------+ diff --git a/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.ipynb b/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..81b05f4a84a25e90794c84757ff28fcc48dcdf2e --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.ipynb @@ -0,0 +1,54 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n==================================\nMuCombo 2 views, 2 classes example\n==================================\n\nIn this toy example, we generate data from two classes, split between two\ntwo-dimensional views.\n\nFor each view, the data are generated so that half of the points of each class\nare well separated in the plane, while the other half of the points are not\nseparated and placed in the same area. 
We also insure that the points that are\nnot separated in one view are well separated in the other view.\n\nThus, in the figure representing the data, the points represented by crosses\n(x) are well separated in view 0 while they are not separated in view 1, while\nthe points represented by dots (.) are well separated in view 1 while they are\nnot separated in view 0. In this figure, the blue symbols represent points\nof class 0, while red symbols represent points of class 1.\n\nThe MuCuMBo algorithm take adavantage of the complementarity of the two views to\nrightly classify the points.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nfrom multimodal.boosting.cumbo import MuCumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n \"\"\"Generate random data in a rectangle\"\"\"\n lim = np.array(lim)\n n_features = lim.shape[0]\n data = np.random.random((n_samples, n_features))\n data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 100\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [1., 2.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [1., 2.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1), axis=1)\n\ny = np.zeros(4*n_samples, dtype=np.int64)\ny[2*n_samples:] = 1\n\nviews_ind = np.array([0, 2, 4])\n\nn_estimators = 3\nclf = MuCumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 3 iterations, the MuCuMBo classifier reaches exact '\n 'classification for the\\nlearning samples:')\nfor ind, 
score in enumerate(clf.staged_score(X, y)):\n print(' - iteration {}, score: {}'.format(ind + 1, score))\n\n\nprint('\\nThe resulting MuCuMBo classifier uses three sub-classifiers that are '\n 'wheighted\\nusing the following weights:\\n'\n ' estimator weights: {}'.format(clf.estimator_weights_alpha_))\n\n# print('\\nThe two first sub-classifiers use the data of view 0 to compute '\n# 'their\\nclassification results, while the third one uses the data of '\n# 'view 1:\\n'\n# ' best views: {}'. format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n 'between the\\ntwo views.')\n\nfig = plt.figure(figsize=(10., 8.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(2):\n ax = plt.subplot(2, 1, ind_view + 1)\n ax.set_title('View {}'.format(ind_view))\n ind_feature = ind_view * 2\n styles = ('.b', 'xb', '.r', 'xr')\n labels = ('non-separated', 'separated')\n for ind in range(4):\n ind_class = ind // 2\n label = labels[(ind + ind_view) % 2]\n ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],\n X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],\n styles[ind],\n label='Class {} ({})'.format(ind_class, label))\n ax.legend()\n\nprint('\\nThe second figure displays the classification results for the '\n 'sub-classifiers\\non the learning sample data.\\n')\n\nstyles = ('.b', '.r')\n# fig = plt.figure(figsize=(12., 7.))\n# fig.suptitle('Classification results on the learning data for the '\n# 'sub-classifiers', size=16)\n# for ind_estimator in range(n_estimators):\n# best_view = clf.best_views_[ind_estimator]\n# y_pred = clf.estimators_[ind_estimator].predict(\n# X[:, 2*best_view:2*best_view+2])\n# background_color = (1.0, 1.0, 0.9)\n# for ind_view in range(2):\n# ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)\n# if ind_view == best_view:\n# ax.set_facecolor(background_color)\n# ax.set_title(\n# 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n# ind_feature = ind_view * 
2\n# for ind_class in range(2):\n# ind_samples = (y_pred == ind_class)\n# ax.plot(X[ind_samples, ind_feature],\n# X[ind_samples, ind_feature + 1],\n# styles[ind_class],\n# label='Class {}'.format(ind_class))\n# ax.legend(title='Predicted class:')\n\nplt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.py b/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..c669f6919c71f964891940bdfa3c1276e7342992 --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +""" +================================== +MuCombo 2 views, 2 classes example +================================== + +In this toy example, we generate data from two classes, split between two +two-dimensional views. + +For each view, the data are generated so that half of the points of each class +are well separated in the plane, while the other half of the points are not +separated and placed in the same area. We also insure that the points that are +not separated in one view are well separated in the other view. + +Thus, in the figure representing the data, the points represented by crosses +(x) are well separated in view 0 while they are not separated in view 1, while +the points represented by dots (.) are well separated in view 1 while they are +not separated in view 0. In this figure, the blue symbols represent points +of class 0, while red symbols represent points of class 1. 
+ +The MuCuMBo algorithm take adavantage of the complementarity of the two views to +rightly classify the points. +""" + +import numpy as np +from multimodal.boosting.cumbo import MuCumboClassifier +from matplotlib import pyplot as plt + + +def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + +seed = 12 +np.random.seed(seed) + +n_samples = 100 + +view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]))) + +view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + +X = np.concatenate((view_0, view_1), axis=1) + +y = np.zeros(4*n_samples, dtype=np.int64) +y[2*n_samples:] = 1 + +views_ind = np.array([0, 2, 4]) + +n_estimators = 3 +clf = MuCumboClassifier(n_estimators=n_estimators) +clf.fit(X, y, views_ind) + +print('\nAfter 3 iterations, the MuCuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') +for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + + +print('\nThe resulting MuCuMBo classifier uses three sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights: {}'.format(clf.estimator_weights_alpha_)) + +# print('\nThe two first sub-classifiers use the data of view 0 to compute ' +# 'their\nclassification results, while the third one uses the data of ' +# 'view 1:\n' +# ' best views: {}'. 
format(clf.best_views_)) + +print('\nThe first figure displays the data, splitting the representation ' + 'between the\ntwo views.') + +fig = plt.figure(figsize=(10., 8.)) +fig.suptitle('Representation of the data', size=16) +for ind_view in range(2): + ax = plt.subplot(2, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + styles = ('.b', 'xb', '.r', 'xr') + labels = ('non-separated', 'separated') + for ind in range(4): + ind_class = ind // 2 + label = labels[(ind + ind_view) % 2] + ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature], + X[n_samples*ind:n_samples*(ind+1), ind_feature + 1], + styles[ind], + label='Class {} ({})'.format(ind_class, label)) + ax.legend() + +print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + +styles = ('.b', '.r') +# fig = plt.figure(figsize=(12., 7.)) +# fig.suptitle('Classification results on the learning data for the ' +# 'sub-classifiers', size=16) +# for ind_estimator in range(n_estimators): +# best_view = clf.best_views_[ind_estimator] +# y_pred = clf.estimators_[ind_estimator].predict( +# X[:, 2*best_view:2*best_view+2]) +# background_color = (1.0, 1.0, 0.9) +# for ind_view in range(2): +# ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1) +# if ind_view == best_view: +# ax.set_facecolor(background_color) +# ax.set_title( +# 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) +# ind_feature = ind_view * 2 +# for ind_class in range(2): +# ind_samples = (y_pred == ind_class) +# ax.plot(X[ind_samples, ind_feature], +# X[ind_samples, ind_feature + 1], +# styles[ind_class], +# label='Class {}'.format(ind_class)) +# ax.legend(title='Predicted class:') + +plt.show() diff --git a/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.rst b/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.rst new file mode 100644 index 
0000000000000000000000000000000000000000..74f22bdda0d21f8ad0e61d91041a3f9ea35957ee --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.rst @@ -0,0 +1,170 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here <sphx_glr_download_tutorial_auto_examples_cumbo_plot_2_views_2_classes.py>` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorial_auto_examples_cumbo_plot_2_views_2_classes.py: + + +================================== +MuCombo 2 views, 2 classes example +================================== + +In this toy example, we generate data from two classes, split between two +two-dimensional views. + +For each view, the data are generated so that half of the points of each class +are well separated in the plane, while the other half of the points are not +separated and placed in the same area. We also insure that the points that are +not separated in one view are well separated in the other view. + +Thus, in the figure representing the data, the points represented by crosses +(x) are well separated in view 0 while they are not separated in view 1, while +the points represented by dots (.) are well separated in view 1 while they are +not separated in view 0. In this figure, the blue symbols represent points +of class 0, while red symbols represent points of class 1. + +The MuCuMBo algorithm take adavantage of the complementarity of the two views to +rightly classify the points. + + +.. 
code-block:: default + + + import numpy as np + from multimodal.boosting.cumbo import MuCumboClassifier + from matplotlib import pyplot as plt + + + def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + + seed = 12 + np.random.seed(seed) + + n_samples = 100 + + view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]))) + + view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + + X = np.concatenate((view_0, view_1), axis=1) + + y = np.zeros(4*n_samples, dtype=np.int64) + y[2*n_samples:] = 1 + + views_ind = np.array([0, 2, 4]) + + n_estimators = 3 + clf = MuCumboClassifier(n_estimators=n_estimators) + clf.fit(X, y, views_ind) + + print('\nAfter 3 iterations, the MuCuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') + for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + + + print('\nThe resulting MuCuMBo classifier uses three sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights: {}'.format(clf.estimator_weights_alpha_)) + + # print('\nThe two first sub-classifiers use the data of view 0 to compute ' + # 'their\nclassification results, while the third one uses the data of ' + # 'view 1:\n' + # ' best views: {}'. 
format(clf.best_views_)) + + print('\nThe first figure displays the data, splitting the representation ' + 'between the\ntwo views.') + + fig = plt.figure(figsize=(10., 8.)) + fig.suptitle('Representation of the data', size=16) + for ind_view in range(2): + ax = plt.subplot(2, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + styles = ('.b', 'xb', '.r', 'xr') + labels = ('non-separated', 'separated') + for ind in range(4): + ind_class = ind // 2 + label = labels[(ind + ind_view) % 2] + ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature], + X[n_samples*ind:n_samples*(ind+1), ind_feature + 1], + styles[ind], + label='Class {} ({})'.format(ind_class, label)) + ax.legend() + + print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + + styles = ('.b', '.r') + # fig = plt.figure(figsize=(12., 7.)) + # fig.suptitle('Classification results on the learning data for the ' + # 'sub-classifiers', size=16) + # for ind_estimator in range(n_estimators): + # best_view = clf.best_views_[ind_estimator] + # y_pred = clf.estimators_[ind_estimator].predict( + # X[:, 2*best_view:2*best_view+2]) + # background_color = (1.0, 1.0, 0.9) + # for ind_view in range(2): + # ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1) + # if ind_view == best_view: + # ax.set_facecolor(background_color) + # ax.set_title( + # 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) + # ind_feature = ind_view * 2 + # for ind_class in range(2): + # ind_samples = (y_pred == ind_class) + # ax.plot(X[ind_samples, ind_feature], + # X[ind_samples, ind_feature + 1], + # styles[ind_class], + # label='Class {}'.format(ind_class)) + # ax.legend(title='Predicted class:') + + plt.show() + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 0.000 seconds) + + +.. _sphx_glr_download_tutorial_auto_examples_cumbo_plot_2_views_2_classes.py: + + +.. 
only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: cumbo_plot_2_views_2_classes.py <cumbo_plot_2_views_2_classes.py>` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: cumbo_plot_2_views_2_classes.ipynb <cumbo_plot_2_views_2_classes.ipynb>` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_ diff --git a/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes_codeobj.pickle b/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes_codeobj.pickle new file mode 100644 index 0000000000000000000000000000000000000000..4d050d75629df8605ed87caa1475615c38a68de4 Binary files /dev/null and b/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes_codeobj.pickle differ diff --git a/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.ipynb b/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..a6981bbc01227435bf8910e464bf977a12095aff --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.ipynb @@ -0,0 +1,54 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n==================================\nMuCumbo 3 views, 3 classes example\n==================================\n\nIn this toy example, we generate data from three classes, split between three\ntwo-dimensional views.\n\nFor each view, the data are generated so that the points for two classes are\nwell seperated, while the points for the third class are not seperated with\nthe two other classes. 
That means that, taken separately, none of the single\nviews allows for a good classification of the data.\n\nNevertheless, the MuCuMbo algorithm take adavantage of the complementarity of\nthe views to rightly classify the points.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nfrom multimodal.boosting.cumbo import MuCumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n \"\"\"Generate random data in a rectangle\"\"\"\n lim = np.array(lim)\n n_features = lim.shape[0]\n data = np.random.random((n_samples, n_features))\n data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 300\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 2.], [0., 1.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nview_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1, view_2), axis=1)\n\ny = np.zeros(3*n_samples, dtype=np.int64)\ny[n_samples:2*n_samples] = 1\ny[2*n_samples:] = 2\n\nviews_ind = np.array([0, 2, 4, 6])\n\nn_estimators = 4\nclf = MuCumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 4 iterations, the MuCuMBo classifier reaches exact '\n 'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n print(' - iteration {}, score: {}'.format(ind + 1, score))\n\nprint('\\nThe resulting MuCuMBo classifier uses four sub-classifiers that are '\n 'wheighted\\nusing the following weights:\\n'\n ' estimator 
weights alpha: {}'.format(clf.estimator_weights_alpha_))\n\n# print('\\nThe first sub-classifier uses the data of view 0 to compute '\n# 'its classification\\nresults, the second and third sub-classifiers use '\n# 'the data of view 1, while the\\nfourth one uses the data of '\n# 'view 2:\\n'\n# ' best views: {}'. format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n 'between the\\nthree views.')\n\nstyles = ('.b', '.r', '.g')\nfig = plt.figure(figsize=(12., 11.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(3):\n ax = plt.subplot(3, 1, ind_view + 1)\n ax.set_title('View {}'.format(ind_view))\n ind_feature = ind_view * 2\n for ind_class in range(3):\n ind_samples = (y == ind_class)\n ax.plot(X[ind_samples, ind_feature],\n X[ind_samples, ind_feature + 1],\n styles[ind_class],\n label='Class {}'.format(ind_class))\n ax.legend(loc='upper left', framealpha=0.9)\n\nprint('\\nThe second figure displays the classification results for the '\n 'sub-classifiers\\non the learning sample data.\\n')\n\n# fig = plt.figure(figsize=(14., 11.))\n# fig.suptitle('Classification results on the learning data for the '\n# 'sub-classifiers', size=16)\n# for ind_estimator in range(n_estimators):\n# best_view = clf.best_views_[ind_estimator]\n# y_pred = clf.estimators_[ind_estimator].predict(\n# X[:, 2*best_view:2*best_view+2])\n# background_color = (1.0, 1.0, 0.9)\n# for ind_view in range(3):\n# ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)\n# if ind_view == best_view:\n# ax.set_facecolor(background_color)\n# ax.set_title(\n# 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n# ind_feature = ind_view * 2\n# for ind_class in range(3):\n# ind_samples = (y_pred == ind_class)\n# ax.plot(X[ind_samples, ind_feature],\n# X[ind_samples, ind_feature + 1],\n# styles[ind_class],\n# label='Class {}'.format(ind_class))\n# ax.legend(title='Predicted class:', loc='upper left', 
framealpha=0.9)\n\nplt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.py b/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..058b2dc7e6562d83d19de511031f19f90b567926 --- /dev/null +++ b/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +""" +================================== +MuCumbo 3 views, 3 classes example +================================== + +In this toy example, we generate data from three classes, split between three +two-dimensional views. + +For each view, the data are generated so that the points for two classes are +well seperated, while the points for the third class are not seperated with +the two other classes. That means that, taken separately, none of the single +views allows for a good classification of the data. + +Nevertheless, the MuCuMbo algorithm take adavantage of the complementarity of +the views to rightly classify the points. 
+""" + +import numpy as np +from multimodal.boosting.cumbo import MuCumboClassifier +from matplotlib import pyplot as plt + + +def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + +seed = 12 +np.random.seed(seed) + +n_samples = 300 + +view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]))) + +view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + +view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]))) + +X = np.concatenate((view_0, view_1, view_2), axis=1) + +y = np.zeros(3*n_samples, dtype=np.int64) +y[n_samples:2*n_samples] = 1 +y[2*n_samples:] = 2 + +views_ind = np.array([0, 2, 4, 6]) + +n_estimators = 4 +clf = MuCumboClassifier(n_estimators=n_estimators) +clf.fit(X, y, views_ind) + +print('\nAfter 4 iterations, the MuCuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') +for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + +print('\nThe resulting MuCuMBo classifier uses four sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights alpha: {}'.format(clf.estimator_weights_alpha_)) + +# print('\nThe first sub-classifier uses the data of view 0 to compute ' +# 'its classification\nresults, the second and third sub-classifiers use ' +# 'the data of view 1, while the\nfourth one uses the data of ' +# 'view 2:\n' +# ' best views: {}'. 
format(clf.best_views_)) + +print('\nThe first figure displays the data, splitting the representation ' + 'between the\nthree views.') + +styles = ('.b', '.r', '.g') +fig = plt.figure(figsize=(12., 11.)) +fig.suptitle('Representation of the data', size=16) +for ind_view in range(3): + ax = plt.subplot(3, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(3): + ind_samples = (y == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(loc='upper left', framealpha=0.9) + +print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + +# fig = plt.figure(figsize=(14., 11.)) +# fig.suptitle('Classification results on the learning data for the ' +# 'sub-classifiers', size=16) +# for ind_estimator in range(n_estimators): +# best_view = clf.best_views_[ind_estimator] +# y_pred = clf.estimators_[ind_estimator].predict( +# X[:, 2*best_view:2*best_view+2]) +# background_color = (1.0, 1.0, 0.9) +# for ind_view in range(3): +# ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1) +# if ind_view == best_view: +# ax.set_facecolor(background_color) +# ax.set_title( +# 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) +# ind_feature = ind_view * 2 +# for ind_class in range(3): +# ind_samples = (y_pred == ind_class) +# ax.plot(X[ind_samples, ind_feature], +# X[ind_samples, ind_feature + 1], +# styles[ind_class], +# label='Class {}'.format(ind_class)) +# ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9) + +plt.show() diff --git a/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.rst b/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.rst new file mode 100644 index 0000000000000000000000000000000000000000..a002f637a78a1dbf568e3cb9641652660da8195c --- /dev/null +++ 
b/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.rst @@ -0,0 +1,164 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here <sphx_glr_download_tutorial_auto_examples_cumbo_plot_3_views_3_classes.py>` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorial_auto_examples_cumbo_plot_3_views_3_classes.py: + + +================================== +MuCumbo 3 views, 3 classes example +================================== + +In this toy example, we generate data from three classes, split between three +two-dimensional views. + +For each view, the data are generated so that the points for two classes are +well seperated, while the points for the third class are not seperated with +the two other classes. That means that, taken separately, none of the single +views allows for a good classification of the data. + +Nevertheless, the MuCuMbo algorithm take adavantage of the complementarity of +the views to rightly classify the points. + + +.. 
code-block:: default + + + import numpy as np + from multimodal.boosting.cumbo import MuCumboClassifier + from matplotlib import pyplot as plt + + + def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + + seed = 12 + np.random.seed(seed) + + n_samples = 300 + + view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]))) + + view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + + view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]))) + + X = np.concatenate((view_0, view_1, view_2), axis=1) + + y = np.zeros(3*n_samples, dtype=np.int64) + y[n_samples:2*n_samples] = 1 + y[2*n_samples:] = 2 + + views_ind = np.array([0, 2, 4, 6]) + + n_estimators = 4 + clf = MuCumboClassifier(n_estimators=n_estimators) + clf.fit(X, y, views_ind) + + print('\nAfter 4 iterations, the MuCuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') + for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + + print('\nThe resulting MuCuMBo classifier uses four sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights alpha: {}'.format(clf.estimator_weights_alpha_)) + + # print('\nThe first sub-classifier uses the data of view 0 to compute ' + # 'its classification\nresults, the second and third sub-classifiers use ' + # 'the data of view 1, while the\nfourth one uses the data of ' + # 'view 2:\n' + # ' best views: {}'. 
format(clf.best_views_)) + + print('\nThe first figure displays the data, splitting the representation ' + 'between the\nthree views.') + + styles = ('.b', '.r', '.g') + fig = plt.figure(figsize=(12., 11.)) + fig.suptitle('Representation of the data', size=16) + for ind_view in range(3): + ax = plt.subplot(3, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(3): + ind_samples = (y == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(loc='upper left', framealpha=0.9) + + print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + + # fig = plt.figure(figsize=(14., 11.)) + # fig.suptitle('Classification results on the learning data for the ' + # 'sub-classifiers', size=16) + # for ind_estimator in range(n_estimators): + # best_view = clf.best_views_[ind_estimator] + # y_pred = clf.estimators_[ind_estimator].predict( + # X[:, 2*best_view:2*best_view+2]) + # background_color = (1.0, 1.0, 0.9) + # for ind_view in range(3): + # ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1) + # if ind_view == best_view: + # ax.set_facecolor(background_color) + # ax.set_title( + # 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) + # ind_feature = ind_view * 2 + # for ind_class in range(3): + # ind_samples = (y_pred == ind_class) + # ax.plot(X[ind_samples, ind_feature], + # X[ind_samples, ind_feature + 1], + # styles[ind_class], + # label='Class {}'.format(ind_class)) + # ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9) + + plt.show() + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 0.000 seconds) + + +.. _sphx_glr_download_tutorial_auto_examples_cumbo_plot_3_views_3_classes.py: + + +.. only :: html + + .. 
container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: cumbo_plot_3_views_3_classes.py <cumbo_plot_3_views_3_classes.py>` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: cumbo_plot_3_views_3_classes.ipynb <cumbo_plot_3_views_3_classes.ipynb>` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_ diff --git a/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes_codeobj.pickle b/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes_codeobj.pickle new file mode 100644 index 0000000000000000000000000000000000000000..4d050d75629df8605ed87caa1475615c38a68de4 Binary files /dev/null and b/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes_codeobj.pickle differ diff --git a/doc/tutorial/auto_examples/images/sphx_glr_plot_2_views_2_classes_001.png b/doc/tutorial/auto_examples/images/sphx_glr_plot_2_views_2_classes_001.png new file mode 100644 index 0000000000000000000000000000000000000000..6c10bfca50d38ffb88d84b3bd7603ff549a6f5e2 Binary files /dev/null and b/doc/tutorial/auto_examples/images/sphx_glr_plot_2_views_2_classes_001.png differ diff --git a/doc/docmumbo/auto_examples/images/sphx_glr_plot_2_views_2_classes_002.png b/doc/tutorial/auto_examples/images/sphx_glr_plot_2_views_2_classes_002.png similarity index 100% rename from doc/docmumbo/auto_examples/images/sphx_glr_plot_2_views_2_classes_002.png rename to doc/tutorial/auto_examples/images/sphx_glr_plot_2_views_2_classes_002.png diff --git a/doc/tutorial/auto_examples/images/sphx_glr_plot_3_views_3_classes_001.png b/doc/tutorial/auto_examples/images/sphx_glr_plot_3_views_3_classes_001.png new file mode 100644 index 0000000000000000000000000000000000000000..34d20dc6f92a4bfed33da87d308a7e6ecb2aaa0a Binary files /dev/null and b/doc/tutorial/auto_examples/images/sphx_glr_plot_3_views_3_classes_001.png 
differ diff --git a/doc/docmumbo/auto_examples/images/sphx_glr_plot_3_views_3_classes_002.png b/doc/tutorial/auto_examples/images/sphx_glr_plot_3_views_3_classes_002.png similarity index 100% rename from doc/docmumbo/auto_examples/images/sphx_glr_plot_3_views_3_classes_002.png rename to doc/tutorial/auto_examples/images/sphx_glr_plot_3_views_3_classes_002.png diff --git a/doc/tutorial/auto_examples/images/thumb/sphx_glr_cumbo_plot_2_views_2_classes_thumb.png b/doc/tutorial/auto_examples/images/thumb/sphx_glr_cumbo_plot_2_views_2_classes_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..233f8e605efca4bef384a7c603d53fdc385428bc Binary files /dev/null and b/doc/tutorial/auto_examples/images/thumb/sphx_glr_cumbo_plot_2_views_2_classes_thumb.png differ diff --git a/doc/tutorial/auto_examples/images/thumb/sphx_glr_cumbo_plot_3_views_3_classes_thumb.png b/doc/tutorial/auto_examples/images/thumb/sphx_glr_cumbo_plot_3_views_3_classes_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..233f8e605efca4bef384a7c603d53fdc385428bc Binary files /dev/null and b/doc/tutorial/auto_examples/images/thumb/sphx_glr_cumbo_plot_3_views_3_classes_thumb.png differ diff --git a/doc/tutorial/auto_examples/images/thumb/sphx_glr_mumbo_plot_2_views_2_classes_thumb.png b/doc/tutorial/auto_examples/images/thumb/sphx_glr_mumbo_plot_2_views_2_classes_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..233f8e605efca4bef384a7c603d53fdc385428bc Binary files /dev/null and b/doc/tutorial/auto_examples/images/thumb/sphx_glr_mumbo_plot_2_views_2_classes_thumb.png differ diff --git a/doc/tutorial/auto_examples/images/thumb/sphx_glr_mumbo_plot_3_views_3_classes_thumb.png b/doc/tutorial/auto_examples/images/thumb/sphx_glr_mumbo_plot_3_views_3_classes_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..233f8e605efca4bef384a7c603d53fdc385428bc Binary files /dev/null and 
b/doc/tutorial/auto_examples/images/thumb/sphx_glr_mumbo_plot_3_views_3_classes_thumb.png differ diff --git a/doc/tutorial/auto_examples/images/thumb/sphx_glr_mvml_plot__thumb.png b/doc/tutorial/auto_examples/images/thumb/sphx_glr_mvml_plot__thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..233f8e605efca4bef384a7c603d53fdc385428bc Binary files /dev/null and b/doc/tutorial/auto_examples/images/thumb/sphx_glr_mvml_plot__thumb.png differ diff --git a/doc/tutorial/auto_examples/images/thumb/sphx_glr_plot_2_views_2_classes_thumb.png b/doc/tutorial/auto_examples/images/thumb/sphx_glr_plot_2_views_2_classes_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..5387f6d37a91d3136168809c420c760dbb86cb43 Binary files /dev/null and b/doc/tutorial/auto_examples/images/thumb/sphx_glr_plot_2_views_2_classes_thumb.png differ diff --git a/doc/tutorial/auto_examples/images/thumb/sphx_glr_plot_3_views_3_classes_thumb.png b/doc/tutorial/auto_examples/images/thumb/sphx_glr_plot_3_views_3_classes_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..5f94c4d01baf9fd165269e47b7a85b55cde225a7 Binary files /dev/null and b/doc/tutorial/auto_examples/images/thumb/sphx_glr_plot_3_views_3_classes_thumb.png differ diff --git a/doc/tutorial/auto_examples/index.rst b/doc/tutorial/auto_examples/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..17002d9e6f6fdcc72be27726c28fde2c33b50e56 --- /dev/null +++ b/doc/tutorial/auto_examples/index.rst @@ -0,0 +1,205 @@ +:orphan: + + + +.. _sphx_glr_tutorial_auto_examples: + +.. _examples: + +Multimodal Examples +=================== + + + + + +.. raw:: html + + <div class="sphx-glr-clear"></div> + + + +.. _sphx_glr_tutorial_auto_examples_cumbo: + +.. _examples: + +Examples +======== + +MuCuMBo Examples +---------------- + +The following toy examples illustrate how the MuCuMBo algorithm exploits +cooperation between views for classification. + + + + +.. 
raw:: html + + <div class="sphx-glr-thumbcontainer" tooltip="In this toy example, we generate data from two classes, split between two two-dimensional views..."> + +.. only:: html + + .. figure:: /tutorial/auto_examples/cumbo/images/thumb/sphx_glr_plot_cumbo_2_views_2_classes_thumb.png + + :ref:`sphx_glr_tutorial_auto_examples_cumbo_plot_cumbo_2_views_2_classes.py` + +.. raw:: html + + </div> + + +.. toctree:: + :hidden: + + /tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes + +.. raw:: html + + <div class="sphx-glr-thumbcontainer" tooltip="In this toy example, we generate data from three classes, split between three two-dimensional v..."> + +.. only:: html + + .. figure:: /tutorial/auto_examples/cumbo/images/thumb/sphx_glr_plot_cumbo_3_views_3_classes_thumb.png + + :ref:`sphx_glr_tutorial_auto_examples_cumbo_plot_cumbo_3_views_3_classes.py` + +.. raw:: html + + </div> + + +.. toctree:: + :hidden: + + /tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes +.. raw:: html + + <div class="sphx-glr-clear"></div> + + + +.. _sphx_glr_tutorial_auto_examples_mumbo: + +.. _examples: + +Examples +======== + +MuMBo Examples +-------------- + +The following toy examples illustrate how the MuMBo algorithm exploits +cooperation between views for classification. + + + + +.. raw:: html + + <div class="sphx-glr-thumbcontainer" tooltip="In this toy example, we generate data from two classes, split between two two-dimensional views..."> + +.. only:: html + + .. figure:: /tutorial/auto_examples/mumbo/images/thumb/sphx_glr_plot_mumbo_2_views_2_classes_thumb.png + + :ref:`sphx_glr_tutorial_auto_examples_mumbo_plot_mumbo_2_views_2_classes.py` + +.. raw:: html + + </div> + + +.. toctree:: + :hidden: + + /tutorial/auto_examples/mumbo/plot_mumbo_2_views_2_classes + +.. raw:: html + + <div class="sphx-glr-thumbcontainer" tooltip="In this toy example, we generate data from three classes, split between three two-dimensional v..."> + +.. only:: html + + .. 
figure:: /tutorial/auto_examples/mumbo/images/thumb/sphx_glr_plot_mumbo_3_views_3_classes_thumb.png + + :ref:`sphx_glr_tutorial_auto_examples_mumbo_plot_mumbo_3_views_3_classes.py` + +.. raw:: html + + </div> + + +.. toctree:: + :hidden: + + /tutorial/auto_examples/mumbo/plot_mumbo_3_views_3_classes +.. raw:: html + + <div class="sphx-glr-clear"></div> + + + +.. _sphx_glr_tutorial_auto_examples_mvml: + +.. _examples: + +Examples +======== + +MVML +---- + +The following toy examples illustrate how the MVML algorithm + + + + +.. raw:: html + + <div class="sphx-glr-thumbcontainer" tooltip="Demonstration uses scikit-learn for retrieving datasets and for calculating rbf kernel function..."> + +.. only:: html + + .. figure:: /tutorial/auto_examples/mvml/images/thumb/sphx_glr_plot_mvml__thumb.png + + :ref:`sphx_glr_tutorial_auto_examples_mvml_plot_mvml_.py` + +.. raw:: html + + </div> + + +.. toctree:: + :hidden: + + /tutorial/auto_examples/mvml/plot_mvml_ +.. raw:: html + + <div class="sphx-glr-clear"></div> + + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-gallery + + + .. container:: sphx-glr-download + + :download:`Download all examples in Python source code: auto_examples_python.zip <//home/dominique/projets/ANR-Lives/scikit-multimodallearn/doc/tutorial/auto_examples/auto_examples_python.zip>` + + + + .. container:: sphx-glr-download + + :download:`Download all examples in Jupyter notebooks: auto_examples_jupyter.zip <//home/dominique/projets/ANR-Lives/scikit-multimodallearn/doc/tutorial/auto_examples/auto_examples_jupyter.zip>` + + +.. only:: html + + .. 
rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_ diff --git a/doc/tutorial/auto_examples/mumbo/images/sphx_glr_plot_mumbo_2_views_2_classes_001.png b/doc/tutorial/auto_examples/mumbo/images/sphx_glr_plot_mumbo_2_views_2_classes_001.png new file mode 100644 index 0000000000000000000000000000000000000000..6c10bfca50d38ffb88d84b3bd7603ff549a6f5e2 Binary files /dev/null and b/doc/tutorial/auto_examples/mumbo/images/sphx_glr_plot_mumbo_2_views_2_classes_001.png differ diff --git a/doc/tutorial/auto_examples/mumbo/images/sphx_glr_plot_mumbo_2_views_2_classes_002.png b/doc/tutorial/auto_examples/mumbo/images/sphx_glr_plot_mumbo_2_views_2_classes_002.png new file mode 100644 index 0000000000000000000000000000000000000000..0a7b2d55a07522b45d671a4a5135e0a96d1c6727 Binary files /dev/null and b/doc/tutorial/auto_examples/mumbo/images/sphx_glr_plot_mumbo_2_views_2_classes_002.png differ diff --git a/doc/tutorial/auto_examples/mumbo/images/sphx_glr_plot_mumbo_3_views_3_classes_001.png b/doc/tutorial/auto_examples/mumbo/images/sphx_glr_plot_mumbo_3_views_3_classes_001.png new file mode 100644 index 0000000000000000000000000000000000000000..34d20dc6f92a4bfed33da87d308a7e6ecb2aaa0a Binary files /dev/null and b/doc/tutorial/auto_examples/mumbo/images/sphx_glr_plot_mumbo_3_views_3_classes_001.png differ diff --git a/doc/tutorial/auto_examples/mumbo/images/sphx_glr_plot_mumbo_3_views_3_classes_002.png b/doc/tutorial/auto_examples/mumbo/images/sphx_glr_plot_mumbo_3_views_3_classes_002.png new file mode 100644 index 0000000000000000000000000000000000000000..c17cb7816691a1c00401885d938ec50f7afd366f Binary files /dev/null and b/doc/tutorial/auto_examples/mumbo/images/sphx_glr_plot_mumbo_3_views_3_classes_002.png differ diff --git a/doc/tutorial/auto_examples/mumbo/images/thumb/sphx_glr_mumbo_plot_2_views_2_classes_thumb.png b/doc/tutorial/auto_examples/mumbo/images/thumb/sphx_glr_mumbo_plot_2_views_2_classes_thumb.png new file 
mode 100644 index 0000000000000000000000000000000000000000..233f8e605efca4bef384a7c603d53fdc385428bc Binary files /dev/null and b/doc/tutorial/auto_examples/mumbo/images/thumb/sphx_glr_mumbo_plot_2_views_2_classes_thumb.png differ diff --git a/doc/tutorial/auto_examples/mumbo/images/thumb/sphx_glr_mumbo_plot_3_views_3_classes_thumb.png b/doc/tutorial/auto_examples/mumbo/images/thumb/sphx_glr_mumbo_plot_3_views_3_classes_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..233f8e605efca4bef384a7c603d53fdc385428bc Binary files /dev/null and b/doc/tutorial/auto_examples/mumbo/images/thumb/sphx_glr_mumbo_plot_3_views_3_classes_thumb.png differ diff --git a/doc/tutorial/auto_examples/mumbo/images/thumb/sphx_glr_plot_mumbo_2_views_2_classes_thumb.png b/doc/tutorial/auto_examples/mumbo/images/thumb/sphx_glr_plot_mumbo_2_views_2_classes_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..5387f6d37a91d3136168809c420c760dbb86cb43 Binary files /dev/null and b/doc/tutorial/auto_examples/mumbo/images/thumb/sphx_glr_plot_mumbo_2_views_2_classes_thumb.png differ diff --git a/doc/tutorial/auto_examples/mumbo/images/thumb/sphx_glr_plot_mumbo_3_views_3_classes_thumb.png b/doc/tutorial/auto_examples/mumbo/images/thumb/sphx_glr_plot_mumbo_3_views_3_classes_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..5f94c4d01baf9fd165269e47b7a85b55cde225a7 Binary files /dev/null and b/doc/tutorial/auto_examples/mumbo/images/thumb/sphx_glr_plot_mumbo_3_views_3_classes_thumb.png differ diff --git a/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.ipynb b/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..bfbb2d3995d82bea63e0b51401412d7705dcb45d --- /dev/null +++ b/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.ipynb @@ -0,0 +1,54 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + 
"metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n================================\nMumbo 2 views, 2 classes example\n================================\n\nIn this toy example, we generate data from two classes, split between two\ntwo-dimensional views.\n\nFor each view, the data are generated so that half of the points of each class\nare well separated in the plane, while the other half of the points are not\nseparated and placed in the same area. We also insure that the points that are\nnot separated in one view are well separated in the other view.\n\nThus, in the figure representing the data, the points represented by crosses\n(x) are well separated in view 0 while they are not separated in view 1, while\nthe points represented by dots (.) are well separated in view 1 while they are\nnot separated in view 0. In this figure, the blue symbols represent points\nof class 0, while red symbols represent points of class 1.\n\nThe MuMBo algorithm take adavantage of the complementarity of the two views to\nrightly classify the points.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nfrom multimodal.boosting.mumbo import MumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n \"\"\"Generate random data in a rectangle\"\"\"\n lim = np.array(lim)\n n_features = lim.shape[0]\n data = np.random.random((n_samples, n_features))\n data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 100\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [1., 2.]])))\n\nview_1 = 
np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [1., 2.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1), axis=1)\n\ny = np.zeros(4*n_samples, dtype=np.int64)\ny[2*n_samples:] = 1\n\nviews_ind = np.array([0, 2, 4])\n\nn_estimators = 3\nclf = MumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 3 iterations, the MuMBo classifier reaches exact '\n 'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n print(' - iteration {}, score: {}'.format(ind + 1, score))\n\n\nprint('\\nThe resulting MuMBo classifier uses three sub-classifiers that are '\n 'wheighted\\nusing the following weights:\\n'\n ' estimator weights: {}'.format(clf.estimator_weights_))\n\nprint('\\nThe two first sub-classifiers use the data of view 0 to compute '\n 'their\\nclassification results, while the third one uses the data of '\n 'view 1:\\n'\n ' best views: {}'. 
format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n 'between the\\ntwo views.')\n\nfig = plt.figure(figsize=(10., 8.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(2):\n ax = plt.subplot(2, 1, ind_view + 1)\n ax.set_title('View {}'.format(ind_view))\n ind_feature = ind_view * 2\n styles = ('.b', 'xb', '.r', 'xr')\n labels = ('non-separated', 'separated')\n for ind in range(4):\n ind_class = ind // 2\n label = labels[(ind + ind_view) % 2]\n ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],\n X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],\n styles[ind],\n label='Class {} ({})'.format(ind_class, label))\n ax.legend()\n\nprint('\\nThe second figure displays the classification results for the '\n 'sub-classifiers\\non the learning sample data.\\n')\n\nstyles = ('.b', '.r')\nfig = plt.figure(figsize=(12., 7.))\nfig.suptitle('Classification results on the learning data for the '\n 'sub-classifiers', size=16)\nfor ind_estimator in range(n_estimators):\n best_view = clf.best_views_[ind_estimator]\n y_pred = clf.estimators_[ind_estimator].predict(\n X[:, 2*best_view:2*best_view+2])\n background_color = (1.0, 1.0, 0.9)\n for ind_view in range(2):\n ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)\n if ind_view == best_view:\n ax.set_facecolor(background_color)\n ax.set_title(\n 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n ind_feature = ind_view * 2\n for ind_class in range(2):\n ind_samples = (y_pred == ind_class)\n ax.plot(X[ind_samples, ind_feature],\n X[ind_samples, ind_feature + 1],\n styles[ind_class],\n label='Class {}'.format(ind_class))\n ax.legend(title='Predicted class:')\n\nplt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": 
"text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/examples/mumbo_plot_2_views_2_classes.py b/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.py similarity index 97% rename from examples/mumbo_plot_2_views_2_classes.py rename to doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.py index a2f864eb092a749e645c24697cb56bd80a1aabca..91f0dc7a33e6a921f6f7e79415e7ab8f6d86859a 100644 --- a/examples/mumbo_plot_2_views_2_classes.py +++ b/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- """ -========================== -2 views, 2 classes example -========================== +================================ +Mumbo 2 views, 2 classes example +================================ In this toy example, we generate data from two classes, split between two two-dimensional views. diff --git a/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.rst b/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.rst new file mode 100644 index 0000000000000000000000000000000000000000..712a45e277023643abc24e6dc836e990c37c5aca --- /dev/null +++ b/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.rst @@ -0,0 +1,170 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here <sphx_glr_download_tutorial_auto_examples_mumbo_mumbo_plot_2_views_2_classes.py>` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorial_auto_examples_mumbo_mumbo_plot_2_views_2_classes.py: + + +================================ +Mumbo 2 views, 2 classes example +================================ + +In this toy example, we generate data from two classes, split between two +two-dimensional views. 
+ +For each view, the data are generated so that half of the points of each class +are well separated in the plane, while the other half of the points are not +separated and placed in the same area. We also insure that the points that are +not separated in one view are well separated in the other view. + +Thus, in the figure representing the data, the points represented by crosses +(x) are well separated in view 0 while they are not separated in view 1, while +the points represented by dots (.) are well separated in view 1 while they are +not separated in view 0. In this figure, the blue symbols represent points +of class 0, while red symbols represent points of class 1. + +The MuMBo algorithm take adavantage of the complementarity of the two views to +rightly classify the points. + + +.. code-block:: default + + + import numpy as np + from multimodal.boosting.mumbo import MumboClassifier + from matplotlib import pyplot as plt + + + def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + + seed = 12 + np.random.seed(seed) + + n_samples = 100 + + view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]))) + + view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + + X = np.concatenate((view_0, view_1), axis=1) + + y = np.zeros(4*n_samples, dtype=np.int64) + y[2*n_samples:] = 1 + + views_ind = np.array([0, 2, 4]) + + n_estimators = 3 + clf = MumboClassifier(n_estimators=n_estimators) + clf.fit(X, y, views_ind) + + print('\nAfter 3 iterations, the 
MuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') + for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + + + print('\nThe resulting MuMBo classifier uses three sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights: {}'.format(clf.estimator_weights_)) + + print('\nThe two first sub-classifiers use the data of view 0 to compute ' + 'their\nclassification results, while the third one uses the data of ' + 'view 1:\n' + ' best views: {}'. format(clf.best_views_)) + + print('\nThe first figure displays the data, splitting the representation ' + 'between the\ntwo views.') + + fig = plt.figure(figsize=(10., 8.)) + fig.suptitle('Representation of the data', size=16) + for ind_view in range(2): + ax = plt.subplot(2, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + styles = ('.b', 'xb', '.r', 'xr') + labels = ('non-separated', 'separated') + for ind in range(4): + ind_class = ind // 2 + label = labels[(ind + ind_view) % 2] + ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature], + X[n_samples*ind:n_samples*(ind+1), ind_feature + 1], + styles[ind], + label='Class {} ({})'.format(ind_class, label)) + ax.legend() + + print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + + styles = ('.b', '.r') + fig = plt.figure(figsize=(12., 7.)) + fig.suptitle('Classification results on the learning data for the ' + 'sub-classifiers', size=16) + for ind_estimator in range(n_estimators): + best_view = clf.best_views_[ind_estimator] + y_pred = clf.estimators_[ind_estimator].predict( + X[:, 2*best_view:2*best_view+2]) + background_color = (1.0, 1.0, 0.9) + for ind_view in range(2): + ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1) + if ind_view == best_view: + ax.set_facecolor(background_color) + ax.set_title( + 'Sub-classifier {} - View 
{}'.format(ind_estimator, ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(2): + ind_samples = (y_pred == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(title='Predicted class:') + + plt.show() + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 0.000 seconds) + + +.. _sphx_glr_download_tutorial_auto_examples_mumbo_mumbo_plot_2_views_2_classes.py: + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: mumbo_plot_2_views_2_classes.py <mumbo_plot_2_views_2_classes.py>` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: mumbo_plot_2_views_2_classes.ipynb <mumbo_plot_2_views_2_classes.ipynb>` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_ diff --git a/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes_codeobj.pickle b/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes_codeobj.pickle new file mode 100644 index 0000000000000000000000000000000000000000..42c59a482683c7d52f4b885b7d85822c0db5552f Binary files /dev/null and b/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes_codeobj.pickle differ diff --git a/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.ipynb b/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..371d83e13a18543e50726e1f01d8665e65d7e3a7 --- /dev/null +++ b/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.ipynb @@ -0,0 +1,54 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": 
"markdown", + "metadata": {}, + "source": [ + "\n================================\nMumbo 3 views, 3 classes example\n================================\n\nIn this toy example, we generate data from three classes, split between three\ntwo-dimensional views.\n\nFor each view, the data are generated so that the points for two classes are\nwell seperated, while the points for the third class are not seperated with\nthe two other classes. That means that, taken separately, none of the single\nviews allows for a good classification of the data.\n\nNevertheless, the MuMBo algorithm take adavantage of the complementarity of\nthe views to rightly classify the points.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nfrom multimodal.boosting.mumbo import MumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n \"\"\"Generate random data in a rectangle\"\"\"\n lim = np.array(lim)\n n_features = lim.shape[0]\n data = np.random.random((n_samples, n_features))\n data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 300\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 2.], [0., 1.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nview_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1, view_2), axis=1)\n\ny = np.zeros(3*n_samples, dtype=np.int64)\ny[n_samples:2*n_samples] = 1\ny[2*n_samples:] = 2\n\nviews_ind = np.array([0, 2, 4, 6])\n\nn_estimators = 4\nclf = 
MumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 4 iterations, the MuMBo classifier reaches exact '\n 'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n print(' - iteration {}, score: {}'.format(ind + 1, score))\n\nprint('\\nThe resulting MuMBo classifier uses four sub-classifiers that are '\n 'wheighted\\nusing the following weights:\\n'\n ' estimator weights: {}'.format(clf.estimator_weights_))\n\nprint('\\nThe first sub-classifier uses the data of view 0 to compute '\n 'its classification\\nresults, the second and third sub-classifiers use '\n 'the data of view 1, while the\\nfourth one uses the data of '\n 'view 2:\\n'\n ' best views: {}'. format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n 'between the\\nthree views.')\n\nstyles = ('.b', '.r', '.g')\nfig = plt.figure(figsize=(12., 11.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(3):\n ax = plt.subplot(3, 1, ind_view + 1)\n ax.set_title('View {}'.format(ind_view))\n ind_feature = ind_view * 2\n for ind_class in range(3):\n ind_samples = (y == ind_class)\n ax.plot(X[ind_samples, ind_feature],\n X[ind_samples, ind_feature + 1],\n styles[ind_class],\n label='Class {}'.format(ind_class))\n ax.legend(loc='upper left', framealpha=0.9)\n\nprint('\\nThe second figure displays the classification results for the '\n 'sub-classifiers\\non the learning sample data.\\n')\n\nfig = plt.figure(figsize=(14., 11.))\nfig.suptitle('Classification results on the learning data for the '\n 'sub-classifiers', size=16)\nfor ind_estimator in range(n_estimators):\n best_view = clf.best_views_[ind_estimator]\n y_pred = clf.estimators_[ind_estimator].predict(\n X[:, 2*best_view:2*best_view+2])\n background_color = (1.0, 1.0, 0.9)\n for ind_view in range(3):\n ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)\n if ind_view == best_view:\n 
ax.set_facecolor(background_color)\n ax.set_title(\n 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n ind_feature = ind_view * 2\n for ind_class in range(3):\n ind_samples = (y_pred == ind_class)\n ax.plot(X[ind_samples, ind_feature],\n X[ind_samples, ind_feature + 1],\n styles[ind_class],\n label='Class {}'.format(ind_class))\n ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)\n\nplt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/examples/mumbo_plot_3_views_3_classes.py b/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.py similarity index 97% rename from examples/mumbo_plot_3_views_3_classes.py rename to doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.py index 624a7910a7847541084b34432fd1e6f2edc5a59a..62cd31156bd2c573331131119f4c69982a63e840 100644 --- a/examples/mumbo_plot_3_views_3_classes.py +++ b/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- """ -========================== -3 views, 3 classes example -========================== +================================ +Mumbo 3 views, 3 classes example +================================ In this toy example, we generate data from three classes, split between three two-dimensional views. 
diff --git a/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.rst b/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.rst new file mode 100644 index 0000000000000000000000000000000000000000..4aedb2216b18eb70e919731d5171d3ec627e504b --- /dev/null +++ b/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.rst @@ -0,0 +1,164 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here <sphx_glr_download_tutorial_auto_examples_mumbo_mumbo_plot_3_views_3_classes.py>` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorial_auto_examples_mumbo_mumbo_plot_3_views_3_classes.py: + + +================================ +Mumbo 3 views, 3 classes example +================================ + +In this toy example, we generate data from three classes, split between three +two-dimensional views. + +For each view, the data are generated so that the points for two classes are +well seperated, while the points for the third class are not seperated with +the two other classes. That means that, taken separately, none of the single +views allows for a good classification of the data. + +Nevertheless, the MuMBo algorithm take adavantage of the complementarity of +the views to rightly classify the points. + + +.. 
code-block:: default + + + import numpy as np + from multimodal.boosting.mumbo import MumboClassifier + from matplotlib import pyplot as plt + + + def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + + seed = 12 + np.random.seed(seed) + + n_samples = 300 + + view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]))) + + view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + + view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]))) + + X = np.concatenate((view_0, view_1, view_2), axis=1) + + y = np.zeros(3*n_samples, dtype=np.int64) + y[n_samples:2*n_samples] = 1 + y[2*n_samples:] = 2 + + views_ind = np.array([0, 2, 4, 6]) + + n_estimators = 4 + clf = MumboClassifier(n_estimators=n_estimators) + clf.fit(X, y, views_ind) + + print('\nAfter 4 iterations, the MuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') + for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + + print('\nThe resulting MuMBo classifier uses four sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights: {}'.format(clf.estimator_weights_)) + + print('\nThe first sub-classifier uses the data of view 0 to compute ' + 'its classification\nresults, the second and third sub-classifiers use ' + 'the data of view 1, while the\nfourth one uses the data of ' + 'view 2:\n' + ' best views: {}'. 
format(clf.best_views_)) + + print('\nThe first figure displays the data, splitting the representation ' + 'between the\nthree views.') + + styles = ('.b', '.r', '.g') + fig = plt.figure(figsize=(12., 11.)) + fig.suptitle('Representation of the data', size=16) + for ind_view in range(3): + ax = plt.subplot(3, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(3): + ind_samples = (y == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(loc='upper left', framealpha=0.9) + + print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + + fig = plt.figure(figsize=(14., 11.)) + fig.suptitle('Classification results on the learning data for the ' + 'sub-classifiers', size=16) + for ind_estimator in range(n_estimators): + best_view = clf.best_views_[ind_estimator] + y_pred = clf.estimators_[ind_estimator].predict( + X[:, 2*best_view:2*best_view+2]) + background_color = (1.0, 1.0, 0.9) + for ind_view in range(3): + ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1) + if ind_view == best_view: + ax.set_facecolor(background_color) + ax.set_title( + 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(3): + ind_samples = (y_pred == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9) + + plt.show() + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 0.000 seconds) + + +.. _sphx_glr_download_tutorial_auto_examples_mumbo_mumbo_plot_3_views_3_classes.py: + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. 
container:: sphx-glr-download + + :download:`Download Python source code: mumbo_plot_3_views_3_classes.py <mumbo_plot_3_views_3_classes.py>` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: mumbo_plot_3_views_3_classes.ipynb <mumbo_plot_3_views_3_classes.ipynb>` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_ diff --git a/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes_codeobj.pickle b/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes_codeobj.pickle new file mode 100644 index 0000000000000000000000000000000000000000..42c59a482683c7d52f4b885b7d85822c0db5552f Binary files /dev/null and b/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes_codeobj.pickle differ diff --git a/doc/tutorial/auto_examples/mumbo/plot_mumbo_2_views_2_classes.ipynb b/doc/tutorial/auto_examples/mumbo/plot_mumbo_2_views_2_classes.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..bfbb2d3995d82bea63e0b51401412d7705dcb45d --- /dev/null +++ b/doc/tutorial/auto_examples/mumbo/plot_mumbo_2_views_2_classes.ipynb @@ -0,0 +1,54 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n================================\nMumbo 2 views, 2 classes example\n================================\n\nIn this toy example, we generate data from two classes, split between two\ntwo-dimensional views.\n\nFor each view, the data are generated so that half of the points of each class\nare well separated in the plane, while the other half of the points are not\nseparated and placed in the same area. 
We also insure that the points that are\nnot separated in one view are well separated in the other view.\n\nThus, in the figure representing the data, the points represented by crosses\n(x) are well separated in view 0 while they are not separated in view 1, while\nthe points represented by dots (.) are well separated in view 1 while they are\nnot separated in view 0. In this figure, the blue symbols represent points\nof class 0, while red symbols represent points of class 1.\n\nThe MuMBo algorithm take adavantage of the complementarity of the two views to\nrightly classify the points.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nfrom multimodal.boosting.mumbo import MumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n \"\"\"Generate random data in a rectangle\"\"\"\n lim = np.array(lim)\n n_features = lim.shape[0]\n data = np.random.random((n_samples, n_features))\n data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 100\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [1., 2.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [1., 2.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1), axis=1)\n\ny = np.zeros(4*n_samples, dtype=np.int64)\ny[2*n_samples:] = 1\n\nviews_ind = np.array([0, 2, 4])\n\nn_estimators = 3\nclf = MumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 3 iterations, the MuMBo classifier reaches exact '\n 'classification for the\\nlearning samples:')\nfor ind, score in 
enumerate(clf.staged_score(X, y)):\n print(' - iteration {}, score: {}'.format(ind + 1, score))\n\n\nprint('\\nThe resulting MuMBo classifier uses three sub-classifiers that are '\n 'wheighted\\nusing the following weights:\\n'\n ' estimator weights: {}'.format(clf.estimator_weights_))\n\nprint('\\nThe two first sub-classifiers use the data of view 0 to compute '\n 'their\\nclassification results, while the third one uses the data of '\n 'view 1:\\n'\n ' best views: {}'. format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n 'between the\\ntwo views.')\n\nfig = plt.figure(figsize=(10., 8.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(2):\n ax = plt.subplot(2, 1, ind_view + 1)\n ax.set_title('View {}'.format(ind_view))\n ind_feature = ind_view * 2\n styles = ('.b', 'xb', '.r', 'xr')\n labels = ('non-separated', 'separated')\n for ind in range(4):\n ind_class = ind // 2\n label = labels[(ind + ind_view) % 2]\n ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],\n X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],\n styles[ind],\n label='Class {} ({})'.format(ind_class, label))\n ax.legend()\n\nprint('\\nThe second figure displays the classification results for the '\n 'sub-classifiers\\non the learning sample data.\\n')\n\nstyles = ('.b', '.r')\nfig = plt.figure(figsize=(12., 7.))\nfig.suptitle('Classification results on the learning data for the '\n 'sub-classifiers', size=16)\nfor ind_estimator in range(n_estimators):\n best_view = clf.best_views_[ind_estimator]\n y_pred = clf.estimators_[ind_estimator].predict(\n X[:, 2*best_view:2*best_view+2])\n background_color = (1.0, 1.0, 0.9)\n for ind_view in range(2):\n ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)\n if ind_view == best_view:\n ax.set_facecolor(background_color)\n ax.set_title(\n 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n ind_feature = ind_view * 2\n for ind_class in range(2):\n 
ind_samples = (y_pred == ind_class)\n ax.plot(X[ind_samples, ind_feature],\n X[ind_samples, ind_feature + 1],\n styles[ind_class],\n label='Class {}'.format(ind_class))\n ax.legend(title='Predicted class:')\n\nplt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/docmumbo/auto_examples/plot_2_views_2_classes.py b/doc/tutorial/auto_examples/mumbo/plot_mumbo_2_views_2_classes.py similarity index 81% rename from doc/docmumbo/auto_examples/plot_2_views_2_classes.py rename to doc/tutorial/auto_examples/mumbo/plot_mumbo_2_views_2_classes.py index 6d1c788e8be011be31d0c07777d91517a121fe98..91f0dc7a33e6a921f6f7e79415e7ab8f6d86859a 100644 --- a/doc/docmumbo/auto_examples/plot_2_views_2_classes.py +++ b/doc/tutorial/auto_examples/mumbo/plot_mumbo_2_views_2_classes.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- """ -========================== -2 views, 2 classes example -========================== +================================ +Mumbo 2 views, 2 classes example +================================ In this toy example, we generate data from two classes, split between two two-dimensional views. @@ -22,30 +22,8 @@ The MuMBo algorithm take adavantage of the complementarity of the two views to rightly classify the points. """ -# Université d'Aix Marseille (AMU) - -# Centre National de la Recherche Scientifique (CNRS) - -# Université de Toulon (UTLN). -# Copyright © 2017-2018 AMU, CNRS, UTLN -# -# This file is part of multimodalboost. 
-# -# multimodalboost is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# multimodalboost is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with multimodalboost. If not, see <http://www.gnu.org/licenses/>. -# -# Author: Florent JAILLET - Laboratoire d'Informatique et Systèmes - UMR 7020 - import numpy as np -from multimodalboost.mumbo import MumboClassifier +from multimodal.boosting.mumbo import MumboClassifier from matplotlib import pyplot as plt diff --git a/doc/tutorial/auto_examples/mumbo/plot_mumbo_2_views_2_classes.py.md5 b/doc/tutorial/auto_examples/mumbo/plot_mumbo_2_views_2_classes.py.md5 new file mode 100644 index 0000000000000000000000000000000000000000..a896dd0b30742ed55f39df2f1e015b2cea9ef9a4 --- /dev/null +++ b/doc/tutorial/auto_examples/mumbo/plot_mumbo_2_views_2_classes.py.md5 @@ -0,0 +1 @@ +3da5e295c169f88e209f82c289afdd0b \ No newline at end of file diff --git a/doc/docmumbo/auto_examples/plot_2_views_2_classes.rst b/doc/tutorial/auto_examples/mumbo/plot_mumbo_2_views_2_classes.rst similarity index 75% rename from doc/docmumbo/auto_examples/plot_2_views_2_classes.rst rename to doc/tutorial/auto_examples/mumbo/plot_mumbo_2_views_2_classes.rst index b578f41b819cbd8d1917cf9f0296d491053f8aab..1904945a162e0e24b61a5fd4ebb3536fded11abe 100644 --- a/doc/docmumbo/auto_examples/plot_2_views_2_classes.rst +++ b/doc/tutorial/auto_examples/mumbo/plot_mumbo_2_views_2_classes.rst @@ -1,15 +1,15 @@ .. 
note:: :class: sphx-glr-download-link-note - Click :ref:`here <sphx_glr_download_auto_examples_plot_2_views_2_classes.py>` to download the full example code + Click :ref:`here <sphx_glr_download_tutorial_auto_examples_mumbo_plot_mumbo_2_views_2_classes.py>` to download the full example code .. rst-class:: sphx-glr-example-title -.. _sphx_glr_auto_examples_plot_2_views_2_classes.py: +.. _sphx_glr_tutorial_auto_examples_mumbo_plot_mumbo_2_views_2_classes.py: -========================== -2 views, 2 classes example -========================== +================================ +Mumbo 2 views, 2 classes example +================================ In this toy example, we generate data from two classes, split between two two-dimensional views. @@ -35,12 +35,12 @@ rightly classify the points. * - .. image:: /auto_examples/images/sphx_glr_plot_2_views_2_classes_001.png + .. image:: /tutorial/auto_examples/mumbo/images/sphx_glr_plot_mumbo_2_views_2_classes_001.png :class: sphx-glr-multi-img * - .. image:: /auto_examples/images/sphx_glr_plot_2_views_2_classes_002.png + .. image:: /tutorial/auto_examples/mumbo/images/sphx_glr_plot_mumbo_2_views_2_classes_002.png :class: sphx-glr-multi-img @@ -71,8 +71,6 @@ rightly classify the points. The second figure displays the classification results for the sub-classifiers on the learning sample data. - /home/dominique/projets/ANR-Lives/multimodal/multimodalboost/examples/plot_2_views_2_classes.py:149: UserWarning: Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure. - plt.show() @@ -85,30 +83,8 @@ rightly classify the points. .. code-block:: default - # Université d'Aix Marseille (AMU) - - # Centre National de la Recherche Scientifique (CNRS) - - # Université de Toulon (UTLN). - # Copyright © 2017-2018 AMU, CNRS, UTLN - # - # This file is part of multimodalboost. 
- # - # multimodalboost is free software: you can redistribute it and/or modify - # it under the terms of the GNU Lesser General Public License as published by - # the Free Software Foundation, either version 3 of the License, or - # (at your option) any later version. - # - # multimodalboost is distributed in the hope that it will be useful, - # but WITHOUT ANY WARRANTY; without even the implied warranty of - # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - # GNU Lesser General Public License for more details. - # - # You should have received a copy of the GNU Lesser General Public License - # along with multimodalboost. If not, see <http://www.gnu.org/licenses/>. - # - # Author: Florent JAILLET - Laboratoire d'Informatique et Systèmes - UMR 7020 - import numpy as np - from multimodalboost.mumbo import MumboClassifier + from multimodal.boosting.mumbo import MumboClassifier from matplotlib import pyplot as plt @@ -214,10 +190,10 @@ rightly classify the points. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.733 seconds) + **Total running time of the script:** ( 0 minutes 0.706 seconds) -.. _sphx_glr_download_auto_examples_plot_2_views_2_classes.py: +.. _sphx_glr_download_tutorial_auto_examples_mumbo_plot_mumbo_2_views_2_classes.py: .. only :: html @@ -229,13 +205,13 @@ rightly classify the points. .. container:: sphx-glr-download - :download:`Download Python source code: plot_2_views_2_classes.py <plot_2_views_2_classes.py>` + :download:`Download Python source code: plot_mumbo_2_views_2_classes.py <plot_mumbo_2_views_2_classes.py>` .. container:: sphx-glr-download - :download:`Download Jupyter notebook: plot_2_views_2_classes.ipynb <plot_2_views_2_classes.ipynb>` + :download:`Download Jupyter notebook: plot_mumbo_2_views_2_classes.ipynb <plot_mumbo_2_views_2_classes.ipynb>` .. 
only:: html diff --git a/doc/tutorial/auto_examples/mumbo/plot_mumbo_2_views_2_classes_codeobj.pickle b/doc/tutorial/auto_examples/mumbo/plot_mumbo_2_views_2_classes_codeobj.pickle new file mode 100644 index 0000000000000000000000000000000000000000..494ce44a2d361a6834c25cba16217907e86cbc4e Binary files /dev/null and b/doc/tutorial/auto_examples/mumbo/plot_mumbo_2_views_2_classes_codeobj.pickle differ diff --git a/doc/tutorial/auto_examples/mumbo/plot_mumbo_3_views_3_classes.ipynb b/doc/tutorial/auto_examples/mumbo/plot_mumbo_3_views_3_classes.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..371d83e13a18543e50726e1f01d8665e65d7e3a7 --- /dev/null +++ b/doc/tutorial/auto_examples/mumbo/plot_mumbo_3_views_3_classes.ipynb @@ -0,0 +1,54 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n================================\nMumbo 3 views, 3 classes example\n================================\n\nIn this toy example, we generate data from three classes, split between three\ntwo-dimensional views.\n\nFor each view, the data are generated so that the points for two classes are\nwell seperated, while the points for the third class are not seperated with\nthe two other classes. 
That means that, taken separately, none of the single\nviews allows for a good classification of the data.\n\nNevertheless, the MuMBo algorithm take adavantage of the complementarity of\nthe views to rightly classify the points.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nfrom multimodal.boosting.mumbo import MumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n \"\"\"Generate random data in a rectangle\"\"\"\n lim = np.array(lim)\n n_features = lim.shape[0]\n data = np.random.random((n_samples, n_features))\n data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 300\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 2.], [0., 1.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nview_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1, view_2), axis=1)\n\ny = np.zeros(3*n_samples, dtype=np.int64)\ny[n_samples:2*n_samples] = 1\ny[2*n_samples:] = 2\n\nviews_ind = np.array([0, 2, 4, 6])\n\nn_estimators = 4\nclf = MumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 4 iterations, the MuMBo classifier reaches exact '\n 'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n print(' - iteration {}, score: {}'.format(ind + 1, score))\n\nprint('\\nThe resulting MuMBo classifier uses four sub-classifiers that are '\n 'wheighted\\nusing the following weights:\\n'\n ' estimator weights: 
{}'.format(clf.estimator_weights_))\n\nprint('\\nThe first sub-classifier uses the data of view 0 to compute '\n 'its classification\\nresults, the second and third sub-classifiers use '\n 'the data of view 1, while the\\nfourth one uses the data of '\n 'view 2:\\n'\n ' best views: {}'. format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n 'between the\\nthree views.')\n\nstyles = ('.b', '.r', '.g')\nfig = plt.figure(figsize=(12., 11.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(3):\n ax = plt.subplot(3, 1, ind_view + 1)\n ax.set_title('View {}'.format(ind_view))\n ind_feature = ind_view * 2\n for ind_class in range(3):\n ind_samples = (y == ind_class)\n ax.plot(X[ind_samples, ind_feature],\n X[ind_samples, ind_feature + 1],\n styles[ind_class],\n label='Class {}'.format(ind_class))\n ax.legend(loc='upper left', framealpha=0.9)\n\nprint('\\nThe second figure displays the classification results for the '\n 'sub-classifiers\\non the learning sample data.\\n')\n\nfig = plt.figure(figsize=(14., 11.))\nfig.suptitle('Classification results on the learning data for the '\n 'sub-classifiers', size=16)\nfor ind_estimator in range(n_estimators):\n best_view = clf.best_views_[ind_estimator]\n y_pred = clf.estimators_[ind_estimator].predict(\n X[:, 2*best_view:2*best_view+2])\n background_color = (1.0, 1.0, 0.9)\n for ind_view in range(3):\n ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)\n if ind_view == best_view:\n ax.set_facecolor(background_color)\n ax.set_title(\n 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n ind_feature = ind_view * 2\n for ind_class in range(3):\n ind_samples = (y_pred == ind_class)\n ax.plot(X[ind_samples, ind_feature],\n X[ind_samples, ind_feature + 1],\n styles[ind_class],\n label='Class {}'.format(ind_class))\n ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)\n\nplt.show()" + ] + } + ], + "metadata": { + "kernelspec": 
{ + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/docmumbo/auto_examples/plot_3_views_3_classes.py b/doc/tutorial/auto_examples/mumbo/plot_mumbo_3_views_3_classes.py similarity index 80% rename from doc/docmumbo/auto_examples/plot_3_views_3_classes.py rename to doc/tutorial/auto_examples/mumbo/plot_mumbo_3_views_3_classes.py index 7de610b658890c146929a923712b6033de830d4d..62cd31156bd2c573331131119f4c69982a63e840 100644 --- a/doc/docmumbo/auto_examples/plot_3_views_3_classes.py +++ b/doc/tutorial/auto_examples/mumbo/plot_mumbo_3_views_3_classes.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- """ -========================== -3 views, 3 classes example -========================== +================================ +Mumbo 3 views, 3 classes example +================================ In this toy example, we generate data from three classes, split between three two-dimensional views. @@ -16,30 +16,8 @@ Nevertheless, the MuMBo algorithm take adavantage of the complementarity of the views to rightly classify the points. """ -# Université d'Aix Marseille (AMU) - -# Centre National de la Recherche Scientifique (CNRS) - -# Université de Toulon (UTLN). -# Copyright © 2017-2018 AMU, CNRS, UTLN -# -# This file is part of multimodalboost. -# -# multimodalboost is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# multimodalboost is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with multimodalboost. If not, see <http://www.gnu.org/licenses/>. -# -# Author: Florent JAILLET - Laboratoire d'Informatique et Systèmes - UMR 7020 - import numpy as np -from multimodalboost.mumbo import MumboClassifier +from multimodal.boosting.mumbo import MumboClassifier from matplotlib import pyplot as plt diff --git a/doc/tutorial/auto_examples/mumbo/plot_mumbo_3_views_3_classes.py.md5 b/doc/tutorial/auto_examples/mumbo/plot_mumbo_3_views_3_classes.py.md5 new file mode 100644 index 0000000000000000000000000000000000000000..4cf20db8ce0cae0244fdd68204efea93d332d95a --- /dev/null +++ b/doc/tutorial/auto_examples/mumbo/plot_mumbo_3_views_3_classes.py.md5 @@ -0,0 +1 @@ +ad27973e4efa5744deba52fe840963e6 \ No newline at end of file diff --git a/doc/docmumbo/auto_examples/plot_3_views_3_classes.rst b/doc/tutorial/auto_examples/mumbo/plot_mumbo_3_views_3_classes.rst similarity index 75% rename from doc/docmumbo/auto_examples/plot_3_views_3_classes.rst rename to doc/tutorial/auto_examples/mumbo/plot_mumbo_3_views_3_classes.rst index 08d7745229e704927cf9be50fc8b71c21222bcb3..0f550994855a5b918f3017a90284085c8730cac3 100644 --- a/doc/docmumbo/auto_examples/plot_3_views_3_classes.rst +++ b/doc/tutorial/auto_examples/mumbo/plot_mumbo_3_views_3_classes.rst @@ -1,15 +1,15 @@ .. note:: :class: sphx-glr-download-link-note - Click :ref:`here <sphx_glr_download_auto_examples_plot_3_views_3_classes.py>` to download the full example code + Click :ref:`here <sphx_glr_download_tutorial_auto_examples_mumbo_plot_mumbo_3_views_3_classes.py>` to download the full example code .. rst-class:: sphx-glr-example-title -.. 
_sphx_glr_auto_examples_plot_3_views_3_classes.py: +.. _sphx_glr_tutorial_auto_examples_mumbo_plot_mumbo_3_views_3_classes.py: -========================== -3 views, 3 classes example -========================== +================================ +Mumbo 3 views, 3 classes example +================================ In this toy example, we generate data from three classes, split between three two-dimensional views. @@ -29,12 +29,12 @@ the views to rightly classify the points. * - .. image:: /auto_examples/images/sphx_glr_plot_3_views_3_classes_001.png + .. image:: /tutorial/auto_examples/mumbo/images/sphx_glr_plot_mumbo_3_views_3_classes_001.png :class: sphx-glr-multi-img * - .. image:: /auto_examples/images/sphx_glr_plot_3_views_3_classes_002.png + .. image:: /tutorial/auto_examples/mumbo/images/sphx_glr_plot_mumbo_3_views_3_classes_002.png :class: sphx-glr-multi-img @@ -67,8 +67,6 @@ the views to rightly classify the points. The second figure displays the classification results for the sub-classifiers on the learning sample data. - /home/dominique/projets/ANR-Lives/multimodal/multimodalboost/examples/plot_3_views_3_classes.py:143: UserWarning: Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure. - plt.show() @@ -81,30 +79,8 @@ the views to rightly classify the points. .. code-block:: default - # Université d'Aix Marseille (AMU) - - # Centre National de la Recherche Scientifique (CNRS) - - # Université de Toulon (UTLN). - # Copyright © 2017-2018 AMU, CNRS, UTLN - # - # This file is part of multimodalboost. - # - # multimodalboost is free software: you can redistribute it and/or modify - # it under the terms of the GNU Lesser General Public License as published by - # the Free Software Foundation, either version 3 of the License, or - # (at your option) any later version. 
- # - # multimodalboost is distributed in the hope that it will be useful, - # but WITHOUT ANY WARRANTY; without even the implied warranty of - # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - # GNU Lesser General Public License for more details. - # - # You should have received a copy of the GNU Lesser General Public License - # along with multimodalboost. If not, see <http://www.gnu.org/licenses/>. - # - # Author: Florent JAILLET - Laboratoire d'Informatique et Systèmes - UMR 7020 - import numpy as np - from multimodalboost.mumbo import MumboClassifier + from multimodal.boosting.mumbo import MumboClassifier from matplotlib import pyplot as plt @@ -210,10 +186,10 @@ the views to rightly classify the points. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 1.293 seconds) + **Total running time of the script:** ( 0 minutes 1.321 seconds) -.. _sphx_glr_download_auto_examples_plot_3_views_3_classes.py: +.. _sphx_glr_download_tutorial_auto_examples_mumbo_plot_mumbo_3_views_3_classes.py: .. only :: html @@ -225,13 +201,13 @@ the views to rightly classify the points. .. container:: sphx-glr-download - :download:`Download Python source code: plot_3_views_3_classes.py <plot_3_views_3_classes.py>` + :download:`Download Python source code: plot_mumbo_3_views_3_classes.py <plot_mumbo_3_views_3_classes.py>` .. container:: sphx-glr-download - :download:`Download Jupyter notebook: plot_3_views_3_classes.ipynb <plot_3_views_3_classes.ipynb>` + :download:`Download Jupyter notebook: plot_mumbo_3_views_3_classes.ipynb <plot_mumbo_3_views_3_classes.ipynb>` .. 
only:: html diff --git a/doc/tutorial/auto_examples/mumbo/plot_mumbo_3_views_3_classes_codeobj.pickle b/doc/tutorial/auto_examples/mumbo/plot_mumbo_3_views_3_classes_codeobj.pickle new file mode 100644 index 0000000000000000000000000000000000000000..4edaaa01c2f44f97afa2e921cb7f86a946c9b4d0 Binary files /dev/null and b/doc/tutorial/auto_examples/mumbo/plot_mumbo_3_views_3_classes_codeobj.pickle differ diff --git a/doc/tutorial/auto_examples/mumbo/sg_execution_times.rst b/doc/tutorial/auto_examples/mumbo/sg_execution_times.rst new file mode 100644 index 0000000000000000000000000000000000000000..008474c57d0d563ca527e0846207527079c56d98 --- /dev/null +++ b/doc/tutorial/auto_examples/mumbo/sg_execution_times.rst @@ -0,0 +1,14 @@ + +:orphan: + +.. _sphx_glr_tutorial_auto_examples_mumbo_sg_execution_times: + +Computation times +================= +**00:02.027** total execution time for **tutorial_auto_examples_mumbo** files: + ++--------------------------------------------------------------------------------------------------------------------+-----------+--------+ +| :ref:`sphx_glr_tutorial_auto_examples_mumbo_plot_mumbo_3_views_3_classes.py` (``plot_mumbo_3_views_3_classes.py``) | 00:01.321 | 0.0 MB | ++--------------------------------------------------------------------------------------------------------------------+-----------+--------+ +| :ref:`sphx_glr_tutorial_auto_examples_mumbo_plot_mumbo_2_views_2_classes.py` (``plot_mumbo_2_views_2_classes.py``) | 00:00.706 | 0.0 MB | ++--------------------------------------------------------------------------------------------------------------------+-----------+--------+ diff --git a/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.ipynb b/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..bfbb2d3995d82bea63e0b51401412d7705dcb45d --- /dev/null +++ b/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.ipynb @@ -0,0 +1,54 @@ +{ + "cells": 
[ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n================================\nMumbo 2 views, 2 classes example\n================================\n\nIn this toy example, we generate data from two classes, split between two\ntwo-dimensional views.\n\nFor each view, the data are generated so that half of the points of each class\nare well separated in the plane, while the other half of the points are not\nseparated and placed in the same area. We also insure that the points that are\nnot separated in one view are well separated in the other view.\n\nThus, in the figure representing the data, the points represented by crosses\n(x) are well separated in view 0 while they are not separated in view 1, while\nthe points represented by dots (.) are well separated in view 1 while they are\nnot separated in view 0. In this figure, the blue symbols represent points\nof class 0, while red symbols represent points of class 1.\n\nThe MuMBo algorithm take adavantage of the complementarity of the two views to\nrightly classify the points.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nfrom multimodal.boosting.mumbo import MumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n \"\"\"Generate random data in a rectangle\"\"\"\n lim = np.array(lim)\n n_features = lim.shape[0]\n data = np.random.random((n_samples, n_features))\n data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 100\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[0., 
1.], [1., 2.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [1., 2.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1), axis=1)\n\ny = np.zeros(4*n_samples, dtype=np.int64)\ny[2*n_samples:] = 1\n\nviews_ind = np.array([0, 2, 4])\n\nn_estimators = 3\nclf = MumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 3 iterations, the MuMBo classifier reaches exact '\n 'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n print(' - iteration {}, score: {}'.format(ind + 1, score))\n\n\nprint('\\nThe resulting MuMBo classifier uses three sub-classifiers that are '\n 'wheighted\\nusing the following weights:\\n'\n ' estimator weights: {}'.format(clf.estimator_weights_))\n\nprint('\\nThe two first sub-classifiers use the data of view 0 to compute '\n 'their\\nclassification results, while the third one uses the data of '\n 'view 1:\\n'\n ' best views: {}'. 
format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n 'between the\\ntwo views.')\n\nfig = plt.figure(figsize=(10., 8.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(2):\n ax = plt.subplot(2, 1, ind_view + 1)\n ax.set_title('View {}'.format(ind_view))\n ind_feature = ind_view * 2\n styles = ('.b', 'xb', '.r', 'xr')\n labels = ('non-separated', 'separated')\n for ind in range(4):\n ind_class = ind // 2\n label = labels[(ind + ind_view) % 2]\n ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],\n X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],\n styles[ind],\n label='Class {} ({})'.format(ind_class, label))\n ax.legend()\n\nprint('\\nThe second figure displays the classification results for the '\n 'sub-classifiers\\non the learning sample data.\\n')\n\nstyles = ('.b', '.r')\nfig = plt.figure(figsize=(12., 7.))\nfig.suptitle('Classification results on the learning data for the '\n 'sub-classifiers', size=16)\nfor ind_estimator in range(n_estimators):\n best_view = clf.best_views_[ind_estimator]\n y_pred = clf.estimators_[ind_estimator].predict(\n X[:, 2*best_view:2*best_view+2])\n background_color = (1.0, 1.0, 0.9)\n for ind_view in range(2):\n ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)\n if ind_view == best_view:\n ax.set_facecolor(background_color)\n ax.set_title(\n 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n ind_feature = ind_view * 2\n for ind_class in range(2):\n ind_samples = (y_pred == ind_class)\n ax.plot(X[ind_samples, ind_feature],\n X[ind_samples, ind_feature + 1],\n styles[ind_class],\n label='Class {}'.format(ind_class))\n ax.legend(title='Predicted class:')\n\nplt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": 
"text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.py b/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..91f0dc7a33e6a921f6f7e79415e7ab8f6d86859a --- /dev/null +++ b/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +""" +================================ +Mumbo 2 views, 2 classes example +================================ + +In this toy example, we generate data from two classes, split between two +two-dimensional views. + +For each view, the data are generated so that half of the points of each class +are well separated in the plane, while the other half of the points are not +separated and placed in the same area. We also insure that the points that are +not separated in one view are well separated in the other view. + +Thus, in the figure representing the data, the points represented by crosses +(x) are well separated in view 0 while they are not separated in view 1, while +the points represented by dots (.) are well separated in view 1 while they are +not separated in view 0. In this figure, the blue symbols represent points +of class 0, while red symbols represent points of class 1. + +The MuMBo algorithm take adavantage of the complementarity of the two views to +rightly classify the points. 
+""" + +import numpy as np +from multimodal.boosting.mumbo import MumboClassifier +from matplotlib import pyplot as plt + + +def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + +seed = 12 +np.random.seed(seed) + +n_samples = 100 + +view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]))) + +view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + +X = np.concatenate((view_0, view_1), axis=1) + +y = np.zeros(4*n_samples, dtype=np.int64) +y[2*n_samples:] = 1 + +views_ind = np.array([0, 2, 4]) + +n_estimators = 3 +clf = MumboClassifier(n_estimators=n_estimators) +clf.fit(X, y, views_ind) + +print('\nAfter 3 iterations, the MuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') +for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + + +print('\nThe resulting MuMBo classifier uses three sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights: {}'.format(clf.estimator_weights_)) + +print('\nThe two first sub-classifiers use the data of view 0 to compute ' + 'their\nclassification results, while the third one uses the data of ' + 'view 1:\n' + ' best views: {}'. 
format(clf.best_views_)) + +print('\nThe first figure displays the data, splitting the representation ' + 'between the\ntwo views.') + +fig = plt.figure(figsize=(10., 8.)) +fig.suptitle('Representation of the data', size=16) +for ind_view in range(2): + ax = plt.subplot(2, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + styles = ('.b', 'xb', '.r', 'xr') + labels = ('non-separated', 'separated') + for ind in range(4): + ind_class = ind // 2 + label = labels[(ind + ind_view) % 2] + ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature], + X[n_samples*ind:n_samples*(ind+1), ind_feature + 1], + styles[ind], + label='Class {} ({})'.format(ind_class, label)) + ax.legend() + +print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + +styles = ('.b', '.r') +fig = plt.figure(figsize=(12., 7.)) +fig.suptitle('Classification results on the learning data for the ' + 'sub-classifiers', size=16) +for ind_estimator in range(n_estimators): + best_view = clf.best_views_[ind_estimator] + y_pred = clf.estimators_[ind_estimator].predict( + X[:, 2*best_view:2*best_view+2]) + background_color = (1.0, 1.0, 0.9) + for ind_view in range(2): + ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1) + if ind_view == best_view: + ax.set_facecolor(background_color) + ax.set_title( + 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(2): + ind_samples = (y_pred == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(title='Predicted class:') + +plt.show() diff --git a/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.rst b/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.rst new file mode 100644 index 0000000000000000000000000000000000000000..91c49038962702084957e4b25d22af4e6bda2012 --- /dev/null +++ 
b/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.rst @@ -0,0 +1,170 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here <sphx_glr_download_tutorial_auto_examples_mumbo_plot_2_views_2_classes.py>` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorial_auto_examples_mumbo_plot_2_views_2_classes.py: + + +================================ +Mumbo 2 views, 2 classes example +================================ + +In this toy example, we generate data from two classes, split between two +two-dimensional views. + +For each view, the data are generated so that half of the points of each class +are well separated in the plane, while the other half of the points are not +separated and placed in the same area. We also insure that the points that are +not separated in one view are well separated in the other view. + +Thus, in the figure representing the data, the points represented by crosses +(x) are well separated in view 0 while they are not separated in view 1, while +the points represented by dots (.) are well separated in view 1 while they are +not separated in view 0. In this figure, the blue symbols represent points +of class 0, while red symbols represent points of class 1. + +The MuMBo algorithm take adavantage of the complementarity of the two views to +rightly classify the points. + + +.. 
code-block:: default + + + import numpy as np + from multimodal.boosting.mumbo import MumboClassifier + from matplotlib import pyplot as plt + + + def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + + seed = 12 + np.random.seed(seed) + + n_samples = 100 + + view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]))) + + view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + + X = np.concatenate((view_0, view_1), axis=1) + + y = np.zeros(4*n_samples, dtype=np.int64) + y[2*n_samples:] = 1 + + views_ind = np.array([0, 2, 4]) + + n_estimators = 3 + clf = MumboClassifier(n_estimators=n_estimators) + clf.fit(X, y, views_ind) + + print('\nAfter 3 iterations, the MuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') + for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + + + print('\nThe resulting MuMBo classifier uses three sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights: {}'.format(clf.estimator_weights_)) + + print('\nThe two first sub-classifiers use the data of view 0 to compute ' + 'their\nclassification results, while the third one uses the data of ' + 'view 1:\n' + ' best views: {}'. 
format(clf.best_views_)) + + print('\nThe first figure displays the data, splitting the representation ' + 'between the\ntwo views.') + + fig = plt.figure(figsize=(10., 8.)) + fig.suptitle('Representation of the data', size=16) + for ind_view in range(2): + ax = plt.subplot(2, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + styles = ('.b', 'xb', '.r', 'xr') + labels = ('non-separated', 'separated') + for ind in range(4): + ind_class = ind // 2 + label = labels[(ind + ind_view) % 2] + ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature], + X[n_samples*ind:n_samples*(ind+1), ind_feature + 1], + styles[ind], + label='Class {} ({})'.format(ind_class, label)) + ax.legend() + + print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + + styles = ('.b', '.r') + fig = plt.figure(figsize=(12., 7.)) + fig.suptitle('Classification results on the learning data for the ' + 'sub-classifiers', size=16) + for ind_estimator in range(n_estimators): + best_view = clf.best_views_[ind_estimator] + y_pred = clf.estimators_[ind_estimator].predict( + X[:, 2*best_view:2*best_view+2]) + background_color = (1.0, 1.0, 0.9) + for ind_view in range(2): + ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1) + if ind_view == best_view: + ax.set_facecolor(background_color) + ax.set_title( + 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(2): + ind_samples = (y_pred == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(title='Predicted class:') + + plt.show() + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 0.000 seconds) + + +.. _sphx_glr_download_tutorial_auto_examples_mumbo_plot_2_views_2_classes.py: + + +.. only :: html + + .. 
container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: mumbo_plot_2_views_2_classes.py <mumbo_plot_2_views_2_classes.py>` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: mumbo_plot_2_views_2_classes.ipynb <mumbo_plot_2_views_2_classes.ipynb>` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_ diff --git a/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes_codeobj.pickle b/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes_codeobj.pickle new file mode 100644 index 0000000000000000000000000000000000000000..4c07e368e87374ab293d680db7ba94f21c8c98c2 Binary files /dev/null and b/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes_codeobj.pickle differ diff --git a/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.ipynb b/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..371d83e13a18543e50726e1f01d8665e65d7e3a7 --- /dev/null +++ b/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.ipynb @@ -0,0 +1,54 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n================================\nMumbo 3 views, 3 classes example\n================================\n\nIn this toy example, we generate data from three classes, split between three\ntwo-dimensional views.\n\nFor each view, the data are generated so that the points for two classes are\nwell seperated, while the points for the third class are not seperated with\nthe two other classes. 
That means that, taken separately, none of the single\nviews allows for a good classification of the data.\n\nNevertheless, the MuMBo algorithm take adavantage of the complementarity of\nthe views to rightly classify the points.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nfrom multimodal.boosting.mumbo import MumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n \"\"\"Generate random data in a rectangle\"\"\"\n lim = np.array(lim)\n n_features = lim.shape[0]\n data = np.random.random((n_samples, n_features))\n data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 300\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 2.], [0., 1.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nview_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),\n generate_data(n_samples, [[0., 1.], [0., 1.]]),\n generate_data(n_samples, [[1., 2.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1, view_2), axis=1)\n\ny = np.zeros(3*n_samples, dtype=np.int64)\ny[n_samples:2*n_samples] = 1\ny[2*n_samples:] = 2\n\nviews_ind = np.array([0, 2, 4, 6])\n\nn_estimators = 4\nclf = MumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 4 iterations, the MuMBo classifier reaches exact '\n 'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n print(' - iteration {}, score: {}'.format(ind + 1, score))\n\nprint('\\nThe resulting MuMBo classifier uses four sub-classifiers that are '\n 'wheighted\\nusing the following weights:\\n'\n ' estimator weights: 
{}'.format(clf.estimator_weights_))\n\nprint('\\nThe first sub-classifier uses the data of view 0 to compute '\n 'its classification\\nresults, the second and third sub-classifiers use '\n 'the data of view 1, while the\\nfourth one uses the data of '\n 'view 2:\\n'\n ' best views: {}'. format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n 'between the\\nthree views.')\n\nstyles = ('.b', '.r', '.g')\nfig = plt.figure(figsize=(12., 11.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(3):\n ax = plt.subplot(3, 1, ind_view + 1)\n ax.set_title('View {}'.format(ind_view))\n ind_feature = ind_view * 2\n for ind_class in range(3):\n ind_samples = (y == ind_class)\n ax.plot(X[ind_samples, ind_feature],\n X[ind_samples, ind_feature + 1],\n styles[ind_class],\n label='Class {}'.format(ind_class))\n ax.legend(loc='upper left', framealpha=0.9)\n\nprint('\\nThe second figure displays the classification results for the '\n 'sub-classifiers\\non the learning sample data.\\n')\n\nfig = plt.figure(figsize=(14., 11.))\nfig.suptitle('Classification results on the learning data for the '\n 'sub-classifiers', size=16)\nfor ind_estimator in range(n_estimators):\n best_view = clf.best_views_[ind_estimator]\n y_pred = clf.estimators_[ind_estimator].predict(\n X[:, 2*best_view:2*best_view+2])\n background_color = (1.0, 1.0, 0.9)\n for ind_view in range(3):\n ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)\n if ind_view == best_view:\n ax.set_facecolor(background_color)\n ax.set_title(\n 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n ind_feature = ind_view * 2\n for ind_class in range(3):\n ind_samples = (y_pred == ind_class)\n ax.plot(X[ind_samples, ind_feature],\n X[ind_samples, ind_feature + 1],\n styles[ind_class],\n label='Class {}'.format(ind_class))\n ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)\n\nplt.show()" + ] + } + ], + "metadata": { + "kernelspec": 
{ + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.py b/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..62cd31156bd2c573331131119f4c69982a63e840 --- /dev/null +++ b/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +""" +================================ +Mumbo 3 views, 3 classes example +================================ + +In this toy example, we generate data from three classes, split between three +two-dimensional views. + +For each view, the data are generated so that the points for two classes are +well seperated, while the points for the third class are not seperated with +the two other classes. That means that, taken separately, none of the single +views allows for a good classification of the data. + +Nevertheless, the MuMBo algorithm take adavantage of the complementarity of +the views to rightly classify the points. 
+""" + +import numpy as np +from multimodal.boosting.mumbo import MumboClassifier +from matplotlib import pyplot as plt + + +def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + +seed = 12 +np.random.seed(seed) + +n_samples = 300 + +view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]))) + +view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + +view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]))) + +X = np.concatenate((view_0, view_1, view_2), axis=1) + +y = np.zeros(3*n_samples, dtype=np.int64) +y[n_samples:2*n_samples] = 1 +y[2*n_samples:] = 2 + +views_ind = np.array([0, 2, 4, 6]) + +n_estimators = 4 +clf = MumboClassifier(n_estimators=n_estimators) +clf.fit(X, y, views_ind) + +print('\nAfter 4 iterations, the MuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') +for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + +print('\nThe resulting MuMBo classifier uses four sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights: {}'.format(clf.estimator_weights_)) + +print('\nThe first sub-classifier uses the data of view 0 to compute ' + 'its classification\nresults, the second and third sub-classifiers use ' + 'the data of view 1, while the\nfourth one uses the data of ' + 'view 2:\n' + ' best views: {}'. 
format(clf.best_views_)) + +print('\nThe first figure displays the data, splitting the representation ' + 'between the\nthree views.') + +styles = ('.b', '.r', '.g') +fig = plt.figure(figsize=(12., 11.)) +fig.suptitle('Representation of the data', size=16) +for ind_view in range(3): + ax = plt.subplot(3, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(3): + ind_samples = (y == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(loc='upper left', framealpha=0.9) + +print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + +fig = plt.figure(figsize=(14., 11.)) +fig.suptitle('Classification results on the learning data for the ' + 'sub-classifiers', size=16) +for ind_estimator in range(n_estimators): + best_view = clf.best_views_[ind_estimator] + y_pred = clf.estimators_[ind_estimator].predict( + X[:, 2*best_view:2*best_view+2]) + background_color = (1.0, 1.0, 0.9) + for ind_view in range(3): + ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1) + if ind_view == best_view: + ax.set_facecolor(background_color) + ax.set_title( + 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(3): + ind_samples = (y_pred == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9) + +plt.show() diff --git a/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.rst b/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.rst new file mode 100644 index 0000000000000000000000000000000000000000..9f11b4e106ec5d949d8603fc7b0c4393fca613cf --- /dev/null +++ b/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.rst @@ -0,0 
+1,164 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here <sphx_glr_download_tutorial_auto_examples_mumbo_plot_3_views_3_classes.py>` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorial_auto_examples_mumbo_plot_3_views_3_classes.py: + + +================================ +Mumbo 3 views, 3 classes example +================================ + +In this toy example, we generate data from three classes, split between three +two-dimensional views. + +For each view, the data are generated so that the points for two classes are +well seperated, while the points for the third class are not seperated with +the two other classes. That means that, taken separately, none of the single +views allows for a good classification of the data. + +Nevertheless, the MuMBo algorithm take adavantage of the complementarity of +the views to rightly classify the points. + + +.. code-block:: default + + + import numpy as np + from multimodal.boosting.mumbo import MumboClassifier + from matplotlib import pyplot as plt + + + def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + + seed = 12 + np.random.seed(seed) + + n_samples = 300 + + view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]))) + + view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + + view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]))) + + X = np.concatenate((view_0, view_1, view_2), axis=1) + + y = 
np.zeros(3*n_samples, dtype=np.int64) + y[n_samples:2*n_samples] = 1 + y[2*n_samples:] = 2 + + views_ind = np.array([0, 2, 4, 6]) + + n_estimators = 4 + clf = MumboClassifier(n_estimators=n_estimators) + clf.fit(X, y, views_ind) + + print('\nAfter 4 iterations, the MuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') + for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + + print('\nThe resulting MuMBo classifier uses four sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights: {}'.format(clf.estimator_weights_)) + + print('\nThe first sub-classifier uses the data of view 0 to compute ' + 'its classification\nresults, the second and third sub-classifiers use ' + 'the data of view 1, while the\nfourth one uses the data of ' + 'view 2:\n' + ' best views: {}'. format(clf.best_views_)) + + print('\nThe first figure displays the data, splitting the representation ' + 'between the\nthree views.') + + styles = ('.b', '.r', '.g') + fig = plt.figure(figsize=(12., 11.)) + fig.suptitle('Representation of the data', size=16) + for ind_view in range(3): + ax = plt.subplot(3, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(3): + ind_samples = (y == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(loc='upper left', framealpha=0.9) + + print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + + fig = plt.figure(figsize=(14., 11.)) + fig.suptitle('Classification results on the learning data for the ' + 'sub-classifiers', size=16) + for ind_estimator in range(n_estimators): + best_view = clf.best_views_[ind_estimator] + y_pred = clf.estimators_[ind_estimator].predict( + X[:, 2*best_view:2*best_view+2]) + 
background_color = (1.0, 1.0, 0.9) + for ind_view in range(3): + ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1) + if ind_view == best_view: + ax.set_facecolor(background_color) + ax.set_title( + 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(3): + ind_samples = (y_pred == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9) + + plt.show() + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 0.000 seconds) + + +.. _sphx_glr_download_tutorial_auto_examples_mumbo_plot_3_views_3_classes.py: + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: mumbo_plot_3_views_3_classes.py <mumbo_plot_3_views_3_classes.py>` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: mumbo_plot_3_views_3_classes.ipynb <mumbo_plot_3_views_3_classes.ipynb>` + + +.. only:: html + + .. 
rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_ diff --git a/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes_codeobj.pickle b/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes_codeobj.pickle new file mode 100644 index 0000000000000000000000000000000000000000..4c07e368e87374ab293d680db7ba94f21c8c98c2 Binary files /dev/null and b/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes_codeobj.pickle differ diff --git a/doc/tutorial/auto_examples/mvml/images/sphx_glr_plot_mvml__001.png b/doc/tutorial/auto_examples/mvml/images/sphx_glr_plot_mvml__001.png new file mode 100644 index 0000000000000000000000000000000000000000..a6b3fda7e968314e008f0bbd541574ecff9bcc16 Binary files /dev/null and b/doc/tutorial/auto_examples/mvml/images/sphx_glr_plot_mvml__001.png differ diff --git a/doc/tutorial/auto_examples/mvml/images/sphx_glr_plot_mvml__002.png b/doc/tutorial/auto_examples/mvml/images/sphx_glr_plot_mvml__002.png new file mode 100644 index 0000000000000000000000000000000000000000..76c5b37350dcbf775f6e7452fdf1177fd26a2f2e Binary files /dev/null and b/doc/tutorial/auto_examples/mvml/images/sphx_glr_plot_mvml__002.png differ diff --git a/doc/tutorial/auto_examples/mvml/images/thumb/sphx_glr_mvml_plot__thumb.png b/doc/tutorial/auto_examples/mvml/images/thumb/sphx_glr_mvml_plot__thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..233f8e605efca4bef384a7c603d53fdc385428bc Binary files /dev/null and b/doc/tutorial/auto_examples/mvml/images/thumb/sphx_glr_mvml_plot__thumb.png differ diff --git a/doc/tutorial/auto_examples/mvml/images/thumb/sphx_glr_plot_mvml__thumb.png b/doc/tutorial/auto_examples/mvml/images/thumb/sphx_glr_plot_mvml__thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..a235ba57d84ef26f64fbf2fe723e431b9ecb1827 Binary files /dev/null and b/doc/tutorial/auto_examples/mvml/images/thumb/sphx_glr_plot_mvml__thumb.png differ diff --git 
a/doc/tutorial/auto_examples/mvml/mvml_plot_.ipynb b/doc/tutorial/auto_examples/mvml/mvml_plot_.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..830427c6c91ffd2da3cc37634a043735a29b0122 --- /dev/null +++ b/doc/tutorial/auto_examples/mvml/mvml_plot_.ipynb @@ -0,0 +1,54 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# MVML\n\nDemonstration on how MVML (in file mvml.py) is intended to be used with very simple simulated dataset\n\nDemonstration uses scikit-learn for retrieving datasets and for calculating rbf kernel function, see\nhttp://scikit-learn.org/stable/\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics.pairwise import rbf_kernel\nfrom multimodal.kernels.mvml import MVML\nfrom multimodal.datasets.data_sample import DataSample\nfrom multimodal.tests.datasets.get_dataset_path import get_dataset_path\n\n\nnp.random.seed(4)\n\n# =========== create a simple dataset ============\n\nn_tot = 200\nhalf = int(n_tot/2)\nn_tr = 120\n\n# create a bit more data than needed so that we can take \"half\" amount of samples for each class\nX0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False)\nX1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False)\n\n# make multi-view correspondence (select equal number of samples for both classes and order the data same way\n# in both views)\n\nyinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half])\nyinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half])\n\nX0 = X0[yinds0, :]\nX1 = X1[yinds1, :]\nY = 
np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1\n\n# show data\n# =========== create a simple dataset ============\n\nn_tot = 200\nhalf = int(n_tot/2)\nn_tr = 120\n\n# create a bit more data than needed so that we can take \"half\" amount of samples for each class\nX0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False)\nX1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False)\n\n# make multi-view correspondence (select equal number of samples for both classes and order the data same way\n# in both views)\n\nyinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half])\nyinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half])\n\nX0 = X0[yinds0, :]\nX1 = X1[yinds1, :]\nY = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1\n\n# show data\nplt.figure(figsize=(10., 8.))\nplt.subplot(121)\nplt.scatter(X0[:, 0], X0[:, 1], c=Y)\nplt.title(\"all data, view 1\")\nplt.subplot(122)\nplt.scatter(X1[:, 0], X1[:, 1], c=Y)\nplt.title(\"all data, view 2\")\nplt.show()\n\n# shuffle\norder = np.random.permutation(n_tot)\nX0 = X0[order, :]\nX1 = X1[order, :]\nY = Y[order]\n\n# make kernel dictionaries\nkernel_dict = {}\ntest_kernel_dict = {}\nkernel_dict[0] = rbf_kernel(X0[0:n_tr, :])\nkernel_dict[1] = rbf_kernel(X1[0:n_tr, :])\ntest_kernel_dict[0] = rbf_kernel(X0[n_tr:n_tot, :], X0[0:n_tr, :])\ntest_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :])\n\n# input_x = get_dataset_path(\"input_x_dic.pkl\")\n# f = open(input_x, \"wb\")\n# pickle.dump(input_x, f)\n#\n#\n# d= DataSample(kernel_dict)\n# a = d.data\n#\n# =========== use MVML in classifying the data ============\n#\n# demo on how the code is intended to be used; parameters are not cross-validated, just picked some\n# # with approximation\n# # default: learn A, don't learn w (learn_A=1, learn_w=0)\nmvml = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='precomputed')\nmvml.fit(kernel_dict, Y[0:n_tr])\n\n\n#\n\npred1 = 
np.sign(mvml.predict(test_kernel_dict) ) # take sign for classification result\n#\n# without approximation\nmvml2 = MVML(lmbda=0.1, eta=1, nystrom_param=1, kernel='precomputed') # without approximation\nmvml2.fit(kernel_dict, Y[0:n_tr])\npred2 = np.sign(mvml2.predict(test_kernel_dict))\n#\n# use MVML_Cov, don't learn w\nmvml3 = MVML(lmbda=0.1, eta=1,learn_A=3, nystrom_param=1, kernel='precomputed')\nmvml3.fit(kernel_dict, Y[0:n_tr])\npred3 = np.sign(mvml.predict(test_kernel_dict))\n#\n# use MVML_I, don't learn w\nmvml3 = MVML(lmbda=0.1, eta=1,learn_A=4, nystrom_param=1, kernel='precomputed')\n\npred4 = np.sign(mvml.predict(test_kernel_dict))\n#\n#\n# # =========== show results ============\n#\n# # accuracies\nacc1 = accuracy_score(Y[n_tr:n_tot], pred1)\nacc2 = accuracy_score(Y[n_tr:n_tot], pred2)\nacc3 = accuracy_score(Y[n_tr:n_tot], pred3)\nacc4 = accuracy_score(Y[n_tr:n_tot], pred4)\n#\n# # display obtained accuracies\n#\nprint(\"MVML: \", acc1)\nprint(\"MVMLsparse: \", acc2)\nprint(\"MVML_Cov: \", acc3)\nprint(\"MVML_I: \", acc4)\n#\n#\n# # plot data and some classification results\n#\nplt.figure(2, figsize=(10., 8.))\nplt.subplot(341)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=Y[n_tr:n_tot])\nplt.title(\"orig. view 1\")\nplt.subplot(342)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=Y[n_tr:n_tot])\nplt.title(\"orig. 
view 2\")\n#\npred1[np.where(pred1[:, 0] != Y[n_tr:n_tot])] = 0\npred1 = pred1.reshape((pred1.shape[0]))\nplt.subplot(343)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred1)\nplt.title(\"MVML view 1\")\nplt.subplot(344)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred1)\nplt.title(\"MVML view 2\")\n#\npred2[np.where(pred2[:, 0] != Y[n_tr:n_tot])] = 0\npred2 = pred2.reshape((pred2.shape[0]))\nplt.subplot(345)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred2)\nplt.title(\"MVMLsparse view 1\")\nplt.subplot(346)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred2)\nplt.title(\"MVMLsparse view 2\")\n#\npred3[np.where(pred3[:, 0] != Y[n_tr:n_tot])] = 0\npred3 = pred3.reshape((pred3.shape[0]))\n#\nplt.subplot(347)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred3)\nplt.title(\"MVML_Cov view 1\")\nplt.subplot(348)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred3)\nplt.title(\"MVML_Cov view 2\")\n#\npred4[np.where(pred4[:, 0] != Y[n_tr:n_tot])] = 0\npred4 = pred4.reshape((pred4.shape[0]))\nplt.subplot(349)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred4)\nplt.title(\"MVML_I view 1\")\nplt.subplot(3,4,10)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred4)\nplt.title(\"MVML_I view 2\")\n#\nplt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/docmetricexamples/demo.py b/doc/tutorial/auto_examples/mvml/mvml_plot_.py similarity index 76% rename from doc/docmetricexamples/demo.py rename to doc/tutorial/auto_examples/mvml/mvml_plot_.py index 
108ac8c6550c4c0abba5d9fc09573ce815ea024d..1abf1ea4e719f7b258baa05b7d21b32b1fa97988 100644 --- a/doc/docmetricexamples/demo.py +++ b/doc/tutorial/auto_examples/mvml/mvml_plot_.py @@ -1,19 +1,23 @@ +# -*- coding: utf-8 -*- +""" +==== +MVML +==== +Demonstration on how MVML (in file mvml.py) is intended to be used with very simple simulated dataset + +Demonstration uses scikit-learn for retrieving datasets and for calculating rbf kernel function, see +http://scikit-learn.org/stable/ +""" import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn.metrics import accuracy_score from sklearn.metrics.pairwise import rbf_kernel -from metriclearning.mvml import MVML -from metriclearning.datasets.data_sample import DataSample -from metriclearning.tests.datasets.get_dataset_path import get_dataset_path -import pickle -""" -Demonstration on how MVML (in file mvml.py) is intended to be used with very simple simulated dataset +from multimodal.kernels.mvml import MVML +from multimodal.datasets.data_sample import DataSample +from multimodal.tests.datasets.get_dataset_path import get_dataset_path -Demonstration uses scikit-learn for retrieving datasets and for calculating rbf kernel function, see -http://scikit-learn.org/stable/ -""" np.random.seed(4) @@ -59,14 +63,14 @@ X1 = X1[yinds1, :] Y = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1 # show data -plt.figure(1) +plt.figure(figsize=(10., 8.)) plt.subplot(121) plt.scatter(X0[:, 0], X0[:, 1], c=Y) plt.title("all data, view 1") plt.subplot(122) plt.scatter(X1[:, 0], X1[:, 1], c=Y) plt.title("all data, view 2") -# plt.show() +plt.show() # shuffle order = np.random.permutation(n_tot) @@ -85,60 +89,66 @@ test_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :]) # input_x = get_dataset_path("input_x_dic.pkl") # f = open(input_x, "wb") # pickle.dump(input_x, f) - - -d= DataSample(kernel_dict) -a = d.data - +# +# +# d= DataSample(kernel_dict) +# a = d.data +# # =========== use MVML 
in classifying the data ============ - +# # demo on how the code is intended to be used; parameters are not cross-validated, just picked some -# mvml = MVML(kernel_dict, Y[0:n_tr], [0.1, 1], nystrom_param=0.2) -mvml = MVML( [0.1, 1], nystrom_param=0.2) -A1, g1, w1 = mvml.fit(a, Y[0:n_tr]) -# with approximation -# mvml = MVML(kernel_dict, Y[0:n_tr], [0.1, 1], nystrom_param=1) # without approximation - -A1, g1, w1 = mvml.learn_mvml() # default: learn A, don't learn w (learn_A=1, learn_w=0) -pred1 = np.sign(mvml.predict_mvml(test_kernel_dict, g1, w1)) # take sign for classification result - -A2, g2, w2 = mvml.learn_mvml(learn_A=2, learn_w=1) # learn sparse A and learn w -pred2 = np.sign(mvml.predict_mvml(test_kernel_dict, g2, w2)) -# print(w2) - -A3, g3, w3 = mvml.learn_mvml(learn_A=3) # use MVML_Cov, don't learn w -pred3 = np.sign(mvml.predict_mvml(test_kernel_dict, g3, w3)) - -A4, g4, w4 = mvml.learn_mvml(learn_A=4) # use MVML_I, don't learn w -pred4 = np.sign(mvml.predict_mvml(test_kernel_dict, g4, w4)) - - -# =========== show results ============ - -# accuracies +# # with approximation +# # default: learn A, don't learn w (learn_A=1, learn_w=0) +mvml = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='precomputed') +mvml.fit(kernel_dict, Y[0:n_tr]) + + +# + +pred1 = np.sign(mvml.predict(test_kernel_dict) ) # take sign for classification result +# +# without approximation +mvml2 = MVML(lmbda=0.1, eta=1, nystrom_param=1, kernel='precomputed') # without approximation +mvml2.fit(kernel_dict, Y[0:n_tr]) +pred2 = np.sign(mvml2.predict(test_kernel_dict)) +# +# use MVML_Cov, don't learn w +mvml3 = MVML(lmbda=0.1, eta=1,learn_A=3, nystrom_param=1, kernel='precomputed') +mvml3.fit(kernel_dict, Y[0:n_tr]) +pred3 = np.sign(mvml3.predict(test_kernel_dict)) +# +# use MVML_I, don't learn w +mvml4 = MVML(lmbda=0.1, eta=1,learn_A=4, nystrom_param=1, kernel='precomputed') +mvml4.fit(kernel_dict, Y[0:n_tr]) +pred4 = np.sign(mvml4.predict(test_kernel_dict)) +# +# +# # =========== show results ============ +# +# # 
accuracies acc1 = accuracy_score(Y[n_tr:n_tot], pred1) acc2 = accuracy_score(Y[n_tr:n_tot], pred2) acc3 = accuracy_score(Y[n_tr:n_tot], pred3) acc4 = accuracy_score(Y[n_tr:n_tot], pred4) - -# display obtained accuracies - +# +# # display obtained accuracies +# print("MVML: ", acc1) print("MVMLsparse: ", acc2) print("MVML_Cov: ", acc3) print("MVML_I: ", acc4) - - -# plot data and some classification results - -plt.figure(2) +# +# +# # plot data and some classification results +# +plt.figure(2, figsize=(10., 8.)) plt.subplot(341) plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=Y[n_tr:n_tot]) plt.title("orig. view 1") plt.subplot(342) plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=Y[n_tr:n_tot]) plt.title("orig. view 2") - +# pred1[np.where(pred1[:, 0] != Y[n_tr:n_tot])] = 0 pred1 = pred1.reshape((pred1.shape[0])) plt.subplot(343) @@ -147,7 +157,7 @@ plt.title("MVML view 1") plt.subplot(344) plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred1) plt.title("MVML view 2") - +# pred2[np.where(pred2[:, 0] != Y[n_tr:n_tot])] = 0 pred2 = pred2.reshape((pred2.shape[0])) plt.subplot(345) @@ -156,17 +166,17 @@ plt.title("MVMLsparse view 1") plt.subplot(346) plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred2) plt.title("MVMLsparse view 2") - +# pred3[np.where(pred3[:, 0] != Y[n_tr:n_tot])] = 0 pred3 = pred3.reshape((pred3.shape[0])) - +# plt.subplot(347) plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred3) plt.title("MVML_Cov view 1") plt.subplot(348) plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred3) plt.title("MVML_Cov view 2") - +# pred4[np.where(pred4[:, 0] != Y[n_tr:n_tot])] = 0 pred4 = pred4.reshape((pred4.shape[0])) plt.subplot(349) @@ -175,9 +185,5 @@ plt.title("MVML_I view 1") plt.subplot(3,4,10) plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred4) plt.title("MVML_I view 2") - -# plt.figure(3) -# plt.spy(A2) -# plt.title("sparse learned A") - +# plt.show() diff --git a/doc/tutorial/auto_examples/mvml/mvml_plot_.rst 
b/doc/tutorial/auto_examples/mvml/mvml_plot_.rst new file mode 100644 index 0000000000000000000000000000000000000000..490cadf3669e83769590207c3f4b31e0cd541590 --- /dev/null +++ b/doc/tutorial/auto_examples/mvml/mvml_plot_.rst @@ -0,0 +1,232 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here <sphx_glr_download_tutorial_auto_examples_mvml_mvml_plot_.py>` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorial_auto_examples_mvml_mvml_plot_.py: + + +==== +MVML +==== +Demonstration on how MVML (in file mvml.py) is intended to be used with very simple simulated dataset + +Demonstration uses scikit-learn for retrieving datasets and for calculating rbf kernel function, see +http://scikit-learn.org/stable/ + + +.. code-block:: default + + + import numpy as np + import matplotlib.pyplot as plt + from sklearn import datasets + from sklearn.metrics import accuracy_score + from sklearn.metrics.pairwise import rbf_kernel + from multimodal.kernels.mvml import MVML + from multimodal.datasets.data_sample import DataSample + from multimodal.tests.datasets.get_dataset_path import get_dataset_path + + + np.random.seed(4) + + # =========== create a simple dataset ============ + + n_tot = 200 + half = int(n_tot/2) + n_tr = 120 + + # create a bit more data than needed so that we can take "half" amount of samples for each class + X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False) + X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False) + + # make multi-view correspondence (select equal number of samples for both classes and order the data same way + # in both views) + + yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half]) + yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half]) + + X0 = X0[yinds0, :] + X1 = X1[yinds1, :] + Y = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1 + + # show data + # =========== create a simple 
dataset ============ + + n_tot = 200 + half = int(n_tot/2) + n_tr = 120 + + # create a bit more data than needed so that we can take "half" amount of samples for each class + X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False) + X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False) + + # make multi-view correspondence (select equal number of samples for both classes and order the data same way + # in both views) + + yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half]) + yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half]) + + X0 = X0[yinds0, :] + X1 = X1[yinds1, :] + Y = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1 + + # show data + plt.figure(figsize=(10., 8.)) + plt.subplot(121) + plt.scatter(X0[:, 0], X0[:, 1], c=Y) + plt.title("all data, view 1") + plt.subplot(122) + plt.scatter(X1[:, 0], X1[:, 1], c=Y) + plt.title("all data, view 2") + plt.show() + + # shuffle + order = np.random.permutation(n_tot) + X0 = X0[order, :] + X1 = X1[order, :] + Y = Y[order] + + # make kernel dictionaries + kernel_dict = {} + test_kernel_dict = {} + kernel_dict[0] = rbf_kernel(X0[0:n_tr, :]) + kernel_dict[1] = rbf_kernel(X1[0:n_tr, :]) + test_kernel_dict[0] = rbf_kernel(X0[n_tr:n_tot, :], X0[0:n_tr, :]) + test_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :]) + + # input_x = get_dataset_path("input_x_dic.pkl") + # f = open(input_x, "wb") + # pickle.dump(input_x, f) + # + # + # d= DataSample(kernel_dict) + # a = d.data + # + # =========== use MVML in classifying the data ============ + # + # demo on how the code is intended to be used; parameters are not cross-validated, just picked some + # # with approximation + # # default: learn A, don't learn w (learn_A=1, learn_w=0) + mvml = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='precomputed') + mvml.fit(kernel_dict, Y[0:n_tr]) + + + # + + pred1 = np.sign(mvml.predict(test_kernel_dict) ) # take sign for 
classification result + # + # without approximation + mvml2 = MVML(lmbda=0.1, eta=1, nystrom_param=1, kernel='precomputed') # without approximation + mvml2.fit(kernel_dict, Y[0:n_tr]) + pred2 = np.sign(mvml2.predict(test_kernel_dict)) + # + # use MVML_Cov, don't learn w + mvml3 = MVML(lmbda=0.1, eta=1,learn_A=3, nystrom_param=1, kernel='precomputed') + mvml3.fit(kernel_dict, Y[0:n_tr]) + pred3 = np.sign(mvml3.predict(test_kernel_dict)) + # + # use MVML_I, don't learn w + mvml4 = MVML(lmbda=0.1, eta=1,learn_A=4, nystrom_param=1, kernel='precomputed') + mvml4.fit(kernel_dict, Y[0:n_tr]) + pred4 = np.sign(mvml4.predict(test_kernel_dict)) + # + # + # # =========== show results ============ + # + # # accuracies + acc1 = accuracy_score(Y[n_tr:n_tot], pred1) + acc2 = accuracy_score(Y[n_tr:n_tot], pred2) + acc3 = accuracy_score(Y[n_tr:n_tot], pred3) + acc4 = accuracy_score(Y[n_tr:n_tot], pred4) + # + # # display obtained accuracies + # + print("MVML: ", acc1) + print("MVMLsparse: ", acc2) + print("MVML_Cov: ", acc3) + print("MVML_I: ", acc4) + # + # + # # plot data and some classification results + # + plt.figure(2, figsize=(10., 8.)) + plt.subplot(341) + plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=Y[n_tr:n_tot]) + plt.title("orig. view 1") + plt.subplot(342) + plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=Y[n_tr:n_tot]) + plt.title("orig. 
view 2") + # + pred1[np.where(pred1[:, 0] != Y[n_tr:n_tot])] = 0 + pred1 = pred1.reshape((pred1.shape[0])) + plt.subplot(343) + plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred1) + plt.title("MVML view 1") + plt.subplot(344) + plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred1) + plt.title("MVML view 2") + # + pred2[np.where(pred2[:, 0] != Y[n_tr:n_tot])] = 0 + pred2 = pred2.reshape((pred2.shape[0])) + plt.subplot(345) + plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred2) + plt.title("MVMLsparse view 1") + plt.subplot(346) + plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred2) + plt.title("MVMLsparse view 2") + # + pred3[np.where(pred3[:, 0] != Y[n_tr:n_tot])] = 0 + pred3 = pred3.reshape((pred3.shape[0])) + # + plt.subplot(347) + plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred3) + plt.title("MVML_Cov view 1") + plt.subplot(348) + plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred3) + plt.title("MVML_Cov view 2") + # + pred4[np.where(pred4[:, 0] != Y[n_tr:n_tot])] = 0 + pred4 = pred4.reshape((pred4.shape[0])) + plt.subplot(349) + plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred4) + plt.title("MVML_I view 1") + plt.subplot(3,4,10) + plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred4) + plt.title("MVML_I view 2") + # + plt.show() + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 0.000 seconds) + + +.. _sphx_glr_download_tutorial_auto_examples_mvml_mvml_plot_.py: + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: mvml_plot_.py <mvml_plot_.py>` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: mvml_plot_.ipynb <mvml_plot_.ipynb>` + + +.. only:: html + + .. 
rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_ diff --git a/doc/tutorial/auto_examples/mvml/mvml_plot__codeobj.pickle b/doc/tutorial/auto_examples/mvml/mvml_plot__codeobj.pickle new file mode 100644 index 0000000000000000000000000000000000000000..4e0f7c5674378aad85a3f18704b6ebf847fb2265 Binary files /dev/null and b/doc/tutorial/auto_examples/mvml/mvml_plot__codeobj.pickle differ diff --git a/doc/tutorial/auto_examples/mvml/plot_mvml_.ipynb b/doc/tutorial/auto_examples/mvml/plot_mvml_.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..211b617fe7045ab11b9cd8c229cbb6e55a0b5812 --- /dev/null +++ b/doc/tutorial/auto_examples/mvml/plot_mvml_.ipynb @@ -0,0 +1,72 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# MVML\n\nDemonstration on how MVML (in file mvml.py) is intended to be used with very simple simulated dataset\n\nDemonstration uses scikit-learn for retrieving datasets and for calculating rbf kernel function, see\nhttp://scikit-learn.org/stable/\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics.pairwise import rbf_kernel\nfrom multimodal.kernels.mvml import MVML\nfrom multimodal.datasets.data_sample import DataSample\nfrom multimodal.tests.datasets.get_dataset_path import get_dataset_path\n\n\nnp.random.seed(4)\n\n# =========== create a simple dataset ============\n\nn_tot = 200\nhalf = int(n_tot/2)\nn_tr = 120\n\n# create a bit more data than needed so that we can take \"half\" amount of samples for each class\nX0, y0 = 
datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False)\nX1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False)\n\n# make multi-view correspondence (select equal number of samples for both classes and order the data same way\n# in both views)\n\nyinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half])\nyinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half])\n\nX0 = X0[yinds0, :]\nX1 = X1[yinds1, :]\nY = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1\n\n# show data\n# =========== create a simple dataset ============\n\nn_tot = 200\nhalf = int(n_tot/2)\nn_tr = 120\n\n# create a bit more data than needed so that we can take \"half\" amount of samples for each class\nX0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False)\nX1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False)\n\n# make multi-view correspondence (select equal number of samples for both classes and order the data same way\n# in both views)\n\nyinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half])\nyinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half])\n\nX0 = X0[yinds0, :]\nX1 = X1[yinds1, :]\nY = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1\n\n# show data\nplt.figure(figsize=(10., 8.))\nplt.subplot(121)\nplt.scatter(X0[:, 0], X0[:, 1], c=Y)\nplt.title(\"all data, view 1\")\nplt.subplot(122)\nplt.scatter(X1[:, 0], X1[:, 1], c=Y)\nplt.title(\"all data, view 2\")\nplt.show()\n\n# shuffle\norder = np.random.permutation(n_tot)\nX0 = X0[order, :]\nX1 = X1[order, :]\nY = Y[order]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "make kernel dictionaries\n################################\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "kernel_dict = {}\ntest_kernel_dict = {}\nkernel_dict[0] = 
rbf_kernel(X0[0:n_tr, :])\nkernel_dict[1] = rbf_kernel(X1[0:n_tr, :])\ntest_kernel_dict[0] = rbf_kernel(X0[n_tr:n_tot, :], X0[0:n_tr, :])\ntest_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :])\nx_dict = {}\nx_dict[0] = X0[0:n_tr, :]\nx_dict[1] = X1[0:n_tr, :]\ntest_x_dict = {}\ntest_x_dict[0] = X0[n_tr:n_tot, :]\ntest_x_dict[1] = X1[n_tr:n_tot, :]\n# d= DataSample(kernel_dict)\n# a = d.data\n#\n# =========== use MVML in classifying the data ============\n# kernel precomputed\n# demo on how the code is intended to be used; parameters are not cross-validated, just picked some\n# # with approximation\n# # default: learn A, don't learn w (learn_A=1, learn_w=0)\nmvml = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='precomputed')\nmvml.fit(kernel_dict, Y[0:n_tr])\n\n\n#\n\npred1 = np.sign(mvml.predict(test_kernel_dict) ) # take sign for classification result\n#\n# without approximation\nmvml2 = MVML(lmbda=0.1, eta=1, nystrom_param=1, kernel='precomputed') # without approximation\nmvml2.fit(kernel_dict, Y[0:n_tr])\npred2 = np.sign(mvml2.predict(test_kernel_dict))\n#\n# use MVML_Cov, don't learn w\nmvml3 = MVML(lmbda=0.1, eta=1,learn_A=3, nystrom_param=1, kernel='precomputed')\nmvml3.fit(kernel_dict, Y[0:n_tr])\npred3 = np.sign(mvml3.predict(test_kernel_dict))\n#\n# use MVML_I, don't learn w\nmvml4 = MVML(lmbda=0.1, eta=1,learn_A=4, nystrom_param=1, kernel='precomputed')\nmvml4.fit(kernel_dict, Y[0:n_tr])\npred4 = np.sign(mvml4.predict(test_kernel_dict))\n#\n# use kernel rbf equivalent to case 1\nmvml5 = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='rbf')\nmvml5.fit(x_dict, Y[0:n_tr])\npred5 = np.sign(mvml5.predict(test_x_dict))\n#\n#\n# # =========== show results ============\n#\n# # accuracies\nacc1 = accuracy_score(Y[n_tr:n_tot], pred1)\nacc2 = accuracy_score(Y[n_tr:n_tot], pred2)\nacc3 = accuracy_score(Y[n_tr:n_tot], pred3)\nacc4 = accuracy_score(Y[n_tr:n_tot], pred4)\nacc5 = accuracy_score(Y[n_tr:n_tot], pred5)\n#\n# # display obtained 
accuracies\n#\nprint(\"MVML: \", acc1)\nprint(\"MVMLsparse: \", acc2)\nprint(\"MVML_Cov: \", acc3)\nprint(\"MVML_I: \", acc4)\nprint(\"MVML_rbf: \", acc5)\n#\n#\n# # plot data and some classification results\n#\nplt.figure(2, figsize=(10., 8.))\nplt.subplot(341)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=Y[n_tr:n_tot])\nplt.title(\"orig. view 1\")\nplt.subplot(342)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=Y[n_tr:n_tot])\nplt.title(\"orig. view 2\")\n#\npred1[np.where(pred1[:, 0] != Y[n_tr:n_tot])] = 0\npred1 = pred1.reshape((pred1.shape[0]))\nplt.subplot(343)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred1)\nplt.title(\"MVML view 1\")\nplt.subplot(344)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred1)\nplt.title(\"MVML view 2\")\n#\npred2[np.where(pred2[:, 0] != Y[n_tr:n_tot])] = 0\npred2 = pred2.reshape((pred2.shape[0]))\nplt.subplot(345)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred2)\nplt.title(\"MVMLsparse view 1\")\nplt.subplot(346)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred2)\nplt.title(\"MVMLsparse view 2\")\n#\npred3[np.where(pred3[:, 0] != Y[n_tr:n_tot])] = 0\npred3 = pred3.reshape((pred3.shape[0]))\n#\nplt.subplot(347)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred3)\nplt.title(\"MVML_Cov view 1\")\nplt.subplot(348)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred3)\nplt.title(\"MVML_Cov view 2\")\n#\npred4[np.where(pred4[:, 0] != Y[n_tr:n_tot])] = 0\npred4 = pred4.reshape((pred4.shape[0]))\nplt.subplot(349)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred4)\nplt.title(\"MVML_I view 1\")\nplt.subplot(3,4,10)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred4)\nplt.title(\"MVML_I view 2\")\n#\npred5[np.where(pred5[:, 0] != Y[n_tr:n_tot])] = 0\npred5 = pred5.reshape((pred5.shape[0]))\nplt.subplot(3,4,11)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred5)\nplt.title(\"MVML_rbf_kernel view 1\")\nplt.subplot(3,4,12)\nplt.scatter(X1[n_tr:n_tot, 
0], X1[n_tr:n_tot, 1], c=pred5)\nplt.title(\"MVML_rbf_kernel view 2\")\n#\nplt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/tutorial/auto_examples/mvml/plot_mvml_.py b/doc/tutorial/auto_examples/mvml/plot_mvml_.py new file mode 100644 index 0000000000000000000000000000000000000000..a449a7c868af0b92677a8703e6ca12e2685d9e35 --- /dev/null +++ b/doc/tutorial/auto_examples/mvml/plot_mvml_.py @@ -0,0 +1,206 @@ +# -*- coding: utf-8 -*- +""" +==== +MVML +==== +Demonstration on how MVML (in file mvml.py) is intended to be used with very simple simulated dataset + +Demonstration uses scikit-learn for retrieving datasets and for calculating rbf kernel function, see +http://scikit-learn.org/stable/ +""" + +import numpy as np +import matplotlib.pyplot as plt +from sklearn import datasets +from sklearn.metrics import accuracy_score +from sklearn.metrics.pairwise import rbf_kernel +from multimodal.kernels.mvml import MVML +from multimodal.datasets.data_sample import DataSample +from multimodal.tests.datasets.get_dataset_path import get_dataset_path + + +np.random.seed(4) + +# =========== create a simple dataset ============ + +n_tot = 200 +half = int(n_tot/2) +n_tr = 120 + +# create a bit more data than needed so that we can take "half" amount of samples for each class +X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False) +X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False) + +# make multi-view correspondence (select equal number of samples for both classes and order the data same way +# in both views) + 
+yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half]) +yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half]) + +X0 = X0[yinds0, :] +X1 = X1[yinds1, :] +Y = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1 + +# show data +# =========== create a simple dataset ============ + +n_tot = 200 +half = int(n_tot/2) +n_tr = 120 + +# create a bit more data than needed so that we can take "half" amount of samples for each class +X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False) +X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False) + +# make multi-view correspondence (select equal number of samples for both classes and order the data same way +# in both views) + +yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half]) +yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half]) + +X0 = X0[yinds0, :] +X1 = X1[yinds1, :] +Y = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1 + +# show data +plt.figure(figsize=(10., 8.)) +plt.subplot(121) +plt.scatter(X0[:, 0], X0[:, 1], c=Y) +plt.title("all data, view 1") +plt.subplot(122) +plt.scatter(X1[:, 0], X1[:, 1], c=Y) +plt.title("all data, view 2") +plt.show() + +# shuffle +order = np.random.permutation(n_tot) +X0 = X0[order, :] +X1 = X1[order, :] +Y = Y[order] +################################## +# make kernel dictionaries +################################# +kernel_dict = {} +test_kernel_dict = {} +kernel_dict[0] = rbf_kernel(X0[0:n_tr, :]) +kernel_dict[1] = rbf_kernel(X1[0:n_tr, :]) +test_kernel_dict[0] = rbf_kernel(X0[n_tr:n_tot, :], X0[0:n_tr, :]) +test_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :]) +x_dict = {} +x_dict[0] = X0[0:n_tr, :] +x_dict[1] = X1[0:n_tr, :] +test_x_dict = {} +test_x_dict[0] = X0[n_tr:n_tot, :] +test_x_dict[1] = X1[n_tr:n_tot, :] +# d= DataSample(kernel_dict) +# a = d.data +# +# =========== use MVML in classifying the data ============ 
+# kernel precomputed +# demo on how the code is intended to be used; parameters are not cross-validated, just picked some +# # with approximation +# # default: learn A, don't learn w (learn_A=1, learn_w=0) +mvml = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='precomputed') +mvml.fit(kernel_dict, Y[0:n_tr]) + + +# + +pred1 = np.sign(mvml.predict(test_kernel_dict) ) # take sign for classification result +# +# without approximation +mvml2 = MVML(lmbda=0.1, eta=1, nystrom_param=1, kernel='precomputed') # without approximation +mvml2.fit(kernel_dict, Y[0:n_tr]) +pred2 = np.sign(mvml2.predict(test_kernel_dict)) +# +# use MVML_Cov, don't learn w +mvml3 = MVML(lmbda=0.1, eta=1,learn_A=3, nystrom_param=1, kernel='precomputed') +mvml3.fit(kernel_dict, Y[0:n_tr]) +pred3 = np.sign(mvml3.predict(test_kernel_dict)) +# +# use MVML_I, don't learn w +mvml4 = MVML(lmbda=0.1, eta=1,learn_A=4, nystrom_param=1, kernel='precomputed') +mvml4.fit(kernel_dict, Y[0:n_tr]) +pred4 = np.sign(mvml4.predict(test_kernel_dict)) +# +# use kernel rbf equivalent to case 1 +mvml5 = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='rbf') +mvml5.fit(x_dict, Y[0:n_tr]) +pred5 = np.sign(mvml5.predict(test_x_dict)) +# +# +# # =========== show results ============ +# +# # accuracies +acc1 = accuracy_score(Y[n_tr:n_tot], pred1) +acc2 = accuracy_score(Y[n_tr:n_tot], pred2) +acc3 = accuracy_score(Y[n_tr:n_tot], pred3) +acc4 = accuracy_score(Y[n_tr:n_tot], pred4) +acc5 = accuracy_score(Y[n_tr:n_tot], pred5) +# +# # display obtained accuracies +# +print("MVML: ", acc1) +print("MVMLsparse: ", acc2) +print("MVML_Cov: ", acc3) +print("MVML_I: ", acc4) +print("MVML_rbf: ", acc5) +# +# +# # plot data and some classification results +# +plt.figure(2, figsize=(10., 8.)) +plt.subplot(341) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=Y[n_tr:n_tot]) +plt.title("orig. view 1") +plt.subplot(342) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=Y[n_tr:n_tot]) +plt.title("orig. 
view 2") +# +pred1[np.where(pred1[:, 0] != Y[n_tr:n_tot])] = 0 +pred1 = pred1.reshape((pred1.shape[0])) +plt.subplot(343) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred1) +plt.title("MVML view 1") +plt.subplot(344) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred1) +plt.title("MVML view 2") +# +pred2[np.where(pred2[:, 0] != Y[n_tr:n_tot])] = 0 +pred2 = pred2.reshape((pred2.shape[0])) +plt.subplot(345) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred2) +plt.title("MVMLsparse view 1") +plt.subplot(346) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred2) +plt.title("MVMLsparse view 2") +# +pred3[np.where(pred3[:, 0] != Y[n_tr:n_tot])] = 0 +pred3 = pred3.reshape((pred3.shape[0])) +# +plt.subplot(347) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred3) +plt.title("MVML_Cov view 1") +plt.subplot(348) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred3) +plt.title("MVML_Cov view 2") +# +pred4[np.where(pred4[:, 0] != Y[n_tr:n_tot])] = 0 +pred4 = pred4.reshape((pred4.shape[0])) +plt.subplot(349) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred4) +plt.title("MVML_I view 1") +plt.subplot(3,4,10) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred4) +plt.title("MVML_I view 2") +# +pred5[np.where(pred5[:, 0] != Y[n_tr:n_tot])] = 0 +pred5 = pred5.reshape((pred5.shape[0])) +plt.subplot(3,4,11) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred5) +plt.title("MVML_rbf_kernel view 1") +plt.subplot(3,4,12) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred5) +plt.title("MVML_rbf_kernel view 2") +# +plt.show() diff --git a/doc/tutorial/auto_examples/mvml/plot_mvml_.py.md5 b/doc/tutorial/auto_examples/mvml/plot_mvml_.py.md5 new file mode 100644 index 0000000000000000000000000000000000000000..4ee8d03a224fd6a5860f0384a9b9b66efff9550a --- /dev/null +++ b/doc/tutorial/auto_examples/mvml/plot_mvml_.py.md5 @@ -0,0 +1 @@ +870a37ab95c98874716ccd8b360cbc36 \ No newline at end of file diff --git 
a/doc/tutorial/auto_examples/mvml/plot_mvml_.rst b/doc/tutorial/auto_examples/mvml/plot_mvml_.rst new file mode 100644 index 0000000000000000000000000000000000000000..cce0d8f36c9539a07b91880169c0f5b6fd17f6b2 --- /dev/null +++ b/doc/tutorial/auto_examples/mvml/plot_mvml_.rst @@ -0,0 +1,299 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here <sphx_glr_download_tutorial_auto_examples_mvml_plot_mvml_.py>` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorial_auto_examples_mvml_plot_mvml_.py: + + +==== +MVML +==== +Demonstration on how MVML (in file mvml.py) is intended to be used with very simple simulated dataset + +Demonstration uses scikit-learn for retrieving datasets and for calculating rbf kernel function, see +http://scikit-learn.org/stable/ + + +.. code-block:: default + + + import numpy as np + import matplotlib.pyplot as plt + from sklearn import datasets + from sklearn.metrics import accuracy_score + from sklearn.metrics.pairwise import rbf_kernel + from multimodal.kernels.mvml import MVML + from multimodal.datasets.data_sample import DataSample + from multimodal.tests.datasets.get_dataset_path import get_dataset_path + + + np.random.seed(4) + + # =========== create a simple dataset ============ + + n_tot = 200 + half = int(n_tot/2) + n_tr = 120 + + # create a bit more data than needed so that we can take "half" amount of samples for each class + X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False) + X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False) + + # make multi-view correspondence (select equal number of samples for both classes and order the data same way + # in both views) + + yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half]) + yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half]) + + X0 = X0[yinds0, :] + X1 = X1[yinds1, :] + Y = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 
1 + + # show data + # =========== create a simple dataset ============ + + n_tot = 200 + half = int(n_tot/2) + n_tr = 120 + + # create a bit more data than needed so that we can take "half" amount of samples for each class + X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False) + X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False) + + # make multi-view correspondence (select equal number of samples for both classes and order the data same way + # in both views) + + yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half]) + yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half]) + + X0 = X0[yinds0, :] + X1 = X1[yinds1, :] + Y = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1 + + # show data + plt.figure(figsize=(10., 8.)) + plt.subplot(121) + plt.scatter(X0[:, 0], X0[:, 1], c=Y) + plt.title("all data, view 1") + plt.subplot(122) + plt.scatter(X1[:, 0], X1[:, 1], c=Y) + plt.title("all data, view 2") + plt.show() + + # shuffle + order = np.random.permutation(n_tot) + X0 = X0[order, :] + X1 = X1[order, :] + Y = Y[order] + + + +.. image:: /tutorial/auto_examples/mvml/images/sphx_glr_plot_mvml__001.png + :class: sphx-glr-single-img + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + /home/dominique/projets/ANR-Lives/scikit-multimodallearn/examples/mvml/plot_mvml_.py:73: UserWarning: Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure. + plt.show() + + + + +make kernel dictionaries +################################ + + +.. 
code-block:: default + + kernel_dict = {} + test_kernel_dict = {} + kernel_dict[0] = rbf_kernel(X0[0:n_tr, :]) + kernel_dict[1] = rbf_kernel(X1[0:n_tr, :]) + test_kernel_dict[0] = rbf_kernel(X0[n_tr:n_tot, :], X0[0:n_tr, :]) + test_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :]) + x_dict = {} + x_dict[0] = X0[0:n_tr, :] + x_dict[1] = X1[0:n_tr, :] + test_x_dict = {} + test_x_dict[0] = X0[n_tr:n_tot, :] + test_x_dict[1] = X1[n_tr:n_tot, :] + # d= DataSample(kernel_dict) + # a = d.data + # + # =========== use MVML in classifying the data ============ + # kernel precomputed + # demo on how the code is intended to be used; parameters are not cross-validated, just picked some + # # with approximation + # # default: learn A, don't learn w (learn_A=1, learn_w=0) + mvml = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='precomputed') + mvml.fit(kernel_dict, Y[0:n_tr]) + + + # + + pred1 = np.sign(mvml.predict(test_kernel_dict) ) # take sign for classification result + # + # without approximation + mvml2 = MVML(lmbda=0.1, eta=1, nystrom_param=1, kernel='precomputed') # without approximation + mvml2.fit(kernel_dict, Y[0:n_tr]) + pred2 = np.sign(mvml2.predict(test_kernel_dict)) + # + # use MVML_Cov, don't learn w + mvml3 = MVML(lmbda=0.1, eta=1,learn_A=3, nystrom_param=1, kernel='precomputed') + mvml3.fit(kernel_dict, Y[0:n_tr]) + pred3 = np.sign(mvml3.predict(test_kernel_dict)) + # + # use MVML_I, don't learn w + mvml4 = MVML(lmbda=0.1, eta=1,learn_A=4, nystrom_param=1, kernel='precomputed') + mvml4.fit(kernel_dict, Y[0:n_tr]) + pred4 = np.sign(mvml4.predict(test_kernel_dict)) + # + # use kernel rbf equivalent to case 1 + mvml5 = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='rbf') + mvml5.fit(x_dict, Y[0:n_tr]) + pred5 = np.sign(mvml5.predict(test_x_dict)) + # + # + # # =========== show results ============ + # + # # accuracies + acc1 = accuracy_score(Y[n_tr:n_tot], pred1) + acc2 = accuracy_score(Y[n_tr:n_tot], pred2) + acc3 = accuracy_score(Y[n_tr:n_tot], 
pred3) + acc4 = accuracy_score(Y[n_tr:n_tot], pred4) + acc5 = accuracy_score(Y[n_tr:n_tot], pred5) + # + # # display obtained accuracies + # + print("MVML: ", acc1) + print("MVMLsparse: ", acc2) + print("MVML_Cov: ", acc3) + print("MVML_I: ", acc4) + print("MVML_rbf: ", acc5) + # + # + # # plot data and some classification results + # + plt.figure(2, figsize=(10., 8.)) + plt.subplot(341) + plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=Y[n_tr:n_tot]) + plt.title("orig. view 1") + plt.subplot(342) + plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=Y[n_tr:n_tot]) + plt.title("orig. view 2") + # + pred1[np.where(pred1[:, 0] != Y[n_tr:n_tot])] = 0 + pred1 = pred1.reshape((pred1.shape[0])) + plt.subplot(343) + plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred1) + plt.title("MVML view 1") + plt.subplot(344) + plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred1) + plt.title("MVML view 2") + # + pred2[np.where(pred2[:, 0] != Y[n_tr:n_tot])] = 0 + pred2 = pred2.reshape((pred2.shape[0])) + plt.subplot(345) + plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred2) + plt.title("MVMLsparse view 1") + plt.subplot(346) + plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred2) + plt.title("MVMLsparse view 2") + # + pred3[np.where(pred3[:, 0] != Y[n_tr:n_tot])] = 0 + pred3 = pred3.reshape((pred3.shape[0])) + # + plt.subplot(347) + plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred3) + plt.title("MVML_Cov view 1") + plt.subplot(348) + plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred3) + plt.title("MVML_Cov view 2") + # + pred4[np.where(pred4[:, 0] != Y[n_tr:n_tot])] = 0 + pred4 = pred4.reshape((pred4.shape[0])) + plt.subplot(349) + plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred4) + plt.title("MVML_I view 1") + plt.subplot(3,4,10) + plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred4) + plt.title("MVML_I view 2") + # + pred5[np.where(pred5[:, 0] != Y[n_tr:n_tot])] = 0 + pred5 = pred5.reshape((pred5.shape[0])) + 
plt.subplot(3,4,11) + plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred5) + plt.title("MVML_rbf_kernel view 1") + plt.subplot(3,4,12) + plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred5) + plt.title("MVML_rbf_kernel view 2") + # + plt.show() + + + +.. image:: /tutorial/auto_examples/mvml/images/sphx_glr_plot_mvml__002.png + :class: sphx-glr-single-img + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + WARNING:root:warning appears during fit process{'precond_A': 1, 'precond_A_1': 1} + WARNING:root:warning appears during fit process{'precond_A': 1, 'precond_A_1': 4} + WARNING:root:warning appears during fit process{'precond_A': 1} + WARNING:root:warning appears during fit process{'precond_A_1': 1} + WARNING:root:warning appears during fit process{'precond_A': 1, 'precond_A_1': 1} + MVML: 0.7875 + MVMLsparse: 0.8375 + MVML_Cov: 0.85 + MVML_I: 0.8625 + MVML_rbf: 0.7875 + /home/dominique/projets/ANR-Lives/scikit-multimodallearn/examples/mvml/plot_mvml_.py:206: UserWarning: Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure. + plt.show() + + + + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 5.285 seconds) + + +.. _sphx_glr_download_tutorial_auto_examples_mvml_plot_mvml_.py: + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: plot_mvml_.py <plot_mvml_.py>` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: plot_mvml_.ipynb <plot_mvml_.ipynb>` + + +.. only:: html + + .. 
rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_ diff --git a/doc/tutorial/auto_examples/mvml/plot_mvml__codeobj.pickle b/doc/tutorial/auto_examples/mvml/plot_mvml__codeobj.pickle new file mode 100644 index 0000000000000000000000000000000000000000..25388d951eb345c5a8f5fb3a7b5596ef70993a1f Binary files /dev/null and b/doc/tutorial/auto_examples/mvml/plot_mvml__codeobj.pickle differ diff --git a/doc/tutorial/auto_examples/mvml/sg_execution_times.rst b/doc/tutorial/auto_examples/mvml/sg_execution_times.rst new file mode 100644 index 0000000000000000000000000000000000000000..bca9ff828c1213ad8844db81d6ff96e69c4dc832 --- /dev/null +++ b/doc/tutorial/auto_examples/mvml/sg_execution_times.rst @@ -0,0 +1,12 @@ + +:orphan: + +.. _sphx_glr_tutorial_auto_examples_mvml_sg_execution_times: + +Computation times +================= +**00:05.285** total execution time for **tutorial_auto_examples_mvml** files: + ++-------------------------------------------------------------------------------+-----------+--------+ +| :ref:`sphx_glr_tutorial_auto_examples_mvml_plot_mvml_.py` (``plot_mvml_.py``) | 00:05.285 | 0.0 MB | ++-------------------------------------------------------------------------------+-----------+--------+ diff --git a/doc/tutorial/auto_examples/mvml_plot_.ipynb b/doc/tutorial/auto_examples/mvml_plot_.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..830427c6c91ffd2da3cc37634a043735a29b0122 --- /dev/null +++ b/doc/tutorial/auto_examples/mvml_plot_.ipynb @@ -0,0 +1,54 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# MVML\n\nDemonstration on how MVML (in file mvml.py) is intended to be used with very simple simulated dataset\n\nDemonstration uses scikit-learn for retrieving datasets and for 
calculating rbf kernel function, see\nhttp://scikit-learn.org/stable/\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics.pairwise import rbf_kernel\nfrom multimodal.kernels.mvml import MVML\nfrom multimodal.datasets.data_sample import DataSample\nfrom multimodal.tests.datasets.get_dataset_path import get_dataset_path\n\n\nnp.random.seed(4)\n\n# =========== create a simple dataset ============\n\nn_tot = 200\nhalf = int(n_tot/2)\nn_tr = 120\n\n# create a bit more data than needed so that we can take \"half\" amount of samples for each class\nX0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False)\nX1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False)\n\n# make multi-view correspondence (select equal number of samples for both classes and order the data same way\n# in both views)\n\nyinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half])\nyinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half])\n\nX0 = X0[yinds0, :]\nX1 = X1[yinds1, :]\nY = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1\n\n# show data\n# =========== create a simple dataset ============\n\nn_tot = 200\nhalf = int(n_tot/2)\nn_tr = 120\n\n# create a bit more data than needed so that we can take \"half\" amount of samples for each class\nX0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False)\nX1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False)\n\n# make multi-view correspondence (select equal number of samples for both classes and order the data same way\n# in both views)\n\nyinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half])\nyinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 
1)[0][0:half])\n\nX0 = X0[yinds0, :]\nX1 = X1[yinds1, :]\nY = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1\n\n# show data\nplt.figure(figsize=(10., 8.))\nplt.subplot(121)\nplt.scatter(X0[:, 0], X0[:, 1], c=Y)\nplt.title(\"all data, view 1\")\nplt.subplot(122)\nplt.scatter(X1[:, 0], X1[:, 1], c=Y)\nplt.title(\"all data, view 2\")\nplt.show()\n\n# shuffle\norder = np.random.permutation(n_tot)\nX0 = X0[order, :]\nX1 = X1[order, :]\nY = Y[order]\n\n# make kernel dictionaries\nkernel_dict = {}\ntest_kernel_dict = {}\nkernel_dict[0] = rbf_kernel(X0[0:n_tr, :])\nkernel_dict[1] = rbf_kernel(X1[0:n_tr, :])\ntest_kernel_dict[0] = rbf_kernel(X0[n_tr:n_tot, :], X0[0:n_tr, :])\ntest_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :])\n\n# input_x = get_dataset_path(\"input_x_dic.pkl\")\n# f = open(input_x, \"wb\")\n# pickle.dump(input_x, f)\n#\n#\n# d= DataSample(kernel_dict)\n# a = d.data\n#\n# =========== use MVML in classifying the data ============\n#\n# demo on how the code is intended to be used; parameters are not cross-validated, just picked some\n# # with approximation\n# # default: learn A, don't learn w (learn_A=1, learn_w=0)\nmvml = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='precomputed')\nmvml.fit(kernel_dict, Y[0:n_tr])\n\n\n#\n\npred1 = np.sign(mvml.predict(test_kernel_dict) ) # take sign for classification result\n#\n# without approximation\nmvml2 = MVML(lmbda=0.1, eta=1, nystrom_param=1, kernel='precomputed') # without approximation\nmvml2.fit(kernel_dict, Y[0:n_tr])\npred2 = np.sign(mvml2.predict(test_kernel_dict))\n#\n# use MVML_Cov, don't learn w\nmvml3 = MVML(lmbda=0.1, eta=1,learn_A=3, nystrom_param=1, kernel='precomputed')\nmvml3.fit(kernel_dict, Y[0:n_tr])\npred3 = np.sign(mvml.predict(test_kernel_dict))\n#\n# use MVML_I, don't learn w\nmvml3 = MVML(lmbda=0.1, eta=1,learn_A=4, nystrom_param=1, kernel='precomputed')\n\npred4 = np.sign(mvml.predict(test_kernel_dict))\n#\n#\n# # =========== show results ============\n#\n# # 
accuracies\nacc1 = accuracy_score(Y[n_tr:n_tot], pred1)\nacc2 = accuracy_score(Y[n_tr:n_tot], pred2)\nacc3 = accuracy_score(Y[n_tr:n_tot], pred3)\nacc4 = accuracy_score(Y[n_tr:n_tot], pred4)\n#\n# # display obtained accuracies\n#\nprint(\"MVML: \", acc1)\nprint(\"MVMLsparse: \", acc2)\nprint(\"MVML_Cov: \", acc3)\nprint(\"MVML_I: \", acc4)\n#\n#\n# # plot data and some classification results\n#\nplt.figure(2, figsize=(10., 8.))\nplt.subplot(341)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=Y[n_tr:n_tot])\nplt.title(\"orig. view 1\")\nplt.subplot(342)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=Y[n_tr:n_tot])\nplt.title(\"orig. view 2\")\n#\npred1[np.where(pred1[:, 0] != Y[n_tr:n_tot])] = 0\npred1 = pred1.reshape((pred1.shape[0]))\nplt.subplot(343)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred1)\nplt.title(\"MVML view 1\")\nplt.subplot(344)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred1)\nplt.title(\"MVML view 2\")\n#\npred2[np.where(pred2[:, 0] != Y[n_tr:n_tot])] = 0\npred2 = pred2.reshape((pred2.shape[0]))\nplt.subplot(345)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred2)\nplt.title(\"MVMLsparse view 1\")\nplt.subplot(346)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred2)\nplt.title(\"MVMLsparse view 2\")\n#\npred3[np.where(pred3[:, 0] != Y[n_tr:n_tot])] = 0\npred3 = pred3.reshape((pred3.shape[0]))\n#\nplt.subplot(347)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred3)\nplt.title(\"MVML_Cov view 1\")\nplt.subplot(348)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred3)\nplt.title(\"MVML_Cov view 2\")\n#\npred4[np.where(pred4[:, 0] != Y[n_tr:n_tot])] = 0\npred4 = pred4.reshape((pred4.shape[0]))\nplt.subplot(349)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred4)\nplt.title(\"MVML_I view 1\")\nplt.subplot(3,4,10)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred4)\nplt.title(\"MVML_I view 2\")\n#\nplt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + 
"display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/tutorial/auto_examples/mvml_plot_.py b/doc/tutorial/auto_examples/mvml_plot_.py new file mode 100644 index 0000000000000000000000000000000000000000..1abf1ea4e719f7b258baa05b7d21b32b1fa97988 --- /dev/null +++ b/doc/tutorial/auto_examples/mvml_plot_.py @@ -0,0 +1,189 @@ +# -*- coding: utf-8 -*- +""" +==== +MVML +==== +Demonstration on how MVML (in file mvml.py) is intended to be used with very simple simulated dataset + +Demonstration uses scikit-learn for retrieving datasets and for calculating rbf kernel function, see +http://scikit-learn.org/stable/ +""" + +import numpy as np +import matplotlib.pyplot as plt +from sklearn import datasets +from sklearn.metrics import accuracy_score +from sklearn.metrics.pairwise import rbf_kernel +from multimodal.kernels.mvml import MVML +from multimodal.datasets.data_sample import DataSample +from multimodal.tests.datasets.get_dataset_path import get_dataset_path + + +np.random.seed(4) + +# =========== create a simple dataset ============ + +n_tot = 200 +half = int(n_tot/2) +n_tr = 120 + +# create a bit more data than needed so that we can take "half" amount of samples for each class +X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False) +X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False) + +# make multi-view correspondence (select equal number of samples for both classes and order the data same way +# in both views) + +yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half]) +yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 
1)[0][0:half]) + +X0 = X0[yinds0, :] +X1 = X1[yinds1, :] +Y = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1 + +# show data +# =========== create a simple dataset ============ + +n_tot = 200 +half = int(n_tot/2) +n_tr = 120 + +# create a bit more data than needed so that we can take "half" amount of samples for each class +X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False) +X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False) + +# make multi-view correspondence (select equal number of samples for both classes and order the data same way +# in both views) + +yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half]) +yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half]) + +X0 = X0[yinds0, :] +X1 = X1[yinds1, :] +Y = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1 + +# show data +plt.figure(figsize=(10., 8.)) +plt.subplot(121) +plt.scatter(X0[:, 0], X0[:, 1], c=Y) +plt.title("all data, view 1") +plt.subplot(122) +plt.scatter(X1[:, 0], X1[:, 1], c=Y) +plt.title("all data, view 2") +plt.show() + +# shuffle +order = np.random.permutation(n_tot) +X0 = X0[order, :] +X1 = X1[order, :] +Y = Y[order] + +# make kernel dictionaries +kernel_dict = {} +test_kernel_dict = {} +kernel_dict[0] = rbf_kernel(X0[0:n_tr, :]) +kernel_dict[1] = rbf_kernel(X1[0:n_tr, :]) +test_kernel_dict[0] = rbf_kernel(X0[n_tr:n_tot, :], X0[0:n_tr, :]) +test_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :]) + +# input_x = get_dataset_path("input_x_dic.pkl") +# f = open(input_x, "wb") +# pickle.dump(input_x, f) +# +# +# d= DataSample(kernel_dict) +# a = d.data +# +# =========== use MVML in classifying the data ============ +# +# demo on how the code is intended to be used; parameters are not cross-validated, just picked some +# # with approximation +# # default: learn A, don't learn w (learn_A=1, learn_w=0) +mvml = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='precomputed') 
+mvml.fit(kernel_dict, Y[0:n_tr]) + + +# + +pred1 = np.sign(mvml.predict(test_kernel_dict) ) # take sign for classification result +# +# without approximation +mvml2 = MVML(lmbda=0.1, eta=1, nystrom_param=1, kernel='precomputed') # without approximation +mvml2.fit(kernel_dict, Y[0:n_tr]) +pred2 = np.sign(mvml2.predict(test_kernel_dict)) +# +# use MVML_Cov, don't learn w +mvml3 = MVML(lmbda=0.1, eta=1,learn_A=3, nystrom_param=1, kernel='precomputed') +mvml3.fit(kernel_dict, Y[0:n_tr]) +pred3 = np.sign(mvml3.predict(test_kernel_dict)) +# +# use MVML_I, don't learn w +mvml4 = MVML(lmbda=0.1, eta=1,learn_A=4, nystrom_param=1, kernel='precomputed') +mvml4.fit(kernel_dict, Y[0:n_tr]) +pred4 = np.sign(mvml4.predict(test_kernel_dict)) +# +# +# # =========== show results ============ +# +# # accuracies +acc1 = accuracy_score(Y[n_tr:n_tot], pred1) +acc2 = accuracy_score(Y[n_tr:n_tot], pred2) +acc3 = accuracy_score(Y[n_tr:n_tot], pred3) +acc4 = accuracy_score(Y[n_tr:n_tot], pred4) +# +# # display obtained accuracies +# +print("MVML: ", acc1) +print("MVMLsparse: ", acc2) +print("MVML_Cov: ", acc3) +print("MVML_I: ", acc4) +# +# +# # plot data and some classification results +# +plt.figure(2, figsize=(10., 8.)) +plt.subplot(341) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=Y[n_tr:n_tot]) +plt.title("orig. view 1") +plt.subplot(342) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=Y[n_tr:n_tot]) +plt.title("orig. 
view 2") +# +pred1[np.where(pred1[:, 0] != Y[n_tr:n_tot])] = 0 +pred1 = pred1.reshape((pred1.shape[0])) +plt.subplot(343) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred1) +plt.title("MVML view 1") +plt.subplot(344) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred1) +plt.title("MVML view 2") +# +pred2[np.where(pred2[:, 0] != Y[n_tr:n_tot])] = 0 +pred2 = pred2.reshape((pred2.shape[0])) +plt.subplot(345) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred2) +plt.title("MVMLsparse view 1") +plt.subplot(346) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred2) +plt.title("MVMLsparse view 2") +# +pred3[np.where(pred3[:, 0] != Y[n_tr:n_tot])] = 0 +pred3 = pred3.reshape((pred3.shape[0])) +# +plt.subplot(347) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred3) +plt.title("MVML_Cov view 1") +plt.subplot(348) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred3) +plt.title("MVML_Cov view 2") +# +pred4[np.where(pred4[:, 0] != Y[n_tr:n_tot])] = 0 +pred4 = pred4.reshape((pred4.shape[0])) +plt.subplot(349) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred4) +plt.title("MVML_I view 1") +plt.subplot(3,4,10) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred4) +plt.title("MVML_I view 2") +# +plt.show() diff --git a/doc/tutorial/auto_examples/mvml_plot_.rst b/doc/tutorial/auto_examples/mvml_plot_.rst new file mode 100644 index 0000000000000000000000000000000000000000..7d5e65d8aaf8cfb0540e2b5dd1fe669b16a1274d --- /dev/null +++ b/doc/tutorial/auto_examples/mvml_plot_.rst @@ -0,0 +1,232 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here <sphx_glr_download_tutorial_auto_examples_mvml_plot_.py>` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. 
_sphx_glr_tutorial_auto_examples_mvml_plot_.py: + + +==== +MVML +==== +Demonstration on how MVML (in file mvml.py) is intended to be used with very simple simulated dataset + +Demonstration uses scikit-learn for retrieving datasets and for calculating rbf kernel function, see +http://scikit-learn.org/stable/ + + +.. code-block:: default + + + import numpy as np + import matplotlib.pyplot as plt + from sklearn import datasets + from sklearn.metrics import accuracy_score + from sklearn.metrics.pairwise import rbf_kernel + from multimodal.kernels.mvml import MVML + from multimodal.datasets.data_sample import DataSample + from multimodal.tests.datasets.get_dataset_path import get_dataset_path + + + np.random.seed(4) + + # =========== create a simple dataset ============ + + n_tot = 200 + half = int(n_tot/2) + n_tr = 120 + + # create a bit more data than needed so that we can take "half" amount of samples for each class + X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False) + X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False) + + # make multi-view correspondence (select equal number of samples for both classes and order the data same way + # in both views) + + yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half]) + yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half]) + + X0 = X0[yinds0, :] + X1 = X1[yinds1, :] + Y = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1 + + # show data + # =========== create a simple dataset ============ + + n_tot = 200 + half = int(n_tot/2) + n_tr = 120 + + # create a bit more data than needed so that we can take "half" amount of samples for each class + X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False) + X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False) + + # make multi-view correspondence (select equal number of samples for both classes and order the data same way + # in both views) 
+ + yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half]) + yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half]) + + X0 = X0[yinds0, :] + X1 = X1[yinds1, :] + Y = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1 + + # show data + plt.figure(figsize=(10., 8.)) + plt.subplot(121) + plt.scatter(X0[:, 0], X0[:, 1], c=Y) + plt.title("all data, view 1") + plt.subplot(122) + plt.scatter(X1[:, 0], X1[:, 1], c=Y) + plt.title("all data, view 2") + plt.show() + + # shuffle + order = np.random.permutation(n_tot) + X0 = X0[order, :] + X1 = X1[order, :] + Y = Y[order] + + # make kernel dictionaries + kernel_dict = {} + test_kernel_dict = {} + kernel_dict[0] = rbf_kernel(X0[0:n_tr, :]) + kernel_dict[1] = rbf_kernel(X1[0:n_tr, :]) + test_kernel_dict[0] = rbf_kernel(X0[n_tr:n_tot, :], X0[0:n_tr, :]) + test_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :]) + + # input_x = get_dataset_path("input_x_dic.pkl") + # f = open(input_x, "wb") + # pickle.dump(input_x, f) + # + # + # d= DataSample(kernel_dict) + # a = d.data + # + # =========== use MVML in classifying the data ============ + # + # demo on how the code is intended to be used; parameters are not cross-validated, just picked some + # # with approximation + # # default: learn A, don't learn w (learn_A=1, learn_w=0) + mvml = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='precomputed') + mvml.fit(kernel_dict, Y[0:n_tr]) + + + # + + pred1 = np.sign(mvml.predict(test_kernel_dict) ) # take sign for classification result + # + # without approximation + mvml2 = MVML(lmbda=0.1, eta=1, nystrom_param=1, kernel='precomputed') # without approximation + mvml2.fit(kernel_dict, Y[0:n_tr]) + pred2 = np.sign(mvml2.predict(test_kernel_dict)) + # + # use MVML_Cov, don't learn w + mvml3 = MVML(lmbda=0.1, eta=1,learn_A=3, nystrom_param=1, kernel='precomputed') + mvml3.fit(kernel_dict, Y[0:n_tr]) + pred3 = np.sign(mvml.predict(test_kernel_dict)) + # + # use MVML_I, don't 
learn w + mvml3 = MVML(lmbda=0.1, eta=1,learn_A=4, nystrom_param=1, kernel='precomputed') + + pred4 = np.sign(mvml.predict(test_kernel_dict)) + # + # + # # =========== show results ============ + # + # # accuracies + acc1 = accuracy_score(Y[n_tr:n_tot], pred1) + acc2 = accuracy_score(Y[n_tr:n_tot], pred2) + acc3 = accuracy_score(Y[n_tr:n_tot], pred3) + acc4 = accuracy_score(Y[n_tr:n_tot], pred4) + # + # # display obtained accuracies + # + print("MVML: ", acc1) + print("MVMLsparse: ", acc2) + print("MVML_Cov: ", acc3) + print("MVML_I: ", acc4) + # + # + # # plot data and some classification results + # + plt.figure(2, figsize=(10., 8.)) + plt.subplot(341) + plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=Y[n_tr:n_tot]) + plt.title("orig. view 1") + plt.subplot(342) + plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=Y[n_tr:n_tot]) + plt.title("orig. view 2") + # + pred1[np.where(pred1[:, 0] != Y[n_tr:n_tot])] = 0 + pred1 = pred1.reshape((pred1.shape[0])) + plt.subplot(343) + plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred1) + plt.title("MVML view 1") + plt.subplot(344) + plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred1) + plt.title("MVML view 2") + # + pred2[np.where(pred2[:, 0] != Y[n_tr:n_tot])] = 0 + pred2 = pred2.reshape((pred2.shape[0])) + plt.subplot(345) + plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred2) + plt.title("MVMLsparse view 1") + plt.subplot(346) + plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred2) + plt.title("MVMLsparse view 2") + # + pred3[np.where(pred3[:, 0] != Y[n_tr:n_tot])] = 0 + pred3 = pred3.reshape((pred3.shape[0])) + # + plt.subplot(347) + plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred3) + plt.title("MVML_Cov view 1") + plt.subplot(348) + plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred3) + plt.title("MVML_Cov view 2") + # + pred4[np.where(pred4[:, 0] != Y[n_tr:n_tot])] = 0 + pred4 = pred4.reshape((pred4.shape[0])) + plt.subplot(349) + plt.scatter(X0[n_tr:n_tot, 0], 
X0[n_tr:n_tot, 1], c=pred4) + plt.title("MVML_I view 1") + plt.subplot(3,4,10) + plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred4) + plt.title("MVML_I view 2") + # + plt.show() + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 0.000 seconds) + + +.. _sphx_glr_download_tutorial_auto_examples_mvml_plot_.py: + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: mvml_plot_.py <mvml_plot_.py>` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: mvml_plot_.ipynb <mvml_plot_.ipynb>` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_ diff --git a/doc/tutorial/auto_examples/mvml_plot__codeobj.pickle b/doc/tutorial/auto_examples/mvml_plot__codeobj.pickle new file mode 100644 index 0000000000000000000000000000000000000000..3a2d50c1dbdf979684f97440eb387b7c4bff82be Binary files /dev/null and b/doc/tutorial/auto_examples/mvml_plot__codeobj.pickle differ diff --git a/doc/tutorial/auto_examples/sg_execution_times.rst b/doc/tutorial/auto_examples/sg_execution_times.rst new file mode 100644 index 0000000000000000000000000000000000000000..299a794c335123196d31463dad8f9b2a186c2615 --- /dev/null +++ b/doc/tutorial/auto_examples/sg_execution_times.rst @@ -0,0 +1,18 @@ + +:orphan: + +.. 
_sphx_glr_tutorial_auto_examples_sg_execution_times: + +Computation times +================= +**00:02.336** total execution time for **tutorial_auto_examples** files: + ++-----------------------------------------------------------------------------------------------------+-----------+--------+ +| :ref:`sphx_glr_auto_examples_mumbo_plot_3_views_3_classes.py` (``mumbo_plot_3_views_3_classes.py``) | 00:01.313 | 0.0 MB | ++-----------------------------------------------------------------------------------------------------+-----------+--------+ +| :ref:`sphx_glr_auto_examples_mumbo_plot_2_views_2_classes.py` (``mumbo_plot_2_views_2_classes.py``) | 00:01.023 | 0.0 MB | ++-----------------------------------------------------------------------------------------------------+-----------+--------+ +| :ref:`sphx_glr_auto_examples_cumbo_plot_3_views_3_classes.py` (``cumbo_plot_3_views_3_classes.py``) | 00:01.313 | 0.0 MB | ++-----------------------------------------------------------------------------------------------------+-----------+--------+ +| :ref:`sphx_glr_auto_examples_cumbo_plot_2_views_2_classes.py` (``cumbo_plot_2_views_2_classes.py``) | 00:01.023 | 0.0 MB | ++-----------------------------------------------------------------------------------------------------+-----------+--------+ diff --git a/doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.decision_function.examples b/doc/tutorial/backreferences/metriclearning.datasets.base.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.decision_function.examples rename to doc/tutorial/backreferences/metriclearning.datasets.base.examples diff --git a/doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.examples b/doc/tutorial/backreferences/metriclearning.datasets.base.load_dict.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.examples rename to 
doc/tutorial/backreferences/metriclearning.datasets.base.load_dict.examples diff --git a/doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.fit.examples b/doc/tutorial/backreferences/metriclearning.datasets.base.load_npz_X_y.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.fit.examples rename to doc/tutorial/backreferences/metriclearning.datasets.base.load_npz_X_y.examples diff --git a/doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.get_params.examples b/doc/tutorial/backreferences/metriclearning.datasets.base.save_dict.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.get_params.examples rename to doc/tutorial/backreferences/metriclearning.datasets.base.save_dict.examples diff --git a/doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.predict.examples b/doc/tutorial/backreferences/metriclearning.datasets.base.save_npz_X_y.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.predict.examples rename to doc/tutorial/backreferences/metriclearning.datasets.base.save_npz_X_y.examples diff --git a/doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.score.examples b/doc/tutorial/backreferences/metriclearning.datasets.data_sample.DataSample.data.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.score.examples rename to doc/tutorial/backreferences/metriclearning.datasets.data_sample.DataSample.data.examples diff --git a/doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.set_params.examples b/doc/tutorial/backreferences/metriclearning.datasets.data_sample.DataSample.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.set_params.examples rename to 
doc/tutorial/backreferences/metriclearning.datasets.data_sample.DataSample.examples diff --git a/doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.staged_decision_function.examples b/doc/tutorial/backreferences/metriclearning.datasets.data_sample.Metriclearn_array.add_view.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.staged_decision_function.examples rename to doc/tutorial/backreferences/metriclearning.datasets.data_sample.Metriclearn_array.add_view.examples diff --git a/doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.staged_predict.examples b/doc/tutorial/backreferences/metriclearning.datasets.data_sample.Metriclearn_array.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.staged_predict.examples rename to doc/tutorial/backreferences/metriclearning.datasets.data_sample.Metriclearn_array.examples diff --git a/doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.staged_score.examples b/doc/tutorial/backreferences/metriclearning.datasets.data_sample.Metriclearn_array.get_col.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.staged_score.examples rename to doc/tutorial/backreferences/metriclearning.datasets.data_sample.Metriclearn_array.get_col.examples diff --git a/doc/docmumbo/backreferences/multimodalboost.mumbo.examples b/doc/tutorial/backreferences/metriclearning.datasets.data_sample.Metriclearn_array.get_raw.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.mumbo.examples rename to doc/tutorial/backreferences/metriclearning.datasets.data_sample.Metriclearn_array.get_raw.examples diff --git a/doc/tutorial/backreferences/metriclearning.datasets.data_sample.Metriclearn_array.get_view.examples b/doc/tutorial/backreferences/metriclearning.datasets.data_sample.Metriclearn_array.get_view.examples new file 
mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/metriclearning.datasets.data_sample.Metriclearn_array.set_view.examples b/doc/tutorial/backreferences/metriclearning.datasets.data_sample.Metriclearn_array.set_view.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/metriclearning.datasets.data_sample.examples b/doc/tutorial/backreferences/metriclearning.datasets.data_sample.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/metriclearning.lpMKL.MKL.examples b/doc/tutorial/backreferences/metriclearning.lpMKL.MKL.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/metriclearning.lpMKL.MKL.fit.examples b/doc/tutorial/backreferences/metriclearning.lpMKL.MKL.fit.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/metriclearning.lpMKL.MKL.learn_lpMKL.examples b/doc/tutorial/backreferences/metriclearning.lpMKL.MKL.learn_lpMKL.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/metriclearning.lpMKL.MKL.lpMKL_predict.examples b/doc/tutorial/backreferences/metriclearning.lpMKL.MKL.lpMKL_predict.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/metriclearning.lpMKL.MKL.predict.examples b/doc/tutorial/backreferences/metriclearning.lpMKL.MKL.predict.examples new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/metriclearning.lpMKL.examples b/doc/tutorial/backreferences/metriclearning.lpMKL.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/metriclearning.mkernel.MKernel.examples b/doc/tutorial/backreferences/metriclearning.mkernel.MKernel.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/metriclearning.mkernel.examples b/doc/tutorial/backreferences/metriclearning.mkernel.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/metriclearning.mvml.MVML.examples b/doc/tutorial/backreferences/metriclearning.mvml.MVML.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/metriclearning.mvml.MVML.fit.examples b/doc/tutorial/backreferences/metriclearning.mvml.MVML.fit.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/metriclearning.mvml.MVML.learn_mvml.examples b/doc/tutorial/backreferences/metriclearning.mvml.MVML.learn_mvml.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/metriclearning.mvml.MVML.predict.examples b/doc/tutorial/backreferences/metriclearning.mvml.MVML.predict.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/metriclearning.mvml.MVML.predict_mvml.examples 
b/doc/tutorial/backreferences/metriclearning.mvml.MVML.predict_mvml.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/metriclearning.mvml.examples b/doc/tutorial/backreferences/metriclearning.mvml.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.boost.UBoosting.examples b/doc/tutorial/backreferences/multimodal.boosting.boost.UBoosting.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.boost.examples b/doc/tutorial/backreferences/multimodal.boosting.boost.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.decision_function.examples b/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.decision_function.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.examples b/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.fit.examples b/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.fit.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.get_params.examples 
b/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.get_params.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.predict.examples b/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.predict.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.score.examples b/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.score.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.set_params.examples b/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.set_params.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.staged_decision_function.examples b/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.staged_decision_function.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.staged_predict.examples b/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.staged_predict.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.staged_score.examples b/doc/tutorial/backreferences/multimodal.boosting.cumbo.MuCumboClassifier.staged_score.examples new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.cumbo.examples b/doc/tutorial/backreferences/multimodal.boosting.cumbo.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.decision_function.examples b/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.decision_function.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.examples b/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.fit.examples b/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.fit.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.get_params.examples b/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.get_params.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.predict.examples b/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.predict.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.score.examples 
b/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.score.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.set_params.examples b/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.set_params.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.staged_decision_function.examples b/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.staged_decision_function.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.staged_predict.examples b/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.staged_predict.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.staged_score.examples b/doc/tutorial/backreferences/multimodal.boosting.mumbo.MumboClassifier.staged_score.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.boosting.mumbo.examples b/doc/tutorial/backreferences/multimodal.boosting.mumbo.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.clear.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.clear.examples new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.copy.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.copy.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.data.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.data.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.fromkeys.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.fromkeys.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.get.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.get.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.items.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.items.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.keys.examples 
b/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.keys.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.pop.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.pop.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.popitem.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.popitem.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.setdefault.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.setdefault.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.update.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.update.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.values.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.DataSample.values.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.T.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.T.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff 
--git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.all.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.all.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.any.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.any.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.argmax.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.argmax.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.argmin.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.argmin.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.argpartition.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.argpartition.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.argsort.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.argsort.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.astype.examples 
b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.astype.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.base.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.base.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.byteswap.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.byteswap.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.choose.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.choose.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.clip.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.clip.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.compress.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.compress.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.conj.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.conj.examples new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.conjugate.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.conjugate.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.copy.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.copy.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.ctypes.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.ctypes.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.cumprod.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.cumprod.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.cumsum.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.cumsum.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.data.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.data.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.diagonal.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.diagonal.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.dot.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.dot.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.dtype.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.dtype.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.dump.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.dump.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.dumps.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.dumps.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.fill.examples 
b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.fill.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.flags.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.flags.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.flat.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.flat.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.flatten.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.flatten.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.getfield.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.getfield.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.imag.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.imag.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.item.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.item.examples new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.itemset.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.itemset.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.itemsize.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.itemsize.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.max.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.max.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.mean.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.mean.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.min.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.min.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.nbytes.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.nbytes.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.ndim.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.ndim.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.newbyteorder.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.newbyteorder.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.nonzero.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.nonzero.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.partition.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.partition.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.prod.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.prod.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.ptp.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.ptp.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.put.examples 
b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.put.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.ravel.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.ravel.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.real.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.real.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.repeat.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.repeat.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.reshape.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.reshape.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.resize.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.resize.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.round.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.round.examples new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.searchsorted.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.searchsorted.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.setfield.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.setfield.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.setflags.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.setflags.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.shape.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.shape.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.size.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.size.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.sort.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.sort.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.squeeze.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.squeeze.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.std.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.std.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.strides.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.strides.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.sum.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.sum.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.swapaxes.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.swapaxes.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.take.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.take.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.tobytes.examples 
b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.tobytes.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.tofile.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.tofile.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.tolist.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.tolist.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.tostring.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.tostring.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.trace.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.trace.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.transpose.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.transpose.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.var.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.var.examples new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.view.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalArray.view.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.arcsin.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.arcsin.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.arcsinh.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.arcsinh.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.arctan.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.arctan.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.arctanh.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.arctanh.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.argmax.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.argmax.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 
diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.argmin.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.argmin.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.asformat.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.asformat.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.asfptype.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.asfptype.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.astype.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.astype.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.ceil.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.ceil.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.check_format.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.check_format.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.conj.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.conj.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.conjugate.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.conjugate.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.copy.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.copy.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.count_nonzero.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.count_nonzero.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.deg2rad.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.deg2rad.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.diagonal.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.diagonal.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.dot.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.dot.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.eliminate_zeros.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.eliminate_zeros.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.expm1.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.expm1.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.floor.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.floor.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.getH.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.getH.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.get_shape.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.get_shape.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.getcol.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.getcol.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.getformat.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.getformat.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.getmaxprint.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.getmaxprint.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.getnnz.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.getnnz.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.getrow.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.getrow.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.has_canonical_format.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.has_canonical_format.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.has_sorted_indices.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.has_sorted_indices.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.log1p.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.log1p.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.max.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.max.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.maximum.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.maximum.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.mean.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.mean.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.min.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.min.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.minimum.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.minimum.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.multiply.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.multiply.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.nnz.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.nnz.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.nonzero.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.nonzero.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.power.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.power.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.prune.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.prune.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.rad2deg.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.rad2deg.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.reshape.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.reshape.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.resize.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.resize.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.rint.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.rint.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.set_shape.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.set_shape.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.setdiag.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.setdiag.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.shape.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.shape.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.sign.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.sign.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.sin.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.sin.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.sinh.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.sinh.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.sort_indices.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.sort_indices.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.sorted_indices.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.sorted_indices.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.sqrt.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.sqrt.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.sum.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.sum.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.sum_duplicates.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.sum_duplicates.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.tan.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.tan.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.tanh.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.tanh.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.toarray.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.toarray.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.tobsr.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.tobsr.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.tocoo.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.tocoo.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.tocsc.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.tocsc.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.tocsr.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.tocsr.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.todense.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.todense.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.todia.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.todia.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.todok.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.todok.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.tolil.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.tolil.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.transpose.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.transpose.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.trunc.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.MultiModalSparseArray.trunc.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.datasets.data_sample.examples b/doc/tutorial/backreferences/multimodal.datasets.data_sample.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.lpMKL.MKL.examples 
b/doc/tutorial/backreferences/multimodal.kernels.lpMKL.MKL.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.lpMKL.MKL.fit.examples b/doc/tutorial/backreferences/multimodal.kernels.lpMKL.MKL.fit.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.lpMKL.MKL.get_params.examples b/doc/tutorial/backreferences/multimodal.kernels.lpMKL.MKL.get_params.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.lpMKL.MKL.learn_lpMKL.examples b/doc/tutorial/backreferences/multimodal.kernels.lpMKL.MKL.learn_lpMKL.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.lpMKL.MKL.lpMKL_predict.examples b/doc/tutorial/backreferences/multimodal.kernels.lpMKL.MKL.lpMKL_predict.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.lpMKL.MKL.predict.examples b/doc/tutorial/backreferences/multimodal.kernels.lpMKL.MKL.predict.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.lpMKL.MKL.score.examples b/doc/tutorial/backreferences/multimodal.kernels.lpMKL.MKL.score.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.lpMKL.MKL.set_params.examples b/doc/tutorial/backreferences/multimodal.kernels.lpMKL.MKL.set_params.examples 
new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.lpMKL.examples b/doc/tutorial/backreferences/multimodal.kernels.lpMKL.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.mkernel.MKernel.examples b/doc/tutorial/backreferences/multimodal.kernels.mkernel.MKernel.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.mkernel.examples b/doc/tutorial/backreferences/multimodal.kernels.mkernel.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.mvml.MVML.examples b/doc/tutorial/backreferences/multimodal.kernels.mvml.MVML.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.mvml.MVML.fit.examples b/doc/tutorial/backreferences/multimodal.kernels.mvml.MVML.fit.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.mvml.MVML.get_params.examples b/doc/tutorial/backreferences/multimodal.kernels.mvml.MVML.get_params.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.mvml.MVML.learn_mvml.examples b/doc/tutorial/backreferences/multimodal.kernels.mvml.MVML.learn_mvml.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/doc/tutorial/backreferences/multimodal.kernels.mvml.MVML.predict.examples b/doc/tutorial/backreferences/multimodal.kernels.mvml.MVML.predict.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.mvml.MVML.predict_mvml.examples b/doc/tutorial/backreferences/multimodal.kernels.mvml.MVML.predict_mvml.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.mvml.MVML.score.examples b/doc/tutorial/backreferences/multimodal.kernels.mvml.MVML.score.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.mvml.MVML.set_params.examples b/doc/tutorial/backreferences/multimodal.kernels.mvml.MVML.set_params.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodal.kernels.mvml.examples b/doc/tutorial/backreferences/multimodal.kernels.mvml.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/docmumbo/backreferences/multimodalboost.MumboClassifier.examples b/doc/tutorial/backreferences/multimodalboost.MumboClassifier.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.MumboClassifier.examples rename to doc/tutorial/backreferences/multimodalboost.MumboClassifier.examples diff --git a/doc/docmumbo/backreferences/multimodalboost.MumboClassifier.fit.examples b/doc/tutorial/backreferences/multimodalboost.MumboClassifier.fit.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.MumboClassifier.fit.examples rename to 
doc/tutorial/backreferences/multimodalboost.MumboClassifier.fit.examples diff --git a/doc/docmumbo/backreferences/multimodalboost.MumboClassifier.staged_score.examples b/doc/tutorial/backreferences/multimodalboost.MumboClassifier.staged_score.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.MumboClassifier.staged_score.examples rename to doc/tutorial/backreferences/multimodalboost.MumboClassifier.staged_score.examples diff --git a/doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.best_views_.examples b/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.best_views_.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.best_views_.examples rename to doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.best_views_.examples diff --git a/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.decision_function.examples b/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.decision_function.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.estimator_weights_.examples b/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.estimator_weights_.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.estimator_weights_.examples rename to doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.estimator_weights_.examples diff --git a/doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.estimators_.examples b/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.estimators_.examples similarity index 100% rename from doc/docmumbo/backreferences/multimodalboost.mumbo.MumboClassifier.estimators_.examples rename to 
doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.estimators_.examples diff --git a/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.examples b/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.fit.examples b/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.fit.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.get_params.examples b/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.get_params.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.predict.examples b/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.predict.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.score.examples b/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.score.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.set_params.examples b/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.set_params.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.staged_decision_function.examples 
b/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.staged_decision_function.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.staged_predict.examples b/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.staged_predict.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.staged_score.examples b/doc/tutorial/backreferences/multimodalboost.mumbo.MumboClassifier.staged_score.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/multimodalboost.mumbo.examples b/doc/tutorial/backreferences/multimodalboost.mumbo.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/numpy.core._internal._ctypes.data.examples b/doc/tutorial/backreferences/numpy.core._internal._ctypes.data.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/numpy.core._internal._ctypes.data_as.examples b/doc/tutorial/backreferences/numpy.core._internal._ctypes.data_as.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/numpy.core._internal._ctypes.shape.examples b/doc/tutorial/backreferences/numpy.core._internal._ctypes.shape.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/numpy.core._internal._ctypes.shape_as.examples 
b/doc/tutorial/backreferences/numpy.core._internal._ctypes.shape_as.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/numpy.core._internal._ctypes.strides.examples b/doc/tutorial/backreferences/numpy.core._internal._ctypes.strides.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/tutorial/backreferences/numpy.core._internal._ctypes.strides_as.examples b/doc/tutorial/backreferences/numpy.core._internal._ctypes.strides_as.examples new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/docmumbo/credits.rst b/doc/tutorial/credits.rst similarity index 53% rename from doc/docmumbo/credits.rst rename to doc/tutorial/credits.rst index a4ff1ecf7a2fe0964f74db99196e52a41940b9f3..999b01e2d75649e69a54cf931d9035b25dcce67c 100644 --- a/doc/docmumbo/credits.rst +++ b/doc/tutorial/credits.rst @@ -1,11 +1,11 @@ Credits ======= -**multimodalboost** is developped by the +**scikit-multimodallearnt** is developped by the `development team <https://developpement.lis-lab.fr/>`_ of the -`LIS <http://www.lis-lab.fr/>`_. +`LIS <http://www.lis-lab.fr/>`_. 
supported by `ANR Lives <https://lives.lif.univ-mrs.fr/>`_ -If you use **multimodalboost** in a scientific publication, please cite the +If you use **scikit-multimodallearn** in a scientific publication, please cite the following paper:: @InProceedings{Koco:2011:BAM, @@ -27,9 +27,33 @@ following paper:: supervised learning}, } + + @InProceedings{Huu:2019:BAMCC, + author={Huusari, Riika, Kadri Hachem and Capponi, C{\'e}cile}, + editor={}, + title={Multi-view Metric Learning in Vector-valued Kernel Spaces}, + booktitle={arXiv:1803.07821v1}, + year={2018}, + location={Athens, Greece}, + publisher={}, + address={}, + pages={209--228}, + numpages = {12} + isbn={978-3-642-23783-6} + url={https://link.springer.com/chapter/10.1007/978-3-642-23783-6_14}, + keywords={boosting, classification, multiview learning, + merric learning, vector-valued, kernel spaces}, + } + References ---------- +* Sokol Koço, Cécile Capponi, + `"Learning from Imbalanced Datasets with cross-view cooperation"` + Linking and mining heterogeneous an multi-view data, Unsupervised and + semi-supervised learning Series Editor M. Emre Celeri, pp 161-182, Springer + + * Sokol Koço, Cécile Capponi, `"A boosting approach to multiview classification with cooperation" <https://link.springer.com/chapter/10.1007/978-3-642-23783-6_14>`_, @@ -41,6 +65,11 @@ References learning methods" <http://www.theses.fr/en/2013AIXM4101>`_, PhD Thesis, Aix-Marseille Université, 2013. 
+* Riikka Huusari, Hachem Kadri and Cécile Capponi, + "Multi-View Metric Learning in Vector-Valued Kernel Spaces" + in International Conference on Artificial Intelligence and Statistics (AISTATS) 2018 + + Copyright --------- @@ -53,7 +82,5 @@ Copyright © 2017-2018 AMU, CNRS, UTLN License ------- -**multimodalboost** is free software: you can redistribute it and/or modify -it under the terms of the **GNU Lesser General Public License** as published by -the Free Software Foundation, either **version 3** of the License, or -(at your option) any later version. +**scikit-multimodallearn** is free software: you can redistribute it and/or modify +it under the terms of the **New BSD License** diff --git a/doc/doccumbo/install_devel.rst b/doc/tutorial/install_devel.rst similarity index 69% rename from doc/doccumbo/install_devel.rst rename to doc/tutorial/install_devel.rst index eb63b9006cd975bc6c3014ca19c814d6cd3c4c61..215d921e6ff5e0ae6976c11137006e613fc37d49 100644 --- a/doc/doccumbo/install_devel.rst +++ b/doc/tutorial/install_devel.rst @@ -4,37 +4,37 @@ Installation and development Dependencies ------------ -**multiconfusion** works with **Python 3.5 or later**. +**scikit-multimodallearn** works with **Python 3.5 or later**. -**multiconfusion** depends on **scikit-learn** (version >= 0.19). +**scikit-multimodallearn** depends on **scikit-learn** (version >= 0.19) and **cvxopt**. Optionally, **matplotlib** is required when running the examples. 
Installation ------------ -**multiconfusion** is -`available on PyPI <https://pypi.org/project/multimodalboost/>`_ +**scikit-multimodallearn* is +`available on PyPI <https://pypi.org/project/scikit-multimodallearn/>`_ and can be installed using **pip**:: - pip install multiconfusion + pip install multimodalboost If you prefer to install directly from the **source code**, clone the **Git** repository of the project and run the **setup.py** file with the following commands:: - git clone git@gitlab.lis-lab.fr:dev/multiconfusion.git + git clone git@gitlab.lis-lab.fr:dev/scikit-multimodallearn.git cd multimodalboost python setup.py install or alternatively use **pip**:: - pip install git+https://gitlab.lis-lab.fr/dev/multiconfusion.git + pip install git+https://gitlab.lis-lab.fr/dev/scikit-multimodallearn.git Development ----------- -The development of multimodalboost follows the guidelines provided by the +The development of scikit-multimodallearn follows the guidelines provided by the scikit-learn community. Refer to the `Developer's Guide <http://scikit-learn.org/stable/developers>`_ @@ -45,7 +45,7 @@ Source code You can get the **source code** from the **Git** repository of the project:: - git clone git@gitlab.lis-lab.fr:dev/multiconfusion.git + git clone git@gitlab.lis-lab.fr:dev/scikit-multimodallearn.git Testing @@ -53,7 +53,7 @@ Testing **pytest** and **pytest-cov** are required to run the **test suite** with:: - pytest-3 + pytest A code coverage report is displayed in the terminal when running the tests. An HTML version of the report is also stored in the directory **htmlcov**. @@ -66,10 +66,4 @@ The generation of the documentation requires **sphinx**, **sphinx-gallery**, python setup.py build_sphinx -ou:: - - cd doc - sphinx-build -b html . ./build - - The resulting files are stored in the directory **build/sphinx/html**. 
diff --git a/examples/README.txt b/examples/README.txt index f749137f56fe6950ead3682bcee3a44915567dea..192d2d5bb0550bef2607150598331a11b7fbe438 100644 --- a/examples/README.txt +++ b/examples/README.txt @@ -1,11 +1,7 @@ .. _examples: -Examples -======== +Multimodal Examples +=================== -MuMBo Examples --------------- -The following toy examples illustrate how the MuMBo algorithm exploits -cooperation between views for classification. diff --git a/examples/cumbo/README.txt b/examples/cumbo/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..41a77320d8d1d793d2b98a483bc28bc6268b5ea7 --- /dev/null +++ b/examples/cumbo/README.txt @@ -0,0 +1,11 @@ +.. _examples: + +Examples +======== + +MuCuMBo Examples +---------------- + +The following toy examples illustrate how the MuCuMBo algorithm exploits +cooperation between views for classification. + diff --git a/examples/cumbo/plot_cumbo_2_views_2_classes.py b/examples/cumbo/plot_cumbo_2_views_2_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..c669f6919c71f964891940bdfa3c1276e7342992 --- /dev/null +++ b/examples/cumbo/plot_cumbo_2_views_2_classes.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +""" +================================== +MuCombo 2 views, 2 classes example +================================== + +In this toy example, we generate data from two classes, split between two +two-dimensional views. + +For each view, the data are generated so that half of the points of each class +are well separated in the plane, while the other half of the points are not +separated and placed in the same area. We also insure that the points that are +not separated in one view are well separated in the other view. + +Thus, in the figure representing the data, the points represented by crosses +(x) are well separated in view 0 while they are not separated in view 1, while +the points represented by dots (.) are well separated in view 1 while they are +not separated in view 0. 
In this figure, the blue symbols represent points +of class 0, while red symbols represent points of class 1. + +The MuCuMBo algorithm take adavantage of the complementarity of the two views to +rightly classify the points. +""" + +import numpy as np +from multimodal.boosting.cumbo import MuCumboClassifier +from matplotlib import pyplot as plt + + +def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + +seed = 12 +np.random.seed(seed) + +n_samples = 100 + +view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]))) + +view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + +X = np.concatenate((view_0, view_1), axis=1) + +y = np.zeros(4*n_samples, dtype=np.int64) +y[2*n_samples:] = 1 + +views_ind = np.array([0, 2, 4]) + +n_estimators = 3 +clf = MuCumboClassifier(n_estimators=n_estimators) +clf.fit(X, y, views_ind) + +print('\nAfter 3 iterations, the MuCuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') +for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + + +print('\nThe resulting MuCuMBo classifier uses three sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights: {}'.format(clf.estimator_weights_alpha_)) + +# print('\nThe two first sub-classifiers use the data of view 0 to compute ' +# 'their\nclassification results, while the third one uses the data of ' +# 'view 1:\n' +# ' best views: {}'. 
format(clf.best_views_)) + +print('\nThe first figure displays the data, splitting the representation ' + 'between the\ntwo views.') + +fig = plt.figure(figsize=(10., 8.)) +fig.suptitle('Representation of the data', size=16) +for ind_view in range(2): + ax = plt.subplot(2, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + styles = ('.b', 'xb', '.r', 'xr') + labels = ('non-separated', 'separated') + for ind in range(4): + ind_class = ind // 2 + label = labels[(ind + ind_view) % 2] + ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature], + X[n_samples*ind:n_samples*(ind+1), ind_feature + 1], + styles[ind], + label='Class {} ({})'.format(ind_class, label)) + ax.legend() + +print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + +styles = ('.b', '.r') +# fig = plt.figure(figsize=(12., 7.)) +# fig.suptitle('Classification results on the learning data for the ' +# 'sub-classifiers', size=16) +# for ind_estimator in range(n_estimators): +# best_view = clf.best_views_[ind_estimator] +# y_pred = clf.estimators_[ind_estimator].predict( +# X[:, 2*best_view:2*best_view+2]) +# background_color = (1.0, 1.0, 0.9) +# for ind_view in range(2): +# ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1) +# if ind_view == best_view: +# ax.set_facecolor(background_color) +# ax.set_title( +# 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) +# ind_feature = ind_view * 2 +# for ind_class in range(2): +# ind_samples = (y_pred == ind_class) +# ax.plot(X[ind_samples, ind_feature], +# X[ind_samples, ind_feature + 1], +# styles[ind_class], +# label='Class {}'.format(ind_class)) +# ax.legend(title='Predicted class:') + +plt.show() diff --git a/examples/cumbo/plot_cumbo_3_views_3_classes.py b/examples/cumbo/plot_cumbo_3_views_3_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..058b2dc7e6562d83d19de511031f19f90b567926 --- /dev/null +++ 
b/examples/cumbo/plot_cumbo_3_views_3_classes.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +""" +================================== +MuCumbo 3 views, 3 classes example +================================== + +In this toy example, we generate data from three classes, split between three +two-dimensional views. + +For each view, the data are generated so that the points for two classes are +well seperated, while the points for the third class are not seperated with +the two other classes. That means that, taken separately, none of the single +views allows for a good classification of the data. + +Nevertheless, the MuCuMbo algorithm take adavantage of the complementarity of +the views to rightly classify the points. +""" + +import numpy as np +from multimodal.boosting.cumbo import MuCumboClassifier +from matplotlib import pyplot as plt + + +def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + +seed = 12 +np.random.seed(seed) + +n_samples = 300 + +view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]))) + +view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + +view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]))) + +X = np.concatenate((view_0, view_1, view_2), axis=1) + +y = np.zeros(3*n_samples, dtype=np.int64) +y[n_samples:2*n_samples] = 1 +y[2*n_samples:] = 2 + +views_ind = np.array([0, 2, 4, 6]) + +n_estimators = 4 +clf = MuCumboClassifier(n_estimators=n_estimators) +clf.fit(X, y, views_ind) + +print('\nAfter 4 iterations, 
the MuCuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') +for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + +print('\nThe resulting MuCuMBo classifier uses four sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights alpha: {}'.format(clf.estimator_weights_alpha_)) + +# print('\nThe first sub-classifier uses the data of view 0 to compute ' +# 'its classification\nresults, the second and third sub-classifiers use ' +# 'the data of view 1, while the\nfourth one uses the data of ' +# 'view 2:\n' +# ' best views: {}'. format(clf.best_views_)) + +print('\nThe first figure displays the data, splitting the representation ' + 'between the\nthree views.') + +styles = ('.b', '.r', '.g') +fig = plt.figure(figsize=(12., 11.)) +fig.suptitle('Representation of the data', size=16) +for ind_view in range(3): + ax = plt.subplot(3, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(3): + ind_samples = (y == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(loc='upper left', framealpha=0.9) + +print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + +# fig = plt.figure(figsize=(14., 11.)) +# fig.suptitle('Classification results on the learning data for the ' +# 'sub-classifiers', size=16) +# for ind_estimator in range(n_estimators): +# best_view = clf.best_views_[ind_estimator] +# y_pred = clf.estimators_[ind_estimator].predict( +# X[:, 2*best_view:2*best_view+2]) +# background_color = (1.0, 1.0, 0.9) +# for ind_view in range(3): +# ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1) +# if ind_view == best_view: +# ax.set_facecolor(background_color) +# ax.set_title( +# 'Sub-classifier {} - View 
{}'.format(ind_estimator, ind_view)) +# ind_feature = ind_view * 2 +# for ind_class in range(3): +# ind_samples = (y_pred == ind_class) +# ax.plot(X[ind_samples, ind_feature], +# X[ind_samples, ind_feature + 1], +# styles[ind_class], +# label='Class {}'.format(ind_class)) +# ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9) + +plt.show() diff --git a/examples/mumbo/README.txt b/examples/mumbo/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..f749137f56fe6950ead3682bcee3a44915567dea --- /dev/null +++ b/examples/mumbo/README.txt @@ -0,0 +1,11 @@ +.. _examples: + +Examples +======== + +MuMBo Examples +-------------- + +The following toy examples illustrate how the MuMBo algorithm exploits +cooperation between views for classification. + diff --git a/examples/mumbo/plot_mumbo_2_views_2_classes.py b/examples/mumbo/plot_mumbo_2_views_2_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..91f0dc7a33e6a921f6f7e79415e7ab8f6d86859a --- /dev/null +++ b/examples/mumbo/plot_mumbo_2_views_2_classes.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +""" +================================ +Mumbo 2 views, 2 classes example +================================ + +In this toy example, we generate data from two classes, split between two +two-dimensional views. + +For each view, the data are generated so that half of the points of each class +are well separated in the plane, while the other half of the points are not +separated and placed in the same area. We also insure that the points that are +not separated in one view are well separated in the other view. + +Thus, in the figure representing the data, the points represented by crosses +(x) are well separated in view 0 while they are not separated in view 1, while +the points represented by dots (.) are well separated in view 1 while they are +not separated in view 0. 
In this figure, the blue symbols represent points +of class 0, while red symbols represent points of class 1. + +The MuMBo algorithm take adavantage of the complementarity of the two views to +rightly classify the points. +""" + +import numpy as np +from multimodal.boosting.mumbo import MumboClassifier +from matplotlib import pyplot as plt + + +def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + +seed = 12 +np.random.seed(seed) + +n_samples = 100 + +view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]))) + +view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [1., 2.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + +X = np.concatenate((view_0, view_1), axis=1) + +y = np.zeros(4*n_samples, dtype=np.int64) +y[2*n_samples:] = 1 + +views_ind = np.array([0, 2, 4]) + +n_estimators = 3 +clf = MumboClassifier(n_estimators=n_estimators) +clf.fit(X, y, views_ind) + +print('\nAfter 3 iterations, the MuMBo classifier reaches exact ' + 'classification for the\nlearning samples:') +for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + + +print('\nThe resulting MuMBo classifier uses three sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights: {}'.format(clf.estimator_weights_)) + +print('\nThe two first sub-classifiers use the data of view 0 to compute ' + 'their\nclassification results, while the third one uses the data of ' + 'view 1:\n' + ' best views: {}'. 
format(clf.best_views_)) + +print('\nThe first figure displays the data, splitting the representation ' + 'between the\ntwo views.') + +fig = plt.figure(figsize=(10., 8.)) +fig.suptitle('Representation of the data', size=16) +for ind_view in range(2): + ax = plt.subplot(2, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + styles = ('.b', 'xb', '.r', 'xr') + labels = ('non-separated', 'separated') + for ind in range(4): + ind_class = ind // 2 + label = labels[(ind + ind_view) % 2] + ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature], + X[n_samples*ind:n_samples*(ind+1), ind_feature + 1], + styles[ind], + label='Class {} ({})'.format(ind_class, label)) + ax.legend() + +print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + +styles = ('.b', '.r') +fig = plt.figure(figsize=(12., 7.)) +fig.suptitle('Classification results on the learning data for the ' + 'sub-classifiers', size=16) +for ind_estimator in range(n_estimators): + best_view = clf.best_views_[ind_estimator] + y_pred = clf.estimators_[ind_estimator].predict( + X[:, 2*best_view:2*best_view+2]) + background_color = (1.0, 1.0, 0.9) + for ind_view in range(2): + ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1) + if ind_view == best_view: + ax.set_facecolor(background_color) + ax.set_title( + 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(2): + ind_samples = (y_pred == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(title='Predicted class:') + +plt.show() diff --git a/examples/mumbo/plot_mumbo_3_views_3_classes.py b/examples/mumbo/plot_mumbo_3_views_3_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..62cd31156bd2c573331131119f4c69982a63e840 --- /dev/null +++ 
b/examples/mumbo/plot_mumbo_3_views_3_classes.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +""" +================================ +Mumbo 3 views, 3 classes example +================================ + +In this toy example, we generate data from three classes, split between three +two-dimensional views. + +For each view, the data are generated so that the points for two classes are +well seperated, while the points for the third class are not seperated with +the two other classes. That means that, taken separately, none of the single +views allows for a good classification of the data. + +Nevertheless, the MuMBo algorithm take adavantage of the complementarity of +the views to rightly classify the points. +""" + +import numpy as np +from multimodal.boosting.mumbo import MumboClassifier +from matplotlib import pyplot as plt + + +def generate_data(n_samples, lim): + """Generate random data in a rectangle""" + lim = np.array(lim) + n_features = lim.shape[0] + data = np.random.random((n_samples, n_features)) + data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0] + return data + + +seed = 12 +np.random.seed(seed) + +n_samples = 300 + +view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]))) + +view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]))) + +view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]), + generate_data(n_samples, [[0., 1.], [0., 1.]]), + generate_data(n_samples, [[1., 2.], [0., 1.]]))) + +X = np.concatenate((view_0, view_1, view_2), axis=1) + +y = np.zeros(3*n_samples, dtype=np.int64) +y[n_samples:2*n_samples] = 1 +y[2*n_samples:] = 2 + +views_ind = np.array([0, 2, 4, 6]) + +n_estimators = 4 +clf = MumboClassifier(n_estimators=n_estimators) +clf.fit(X, y, views_ind) + +print('\nAfter 4 iterations, the MuMBo 
classifier reaches exact ' + 'classification for the\nlearning samples:') +for ind, score in enumerate(clf.staged_score(X, y)): + print(' - iteration {}, score: {}'.format(ind + 1, score)) + +print('\nThe resulting MuMBo classifier uses four sub-classifiers that are ' + 'wheighted\nusing the following weights:\n' + ' estimator weights: {}'.format(clf.estimator_weights_)) + +print('\nThe first sub-classifier uses the data of view 0 to compute ' + 'its classification\nresults, the second and third sub-classifiers use ' + 'the data of view 1, while the\nfourth one uses the data of ' + 'view 2:\n' + ' best views: {}'. format(clf.best_views_)) + +print('\nThe first figure displays the data, splitting the representation ' + 'between the\nthree views.') + +styles = ('.b', '.r', '.g') +fig = plt.figure(figsize=(12., 11.)) +fig.suptitle('Representation of the data', size=16) +for ind_view in range(3): + ax = plt.subplot(3, 1, ind_view + 1) + ax.set_title('View {}'.format(ind_view)) + ind_feature = ind_view * 2 + for ind_class in range(3): + ind_samples = (y == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(loc='upper left', framealpha=0.9) + +print('\nThe second figure displays the classification results for the ' + 'sub-classifiers\non the learning sample data.\n') + +fig = plt.figure(figsize=(14., 11.)) +fig.suptitle('Classification results on the learning data for the ' + 'sub-classifiers', size=16) +for ind_estimator in range(n_estimators): + best_view = clf.best_views_[ind_estimator] + y_pred = clf.estimators_[ind_estimator].predict( + X[:, 2*best_view:2*best_view+2]) + background_color = (1.0, 1.0, 0.9) + for ind_view in range(3): + ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1) + if ind_view == best_view: + ax.set_facecolor(background_color) + ax.set_title( + 'Sub-classifier {} - View {}'.format(ind_estimator, ind_view)) + ind_feature = ind_view * 2 + 
for ind_class in range(3): + ind_samples = (y_pred == ind_class) + ax.plot(X[ind_samples, ind_feature], + X[ind_samples, ind_feature + 1], + styles[ind_class], + label='Class {}'.format(ind_class)) + ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9) + +plt.show() diff --git a/examples/mvml/README.txt b/examples/mvml/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..b310f9fa89b020fe70c3908c062706ebe7e9d4f6 --- /dev/null +++ b/examples/mvml/README.txt @@ -0,0 +1,10 @@ +.. _examples: + +Examples +======== + +MVML +---- + +The following toy examples illustrate how the MVML algorithm + diff --git a/examples/mvml/plot_mvml_.py b/examples/mvml/plot_mvml_.py new file mode 100644 index 0000000000000000000000000000000000000000..a449a7c868af0b92677a8703e6ca12e2685d9e35 --- /dev/null +++ b/examples/mvml/plot_mvml_.py @@ -0,0 +1,206 @@ +# -*- coding: utf-8 -*- +""" +==== +MVML +==== +Demonstration on how MVML (in file mvml.py) is intended to be used with very simple simulated dataset + +Demonstration uses scikit-learn for retrieving datasets and for calculating rbf kernel function, see +http://scikit-learn.org/stable/ +""" + +import numpy as np +import matplotlib.pyplot as plt +from sklearn import datasets +from sklearn.metrics import accuracy_score +from sklearn.metrics.pairwise import rbf_kernel +from multimodal.kernels.mvml import MVML +from multimodal.datasets.data_sample import DataSample +from multimodal.tests.datasets.get_dataset_path import get_dataset_path + + +np.random.seed(4) + +# =========== create a simple dataset ============ + +n_tot = 200 +half = int(n_tot/2) +n_tr = 120 + +# create a bit more data than needed so that we can take "half" amount of samples for each class +X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False) +X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False) + +# make multi-view correspondence (select equal number of samples for both classes and order 
the data same way +# in both views) + +yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half]) +yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half]) + +X0 = X0[yinds0, :] +X1 = X1[yinds1, :] +Y = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1 + +# show data +# =========== create a simple dataset ============ + +n_tot = 200 +half = int(n_tot/2) +n_tr = 120 + +# create a bit more data than needed so that we can take "half" amount of samples for each class +X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False) +X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False) + +# make multi-view correspondence (select equal number of samples for both classes and order the data same way +# in both views) + +yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half]) +yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half]) + +X0 = X0[yinds0, :] +X1 = X1[yinds1, :] +Y = np.append(np.zeros(half)-1, np.ones(half)) # labels -1 and 1 + +# show data +plt.figure(figsize=(10., 8.)) +plt.subplot(121) +plt.scatter(X0[:, 0], X0[:, 1], c=Y) +plt.title("all data, view 1") +plt.subplot(122) +plt.scatter(X1[:, 0], X1[:, 1], c=Y) +plt.title("all data, view 2") +plt.show() + +# shuffle +order = np.random.permutation(n_tot) +X0 = X0[order, :] +X1 = X1[order, :] +Y = Y[order] +################################## +# make kernel dictionaries +################################# +kernel_dict = {} +test_kernel_dict = {} +kernel_dict[0] = rbf_kernel(X0[0:n_tr, :]) +kernel_dict[1] = rbf_kernel(X1[0:n_tr, :]) +test_kernel_dict[0] = rbf_kernel(X0[n_tr:n_tot, :], X0[0:n_tr, :]) +test_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :]) +x_dict = {} +x_dict[0] = X0[0:n_tr, :] +x_dict[1] = X1[0:n_tr, :] +test_x_dict = {} +test_x_dict[0] = X0[n_tr:n_tot, :] +test_x_dict[1] = X1[n_tr:n_tot, :] +# d= DataSample(kernel_dict) +# a = d.data +# +# =========== use 
MVML in classifying the data ============ +# kernel precomputed +# demo on how the code is intended to be used; parameters are not cross-validated, just picked some +# # with approximation +# # default: learn A, don't learn w (learn_A=1, learn_w=0) +mvml = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='precomputed') +mvml.fit(kernel_dict, Y[0:n_tr]) + + +# + +pred1 = np.sign(mvml.predict(test_kernel_dict) ) # take sign for classification result +# +# without approximation +mvml2 = MVML(lmbda=0.1, eta=1, nystrom_param=1, kernel='precomputed') # without approximation +mvml2.fit(kernel_dict, Y[0:n_tr]) +pred2 = np.sign(mvml2.predict(test_kernel_dict)) +# +# use MVML_Cov, don't learn w +mvml3 = MVML(lmbda=0.1, eta=1,learn_A=3, nystrom_param=1, kernel='precomputed') +mvml3.fit(kernel_dict, Y[0:n_tr]) +pred3 = np.sign(mvml3.predict(test_kernel_dict)) +# +# use MVML_I, don't learn w +mvml4 = MVML(lmbda=0.1, eta=1,learn_A=4, nystrom_param=1, kernel='precomputed') +mvml4.fit(kernel_dict, Y[0:n_tr]) +pred4 = np.sign(mvml4.predict(test_kernel_dict)) +# +# use kernel rbf equivalent to case 1 +mvml5 = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='rbf') +mvml5.fit(x_dict, Y[0:n_tr]) +pred5 = np.sign(mvml5.predict(test_x_dict)) +# +# +# # =========== show results ============ +# +# # accuracies +acc1 = accuracy_score(Y[n_tr:n_tot], pred1) +acc2 = accuracy_score(Y[n_tr:n_tot], pred2) +acc3 = accuracy_score(Y[n_tr:n_tot], pred3) +acc4 = accuracy_score(Y[n_tr:n_tot], pred4) +acc5 = accuracy_score(Y[n_tr:n_tot], pred5) +# +# # display obtained accuracies +# +print("MVML: ", acc1) +print("MVMLsparse: ", acc2) +print("MVML_Cov: ", acc3) +print("MVML_I: ", acc4) +print("MVML_rbf: ", acc5) +# +# +# # plot data and some classification results +# +plt.figure(2, figsize=(10., 8.)) +plt.subplot(341) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=Y[n_tr:n_tot]) +plt.title("orig. 
view 1") +plt.subplot(342) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=Y[n_tr:n_tot]) +plt.title("orig. view 2") +# +pred1[np.where(pred1[:, 0] != Y[n_tr:n_tot])] = 0 +pred1 = pred1.reshape((pred1.shape[0])) +plt.subplot(343) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred1) +plt.title("MVML view 1") +plt.subplot(344) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred1) +plt.title("MVML view 2") +# +pred2[np.where(pred2[:, 0] != Y[n_tr:n_tot])] = 0 +pred2 = pred2.reshape((pred2.shape[0])) +plt.subplot(345) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred2) +plt.title("MVMLsparse view 1") +plt.subplot(346) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred2) +plt.title("MVMLsparse view 2") +# +pred3[np.where(pred3[:, 0] != Y[n_tr:n_tot])] = 0 +pred3 = pred3.reshape((pred3.shape[0])) +# +plt.subplot(347) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred3) +plt.title("MVML_Cov view 1") +plt.subplot(348) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred3) +plt.title("MVML_Cov view 2") +# +pred4[np.where(pred4[:, 0] != Y[n_tr:n_tot])] = 0 +pred4 = pred4.reshape((pred4.shape[0])) +plt.subplot(349) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred4) +plt.title("MVML_I view 1") +plt.subplot(3,4,10) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred4) +plt.title("MVML_I view 2") +# +pred5[np.where(pred5[:, 0] != Y[n_tr:n_tot])] = 0 +pred5 = pred5.reshape((pred5.shape[0])) +plt.subplot(3,4,11) +plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred5) +plt.title("MVML_rbf_kernel view 1") +plt.subplot(3,4,12) +plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred5) +plt.title("MVML_rbf_kernel view 2") +# +plt.show() diff --git a/multimodal/boosting/cumbo.py b/multimodal/boosting/cumbo.py index 6753e71b30e88d48683dfe5dc50e1f4a816282a6..0d8d42c5cade316febd1f6b3b3feed09e35cb7fa 100644 --- a/multimodal/boosting/cumbo.py +++ b/multimodal/boosting/cumbo.py @@ -74,10 +74,10 @@ class 
MuCumboClassifier(BaseEnsemble, ClassifierMixin, UBoosting): Empirical loss for each iteration. - best_views\_ : numpy.ndarray of integers, shape = (len(estimators\_),) + best\_views\_ : numpy.ndarray of integers, shape = (len(estimators\_),) Indices of the best view for each estimator in the boosted ensemble. - n_yi : numpy ndarray of int contains number of train sample for each classe shape (n_classes,) + n_yi\_ : numpy ndarray of int contains number of train sample for each classe shape (n_classes,) Examples -------- @@ -247,10 +247,10 @@ class MuCumboClassifier(BaseEnsemble, ClassifierMixin, UBoosting): predicted_classes[np.arange(n_views), :]] cost = np.exp( label_score - - label_score[:, np.arange(n_samples), y][:, :, np.newaxis]) / self.n_yi[np.newaxis, np.newaxis, :] + - label_score[:, np.arange(n_samples), y][:, :, np.newaxis]) / self.n_yi_[np.newaxis, np.newaxis, :] score_function_dif = np.exp( label_score - - label_score[:, np.arange(n_samples), y][:, :, np.newaxis]) / self.n_yi[np.newaxis, np.newaxis, :] + - label_score[:, np.arange(n_samples), y][:, :, np.newaxis]) / self.n_yi_[np.newaxis, np.newaxis, :] cost[:, np.arange(n_samples), y] -= np.sum(cost, axis=2) return (cost, label_score, score_function_dif) @@ -304,7 +304,7 @@ class MuCumboClassifier(BaseEnsemble, ClassifierMixin, UBoosting): solvers.options['show_progress'] = False n_view = self.n_views_ m = self.n_classes_ - coef = 1.0/np.tile(self.n_yi, self.n_views_).squeeze() * score_function_dif_Tminus1 + coef = 1.0/np.tile(self.n_yi_, self.n_views_).squeeze() * score_function_dif_Tminus1 zeta_v = np.repeat(alphas, self.n_classes_) * indicate_vue * delta_vue zeta_v_yi = np.repeat(alphas, self.n_classes_) * indicate_vue_yi * delta_vue zeta = zeta_v + zeta_v_yi @@ -325,8 +325,8 @@ class MuCumboClassifier(BaseEnsemble, ClassifierMixin, UBoosting): try: solver = solvers.cp(F, A=A, b=b, G=G, h=h, dim={'l':2*n_view*m})['x'] except ValueError or ArithmeticError or OverflowError as e: - norm = 
np.sum(1.0/self.n_yi) - yi_norm = self.n_yi * (norm ) + norm = np.sum(1.0/self.n_yi_) + yi_norm = self.n_yi_ * (norm ) solver = matrix(1.0/np.tile(yi_norm, n_view).squeeze(), (n_view * m, 1)) print("Value Error on the evaluation on beta coefficient %s "% e) return solver @@ -349,7 +349,11 @@ class MuCumboClassifier(BaseEnsemble, ClassifierMixin, UBoosting): Parameters ---------- - X : {array-like, sparse matrix}, shape = (n_samples, n_features) + X : dict dictionary with all views + or + `MultiModalData` , `MultiModalArray`, `MultiModalSparseArray` + or + {array-like, sparse matrix}, shape = (n_samples, n_features) Training multi-view input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK and LIL are converted to CSR. @@ -387,8 +391,7 @@ class MuCumboClassifier(BaseEnsemble, ClassifierMixin, UBoosting): ValueError where `X` and `view_ind` are not compatibles """ - warnings.filterwarnings("ignore") - self.X_ = self._global_X_transform(X, views_ind=views_ind) + warnings.filterwarnings("ignore", category=RuntimeWarning) if (self.base_estimator is None or isinstance(self.base_estimator, (BaseDecisionTree, BaseForest))): @@ -398,8 +401,7 @@ class MuCumboClassifier(BaseEnsemble, ClassifierMixin, UBoosting): dtype = None accept_sparse = ['csr', 'csc'] - - + self.X_ = self._global_X_transform(X, views_ind=views_ind) views_ind_, n_views = self.X_._validate_views_ind(self.X_.views_ind, self.X_.shape[1]) check_X_y(self.X_, y) @@ -410,7 +412,7 @@ class MuCumboClassifier(BaseEnsemble, ClassifierMixin, UBoosting): self.classes_, y = np.unique(y, return_inverse=True) self.n_classes_ = len(self.classes_) self.n_views_ = n_views - self.n_features_ = X.shape[1] + self.n_features_ = self.X_.shape[1] if self.n_classes_ == 1: # This case would lead to division by 0 when computing the cost # matrix so it needs special handling (but it is an obvious case as @@ -431,7 +433,7 @@ class MuCumboClassifier(BaseEnsemble, ClassifierMixin, UBoosting): random_state = 
check_random_state(self.random_state) (cost, label_score, label_score_global, predicted_classes, score_function_dif, betas, n_yi) = self._init_var(n_views, y) - self.n_yi = n_yi + self.n_yi_ = n_yi for current_iteration in range(self.n_iterations_): # list de h pris a l'etape t dist = self._compute_dist(cost, y) @@ -647,7 +649,6 @@ class MuCumboClassifier(BaseEnsemble, ClassifierMixin, UBoosting): score : float Mean accuracy of self.predict(X) wrt. y. """ - X = self._validate_X_predict(X) return super(MuCumboClassifier, self).score(X, y) def staged_score(self, X, y): diff --git a/multimodal/boosting/mumbo.py b/multimodal/boosting/mumbo.py index e9eabb6cf6edfb016ca3f47890a67bb46e31a2d5..3ddee635db2147f90959b974b9d14f5264620953 100644 --- a/multimodal/boosting/mumbo.py +++ b/multimodal/boosting/mumbo.py @@ -275,7 +275,11 @@ class MumboClassifier(BaseEnsemble, ClassifierMixin, UBoosting): Parameters ---------- - X : {array-like, sparse matrix}, shape = (n_samples, n_features) + X : dict dictionary with all views + or + `MultiModalData` , `MultiModalArray`, `MultiModalSparseArray` + or + {array-like, sparse matrix}, shape = (n_samples, n_features) Training multi-view input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK and LIL are converted to CSR. 
diff --git a/multimodal/datasets/base.py b/multimodal/datasets/base.py index 3d58c39c79cb76c331159cf7cfb39513386d239b..560db713cd096e5fe27d742b939269872ed67982 100644 --- a/multimodal/datasets/base.py +++ b/multimodal/datasets/base.py @@ -25,7 +25,7 @@ def load_dict(filename_): return ret_di -def _create_pickle_files(self, adr, dsample): +def _create_pickle_files(adr, dsample): f = open(adr + ".sample.pkl", "wb") pickle.dump(dsample, f) f.close() diff --git a/multimodal/datasets/data_sample.py b/multimodal/datasets/data_sample.py index 037bcbc7642c910271cd57513628e952a652700d..7543874494cfc893394e1bb6c394a6c0f605fde5 100644 --- a/multimodal/datasets/data_sample.py +++ b/multimodal/datasets/data_sample.py @@ -219,7 +219,6 @@ class MultiModalSparseArray(sp.csr_matrix, sp.csc_matrix, MultiModalSparseInfo, >>> data = load_dict(get_dataset_path(file)) """ - def __init__(self, *arg, **kwargs ): """Constructor of Metriclearn_array""" if sp.issparse(arg[0]): diff --git a/multimodal/kernels/lpMKL.py b/multimodal/kernels/lpMKL.py index 9de928aec4d16c4dfb2f691c31816b03f1a6f7c3..cb051c03540f654cd5fc49e8edb70a602a86661a 100644 --- a/multimodal/kernels/lpMKL.py +++ b/multimodal/kernels/lpMKL.py @@ -243,17 +243,14 @@ class MKL(BaseEstimator, ClassifierMixin, MKernel): Parameters ---------- - X : - Metriclearn_array {array-like, sparse matrix}, shape = (n_samples, n_features) - Training multi-view input samples. can be also Kernel where attibute 'kernel' - is set to precompute "precomputed" - - - Dictionary of {array like} with shape = (n_samples, n_features) for multi-view - for each view. - - - Array of {array like} with shape = (n_samples, n_features) for multi-view - for each view. - - - {array like} with (n_samples, nviews * n_features) with 'views_ind' diferent to 'None' + X : dict dictionary with all views {array like} with shape = (n_samples, n_features) for multi-view + for each view. 
+ or + `MultiModalData` , `MultiModalArray` + or + {array-like,}, shape = (n_samples, n_features) + Training multi-view input samples. can be also Kernel where attribute 'kernel' + is set to precompute "precomputed" views_ind : array-like (default=[0, n_features//2, n_features]) Paramater specifying how to extract the data views from X: @@ -320,5 +317,19 @@ class MKL(BaseEstimator, ClassifierMixin, MKernel): return np.dot(kernel, C) + def score(self, X, y): + """Return the mean accuracy on the given test data and labels. + + Parameters + ---------- + X : {array-like} of shape = (n_samples, n_features) + y : array-like, shape = (n_samples,) + True labels for X. + Returns + ------- + score : float + Mean accuracy of self.predict(X) wrt. y. + """ + return super(MKL, self).score(X, y) diff --git a/multimodal/kernels/mvml.py b/multimodal/kernels/mvml.py index 6d585d0a7d3002cd3c8eae018ad47deb5dd28219..3fb46b41de161d5e02f2152006eef1a6faf36219 100644 --- a/multimodal/kernels/mvml.py +++ b/multimodal/kernels/mvml.py @@ -89,6 +89,7 @@ class MVML(MKernel, BaseEstimator, ClassifierMixin): y_ : array-like, shape = (n_samples,) Target values (class labels). + """ + # r_cond = 10-30 @@ -550,3 +551,19 @@ class MVML(MKernel, BaseEstimator, ClassifierMixin): v * m:(v + 1) * m] return A_new + + def score(self, X, y): + """Return the mean accuracy on the given test data and labels. + + Parameters + ---------- + X : {array-like} of shape = (n_samples, n_features) + y : array-like, shape = (n_samples,) + True labels for X. 
+ """ + return super(MVML, self).score(X, y) \ No newline at end of file diff --git a/multimodal/tests/datasets/input_x_dic.pkl b/multimodal/tests/datasets/input_x_dic.pkl index ac879caaa3bd4b29030decb48b42d522ad341595..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644 Binary files a/multimodal/tests/datasets/input_x_dic.pkl and b/multimodal/tests/datasets/input_x_dic.pkl differ diff --git a/multimodal/tests/test_cumbo.py b/multimodal/tests/test_cumbo.py index 153bc94f3ffbb2fde17a142eeff3534cedfb6d2d..7841d32461bc954617c3e1877ecea4b225432a78 100644 --- a/multimodal/tests/test_cumbo.py +++ b/multimodal/tests/test_cumbo.py @@ -14,7 +14,7 @@ from sklearn.ensemble import RandomForestClassifier from sklearn.cluster import KMeans from sklearn.tree import DecisionTreeClassifier from sklearn import datasets - +from sklearn.utils.estimator_checks import check_estimator from multimodal.boosting.cumbo import MuCumboClassifier from multimodal.tests.data.get_dataset_path import get_dataset_path from multimodal.datasets.data_sample import MultiModalArray @@ -208,7 +208,7 @@ class TestMuCumboClassifier(unittest.TestCase): clf = MuCumboClassifier() clf.n_views_ = 3 clf.n_classes_ = 3 - clf.n_yi = np.array([1, 1, 2]) + clf.n_yi_ = np.array([1, 1, 2]) A, b, G, h, l = clf._prepare_beta_solver() y_i = np.array([0, 1, 2, 0]) predicted_classes = np.array([[0, 0, 1, 1], [0, 1, 0, 2], [2, 2, 0, 0]]) @@ -234,7 +234,7 @@ class TestMuCumboClassifier(unittest.TestCase): clf = MuCumboClassifier() clf.n_views_ = 3 clf.n_classes_ = 3 - clf.n_yi = np.array([1, 1, 2]) + clf.n_yi_ = np.array([1, 1, 2]) cost_Tminus1 =np.array([[[-7.45744585e+01, 3.67879439e-01, 7.42065790e+01], [ 4.78511743e-06, 3.87742081e-02, -3.87789932e-02], [ 2.47875218e-03, -2.48182428e-03, 3.07210618e-06], @@ -356,7 +356,7 @@ class TestMuCumboClassifier(unittest.TestCase): clf = MuCumboClassifier() clf.n_views_ = 3 clf.n_classes_ = 3 - clf.n_yi = np.array([1, 1, 2]) + clf.n_yi_ = np.array([1, 1, 2]) betas = 
clf._compute_betas(alphas, y, score_function_Tminus1, predicted_classes) cost, label_score, score_function_dif = clf._compute_cost(label_score, predicted_classes, y, alphas, betas, use_coop_coef=True) @@ -862,9 +862,9 @@ class TestMuCumboClassifier(unittest.TestCase): # clf.fit(X, y, views_ind) # assert_equal(clf.score(X, y), 1.) # - # - # def test_classifier(): - # return check_estimator(MuCumboClassifier) + + def test_classifier(self): + return check_estimator(MuCumboClassifier) # # # def test_iris(): diff --git a/setup.py b/setup.py index bff0c3e733cae5a9ae84a8f53e0fc6dd3b66b921..5ec883afbe4ac9c099fd9e7ba5d3be774639c22d 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,8 @@ def setup_package(): 'Documentation': 'http://{}.pages.lis-lab.fr/{}'.format(group, name), 'Source': url, 'Tracker': '{}/issues'.format(url)} - author = 'Dominique Benielli' + author = 'Dominique Benielli and Sokol Koço and Florent Jaillet and Riikka Huusari ' \ + 'and Cécile Capponi and Hachem Kadri' author_email = 'contact.dev@lis-lab.fr' license = 'newBSD' classifiers = [