diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b8a44d3d9e64160d1690c133b91543081a4b6e16
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,34 @@
+# run the test suite
+tests:
+    image: registry.gitlab.lis-lab.fr:5005/skmad-suite/madarrays/ubuntu:18.04
+    tags:
+        - docker
+    script:
+        - cd python
+        - pip3 install --no-deps ltfatpy madarrays yafe skpomade pandas xarray
+        - pip3 install 'scipy==1.4.1' -U
+        - pip3 install 'matplotlib==3.1.2' -U
+        - pip3 install --no-deps .
+        - python3 tffpy/tests/ci_config.py
+        - pytest-3
+
+# generate the documentation
+pages:
+    image: registry.gitlab.lis-lab.fr:5005/skmad-suite/madarrays/ubuntu:18.04
+    tags:
+        - docker
+    only:
+        - master
+    script:
+        - cd python
+        - pip3 install --no-deps ltfatpy madarrays yafe skpomade pandas xarray
+        - pip3 install 'scipy==1.4.1' -U
+        - pip3 install 'matplotlib==3.1.2' -U
+        - pip3 install --no-deps .
+        - python3 tffpy/tests/ci_config.py
+        - python3 setup.py build_sphinx
+        - cp -r build/sphinx/html ../public
+    artifacts:
+        paths:
+            - public
+
diff --git a/README.md b/README.md
index 681f11e6019567708710d9f0471b26ea0a7ae5f0..3b3f7f17e2df846f8006f8c1767f34e7dddcd439 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,18 @@
 # TFF2020 - Time-Frequency Fading
+
+Code and data to reproduce the experiments from the paper
+*Time-frequency fading algorithms based on Gabor multipliers*
+by A. Marina Krémé, Valentin Emiya, Caroline Chaux and Bruno Torrésani, 2020.
+
+The sound material is available in the `data` folder.
+
+The code is available in the `matlab` and `python` folders. The main experiments are available in both programming languages (a sketch for launching the Python experiments is given after the list below):
+
+* Figure 1 can be reproduced in Matlab by running the file `run_illustration_cuicui_eigenvalues.m`.
+* Figure 2 can be reproduced in Matlab by running the file `exp_eigenval_win.m`.
+* Figure 3 can be reproduced in Matlab by running the file `rank_estimation_halko_vs_eigs_gausswin.m`.
+* Figure 4 can be reproduced in Python by running tasks 12 and 13 from `tffpy.scripts.script_exp_solve_tff.py`.
+* Figure 5 can be reproduced in Python by running tasks 12 and 13 from `tffpy.scripts.script_exp_solve_tff.py`.
+* Figure 6 can be reproduced in Python by running the full experiment from `tffpy.scripts.script_exp_solve_tff.py`.
+* Table I can be reproduced in Python by running the full experiment from `tffpy.scripts.script_exp_solve_tff.py`.
+* Table II can be reproduced in Python by running the full experiment from `tffpy.scripts.script_exp_solve_tff.py`.
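+
+As a rough, hedged illustration (the exact entry points of the experiment script
+are not documented here, so this only shows one plausible way to launch it),
+the full Python experiment can be started from a Python session as follows:
+
+```python
+# Hypothetical sketch: check the header of
+# python/tffpy/scripts/script_exp_solve_tff.py for the actual interface and
+# for how tasks 12 and 13 are selected.
+import runpy
+
+# Assumption: the script runs the experiment when executed as a module
+# (i.e. it has a `__main__` section); otherwise, import it and call its
+# functions directly.
+runpy.run_module('tffpy.scripts.script_exp_solve_tff', run_name='__main__')
+```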
diff --git a/python/LICENSE.txt b/python/LICENSE.txt
new file mode 100755
index 0000000000000000000000000000000000000000..810fce6e9bf2aa10265b85614db5ac65941ecf81
--- /dev/null
+++ b/python/LICENSE.txt
@@ -0,0 +1,621 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Use with the GNU Affero General Public License.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
diff --git a/python/MANIFEST.in b/python/MANIFEST.in
new file mode 100755
index 0000000000000000000000000000000000000000..f27954d207964f7720006d4461c1986a172b7ce5
--- /dev/null
+++ b/python/MANIFEST.in
@@ -0,0 +1,8 @@
+include *.txt
+include *.rst
+include VERSION
+recursive-include doc *.rst *.py *.ipynb
+
+include tffpy/tests/*.py
+
+prune doc/build
diff --git a/python/README.md b/python/README.md
deleted file mode 100644
index ac63db667ba40a131132486d11173e7d2a941f32..0000000000000000000000000000000000000000
--- a/python/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-# Python code for time-frequency fading
-
diff --git a/python/README.rst b/python/README.rst
new file mode 100755
index 0000000000000000000000000000000000000000..970af7b11c62b3ecc1589aaf342b24f66e7286d5
--- /dev/null
+++ b/python/README.rst
@@ -0,0 +1,66 @@
+tffpy
+=====
+
+A Python package for time-frequency fading using Gabor multipliers, based on
+the paper *Time-frequency fading algorithms based on Gabor
+multipliers* by A. Marina Krémé, Valentin Emiya, Caroline
+Chaux and Bruno Torrésani, 2020.
+
+Install
+-------
+
+Install the current release with ``pip``::
+
+    pip install tffpy
+
+Download the data from `this link <https://gitlab.lis-lab.fr/skmad-suite/tff2020/-/archive/master/tff2020-master.zip?path=data>`_.
+
+Then run the function `tffpy.utils.generate_config` to create
+a configuration file, and edit it to specify the path to your data folder.
+The location of the configuration file is given by the function
+`tffpy.utils.get_config_file`.
+
+For additional details, see ``doc/install.rst``.
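+
+As a minimal sketch (hedged: the exact call signatures are assumed from the
+description above and may differ), the configuration step looks like::
+
+    from tffpy.utils import generate_config, get_config_file
+
+    generate_config()         # create a default configuration file
+    print(get_config_file())  # locate it, then set the data folder path inside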
+
+Usage
+-----
+
+See the `documentation <http://skmad-suite.pages.lis-lab.fr/tff2020/>`_.
+
+Bugs
+----
+
+Please report any bugs that you find through the `tffpy GitLab project
+<https://gitlab.lis-lab.fr/skmad-suite/tff2020/issues>`_.
+
+You can also fork the repository and create a merge request.
+
+Source code
+-----------
+
+The source code of tffpy is available via its `GitLab project
+<https://gitlab.lis-lab.fr/skmad-suite/tff2020>`_.
+
+You can clone the git repository of the project using the command::
+
+    git clone git@gitlab.lis-lab.fr:skmad-suite/tff2020.git
+
+Copyright © 2020
+----------------
+
+* `Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>`_
+* `Université d'Aix-Marseille <http://www.univ-amu.fr/>`_
+* `Centre National de la Recherche Scientifique <http://www.cnrs.fr/>`_
+* `Université de Toulon <http://www.univ-tln.fr/>`_
+
+Contributors
+------------
+
+* `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+* `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+
+License
+-------
+
+Released under the GNU General Public License version 3 or later
+(see `LICENSE.txt`).
diff --git a/python/VERSION b/python/VERSION
new file mode 100755
index 0000000000000000000000000000000000000000..7029df1ccdfe9fe757a9fee7e16b86862d8d07bc
--- /dev/null
+++ b/python/VERSION
@@ -0,0 +1 @@
+tffpy:0.1.3
diff --git a/python/doc/Makefile b/python/doc/Makefile
new file mode 100755
index 0000000000000000000000000000000000000000..64227f569466b8601987972e617aabe461d39d71
--- /dev/null
+++ b/python/doc/Makefile
@@ -0,0 +1,121 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = python -m sphinx 
+PAPER         =
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest epub
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html      to make standalone HTML files"
+	@echo "  dirhtml   to make HTML files named index.html in directories"
+	@echo "  pickle    to make pickle files"
+	@echo "  epub      to make an epub"
+	@echo "  json      to make JSON files"
+	@echo "  htmlhelp  to make HTML files and a HTML help project"
+	@echo "  qthelp    to make HTML files and a qthelp project"
+	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  changes   to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck to check all external links for integrity"
+	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"
+	@echo "  gitwash-update  to update the gitwash documentation"
+
+
+clean:
+	-rm -rf build/*
+	-rm -rf ghpages_build
+	-rm -rf auto_examples modules
+	-rm -rf reference/generated reference/algorithms/generated reference/classes/generated reference/readwrite/generated
+
+dist: html
+	test -d build/latex || make latex
+	make -C build/latex all-pdf
+	-rm -rf build/dist
+	(cd build/html; cp -r . ../../build/dist)
+	(cd build/dist && tar czf ../dist.tar.gz .)
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html
+	@echo
+	@echo "Build finished. The HTML pages are in build/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) build/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in build/dirhtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) build/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in build/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) build/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in build/qthelp, like this:"
+	@echo "# qcollectiongenerator build/qthelp/tffpy.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile build/qthelp/tffpy.qhc"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) build/epub
+	@echo
+	@echo "Build finished. The epub file is in build/epub."
+
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in build/latex."
+	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+	      "run these through (pdf)latex."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes
+	@echo
+	@echo "The overview file is in build/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in build/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) build/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in build/doctest/output.txt."
+
+latexpdf: latex
+	@echo "Running LaTeX files through latexmk..."
+	$(MAKE) -C build/latex all-pdf
+	@echo "latexmk finished; the PDF files are in build/latex."
+
+docs: clean html latexpdf
+	cp build/latex/networkx_reference.pdf build/html/_downloads/.
+	
+gitwash-update:
+	python ../tools/gitwash_dumper.py developer networkx \
+	--project-url=http://networkx.github.io \
+	--project-ml-url=http://groups.google.com/group/networkx-discuss/ \
+	--gitwash-url git@github.com:matthew-brett/gitwash.git
diff --git a/python/doc/README.md b/python/doc/README.md
new file mode 100755
index 0000000000000000000000000000000000000000..a0401d7e3f660c431df229b3991c00ae5ada6cd2
--- /dev/null
+++ b/python/doc/README.md
@@ -0,0 +1,28 @@
+If you only want to get the documentation, note that a pre-built
+version for the latest release is available
+[online](http://skmad-suite.pages.lis-lab.fr/tff2020/).
+
+Sphinx is used to generate the API and reference documentation.
+
+## Instructions to build the documentation
+
+In addition to installing ``tffpy`` and its dependencies, install the
+Python packages needed to build the documentation by entering
+
+```
+pip install -r ../requirements/doc.txt
+```
+in the ``doc/`` directory.
+
+To build the HTML documentation, run:
+```
+make html
+```
+in the ``doc/`` directory. This will generate a ``build/html`` subdirectory
+containing the built documentation.
+
+To build the PDF documentation, run:
+```
+make latexpdf
+```
+You will need to have LaTeX installed for this.
diff --git a/python/doc/_notebooks/baseline_interpolation_solver.ipynb b/python/doc/_notebooks/baseline_interpolation_solver.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..ebc94eb0dfc27b84176343ded1d67fab4467e0d0
--- /dev/null
+++ b/python/doc/_notebooks/baseline_interpolation_solver.ipynb
@@ -0,0 +1,131 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "ein.tags": [
+     "worksheet-0"
+    ],
+    "slideshow": {
+     "slide_type": "-"
+    }
+   },
+   "source": [
+    "# Demo for `tffpy.interpolation_solver`\n",
+    "\n",
+    "A simple demonstration of the baseline interpolation solver"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%load_ext autoreload\n",
+    "%autoreload 2\n",
+    "\n",
+    "%matplotlib inline"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%javascript\n",
+    "IPython.OutputArea.prototype._should_scroll = function(lines) {\n",
+    "    return false;\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "autoscroll": "json-false",
+    "collapsed": true,
+    "ein.tags": [
+     "worksheet-0"
+    ],
+    "slideshow": {
+     "slide_type": "-"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import matplotlib as mpl\n",
+    "mpl.rcParams['figure.figsize'] = [15.0, 7.0]\n",
+    "\n",
+    "from tffpy.datasets import get_mix\n",
+    "from tffpy.interpolation_solver import solve_by_interpolation"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "win_type = 'gauss'\n",
+    "win_dur = 256 / 8000\n",
+    "hop_ratio = 1 / 4\n",
+    "n_bins_ratio = 4\n",
+    "delta_mix_db = 0\n",
+    "delta_loc_db = 30\n",
+    "n_iter_closing = n_iter_opening = 3\n",
+    "wb_to_loc_ratio_db = 8\n",
+    "closing_first = True\n",
+    "or_mask = True\n",
+    "fig_dir = 'fig_interpolation'\n",
+    "\n",
+    "x_mix, dgt_params, signal_params, mask, x_bird, x_engine = \\\n",
+    "    get_mix(loc_source='bird', wideband_src='car',\n",
+    "            wb_to_loc_ratio_db=wb_to_loc_ratio_db,\n",
+    "            win_dur=win_dur, win_type=win_type,\n",
+    "            hop_ratio=hop_ratio, n_bins_ratio=n_bins_ratio,\n",
+    "            n_iter_closing=n_iter_closing,\n",
+    "            n_iter_opening=n_iter_opening,\n",
+    "            closing_first=closing_first,\n",
+    "            delta_mix_db=delta_mix_db, delta_loc_db=delta_loc_db,\n",
+    "            or_mask=or_mask, fig_dir=fig_dir)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "x_est = solve_by_interpolation(x_mix, mask, dgt_params, signal_params,\n",
+    "                               fig_dir)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.2"
+  },
+  "name": "data_structures.ipynb"
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/python/doc/_notebooks/create_subregions.ipynb b/python/doc/_notebooks/create_subregions.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..504350efeff48f13817750c8277b46fc4c3859d1
--- /dev/null
+++ b/python/doc/_notebooks/create_subregions.ipynb
@@ -0,0 +1,134 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Demo for `tffpy.create_subregions.create_subregions`\n",
+    "\n",
+    "A simple demonstration for creating sub-regions from a boolean time-frequency mask"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "%load_ext autoreload\n",
+    "%autoreload 2\n",
+    "\n",
+    "%matplotlib inline"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%javascript\n",
+    "IPython.OutputArea.prototype._should_scroll = function(lines) {\n",
+    "    return false;\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import matplotlib as mpl\n",
+    "mpl.rcParams['figure.figsize'] = [15.0, 7.0]\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "from tffpy.datasets import get_mix\n",
+    "from tffpy.create_subregions import create_subregions"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fig_dir = 'fig_create_subregions'\n",
+    "x_mix, dgt_params, signal_params, mask, x_loc, x_wb = \\\n",
+    "            get_mix(loc_source='bird',\n",
+    "                    wideband_src='car',\n",
+    "                    crop=8192,\n",
+    "                    win_dur=256/8000,\n",
+    "                    win_type='gauss',\n",
+    "                    hop_ratio=1/4,\n",
+    "                    n_bins_ratio=4,\n",
+    "                    n_iter_closing=3,\n",
+    "                    n_iter_opening=3,\n",
+    "                    closing_first=True,\n",
+    "                    delta_mix_db=0,\n",
+    "                    delta_loc_db=20,\n",
+    "                    wb_to_loc_ratio_db=16,\n",
+    "                    or_mask=True,\n",
+    "                    fig_dir=None)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "tol = 1e-9\n",
+    "mask_with_subregions, norms = create_subregions(mask_bool=mask, \n",
+    "                                                dgt_params=dgt_params, signal_params=signal_params, \n",
+    "                                                tol=tol, fig_dir=fig_dir, return_norms=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print('Values in mask:', np.unique(mask_with_subregions))\n",
+    "print('Number of sub-regions:', np.max(mask_with_subregions))\n",
+    "plt.semilogy(np.sort(np.unique(norms)))\n",
+    "plt.title('Distances between sub-regions (sorted norm coefficients without duplicates)')\n",
+    "plt.grid()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/doc/_notebooks/mask_energy_estimation.ipynb b/python/doc/_notebooks/mask_energy_estimation.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..c17567067640d621c9c54668779806c087175067
--- /dev/null
+++ b/python/doc/_notebooks/mask_energy_estimation.ipynb
@@ -0,0 +1,120 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Demo for `tffpy.tf_fading.estimate_energy_in_mask`\n",
+    "\n",
+    "A simple demonstration for the estimation of energy in time-frequency regions."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "%load_ext autoreload\n",
+    "%autoreload 2\n",
+    "\n",
+    "%matplotlib inline"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%javascript\n",
+    "IPython.OutputArea.prototype._should_scroll = function(lines) {\n",
+    "    return false;\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import matplotlib as mpl\n",
+    "mpl.rcParams['figure.figsize'] = [15.0, 7.0]\n",
+    "\n",
+    "from tffpy.datasets import get_mix\n",
+    "from tffpy.tf_fading import estimate_energy_in_mask"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fig_dir = 'fig_energy_estimation'\n",
+    "x_mix, dgt_params, signal_params, mask, x_loc, x_wb = \\\n",
+    "            get_mix(loc_source='bird',\n",
+    "                    wideband_src='car',\n",
+    "                    crop=None,\n",
+    "                    win_dur=256/8000,\n",
+    "                    win_type='gauss',\n",
+    "                    hop_ratio=1/4,\n",
+    "                    n_bins_ratio=4,\n",
+    "                    n_iter_closing=3,\n",
+    "                    n_iter_opening=3,\n",
+    "                    closing_first=True,\n",
+    "                    delta_mix_db=0,\n",
+    "                    delta_loc_db=40,\n",
+    "                    wb_to_loc_ratio_db=8,\n",
+    "                    or_mask=True,\n",
+    "                    fig_dir=fig_dir)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "estimate_energy_in_mask(x_mix=x_mix, mask=mask, dgt_params=dgt_params, signal_params=signal_params,\n",
+    "                        fig_dir=fig_dir, prefix=None)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/doc/conf.py b/python/doc/conf.py
new file mode 100755
index 0000000000000000000000000000000000000000..41f0359f21f2a8e7ce5cd1668ed6363c47b187b6
--- /dev/null
+++ b/python/doc/conf.py
@@ -0,0 +1,294 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+import sys
+import os
+
+from datetime import date
+
+import tffpy
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('../tffpy/'))
+
+# -- General configuration ------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+
+extensions = ['sphinx.ext.autodoc',
+              'sphinx.ext.autosummary',
+              'sphinx.ext.doctest',
+              'sphinx.ext.intersphinx',
+              'sphinx.ext.todo',
+              'sphinx.ext.coverage',
+              'sphinx.ext.mathjax',
+              'sphinx.ext.viewcode',
+              'numpydoc',
+              'nbsphinx',
+              'IPython.sphinxext.ipython_console_highlighting',
+              ]
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = 'tffpy'
+author = 'V. Emiya, A. Nother'
+copyright = '2019-{}, {}'.format(date.today().year, author)
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = tffpy.__version__
+# The full version, including alpha/beta/rc tags.
+release = tffpy.__version__.replace('_', '')
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# Else, today_fmt is used as the format for a strftime call.
+today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build', '**/test_*.rst', '**.ipynb_checkpoints']
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'bizstyle'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+# html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+# html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = []
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+html_last_updated_fmt = '%b %d, %Y'
+
+# Custom sidebar templates, maps document names to template names.
+html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+html_additional_pages = {}
+
+# If false, no module index is generated.
+html_domain_indices = True
+
+# If false, no index is generated.
+html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+html_search_scorer = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'tffpydoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+    # The paper size ('letterpaper' or 'a4paper').
+    'papersize': 'a4paper',
+
+    # The font size ('10pt', '11pt' or '12pt').
+    'pointsize': '10pt',
+
+    # Additional stuff for the LaTeX preamble.
+    'preamble': '',
+
+    # Latex figure (float) alignment
+    'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+    (master_doc, 'tffpy.tex', 'tffpy Documentation', author, 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+latex_use_parts = False
+
+# If true, show page references after internal links.
+latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+latex_appendices = []
+
+# If false, no module index is generated.
+latex_domain_indices = True
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [(master_doc, 'tffpy', 'tffpy Documentation', [author], 1)]
+
+# If true, show URL addresses after external links.
+man_show_urls = False
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    (master_doc, 'tffpy.tex', 'tffpy Documentation', author, 'manual'),
+]
+
+# Documents to append as an appendix to all manuals.
+texinfo_appendices = []
+
+# If false, no module index is generated.
+texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+texinfo_no_detailmenu = False
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {
+    'numpy': ('https://docs.scipy.org/doc/numpy/', None),
+    'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
+    'pandas': ('https://pandas.pydata.org/docs/', None),
+    'skpomade': ('http://valentin.emiya.pages.lis-lab.fr/skpomade/', None),
+    'yafe': ('http://skmad-suite.pages.lis-lab.fr/yafe/', None),
+    'ltfatpy': ('http://dev.pages.lis-lab.fr/ltfatpy/', None),
+}
+
+# Allow errors in notebook
+nbsphinx_allow_errors = True
+
+# Timeout in notebook
+nbsphinx_timeout = 120
+
+# Do not show class members
+numpydoc_show_class_members = False
+
+# Include todos
+todo_include_todos = True
+
+# Order members by member type
+autodoc_member_order = 'groupwise'
diff --git a/python/doc/credits.rst b/python/doc/credits.rst
new file mode 100755
index 0000000000000000000000000000000000000000..fe0706b10a88fe0e8ef4087b5879a6d03ec93147
--- /dev/null
+++ b/python/doc/credits.rst
@@ -0,0 +1,36 @@
+Credits
+=======
+
+Copyright(c) 2020
+-----------------
+
+* Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+* Université d'Aix-Marseille <http://www.univ-amu.fr/>
+* Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+* Université de Toulon <http://www.univ-tln.fr/>
+
+Contributors
+------------
+
+* Valentin Emiya <firstname.lastname_AT_lis-lab.fr>
+* Ama Marina Kreme <firstname.lastname_AT_lis-lab.fr>
+
+This package has been created thanks to the joint work with Florent Jaillet
+and Ronan Hamon on other packages.
+
+Licence
+-------
+This file is part of tffpy.
+
+tffpy is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program.  If not, see <http://www.gnu.org/licenses/>.
diff --git a/python/doc/index.rst b/python/doc/index.rst
new file mode 100755
index 0000000000000000000000000000000000000000..a49441d1a9f68e4ada66c467728aed126193c602
--- /dev/null
+++ b/python/doc/index.rst
@@ -0,0 +1,49 @@
+##########################
+:mod:`tffpy` documentation
+##########################
+
+Overview
+========
+:py:mod:`tffpy`: time-frequency fading problem and solvers using Gabor
+multipliers, based on the paper
+*Time-frequency fading algorithms based on Gabor multipliers*
+by A. M. Krémé, V. Emiya, C. Chaux and B. Torrésani, 2020.
+
+The package :py:mod:`tffpy` includes in particular:
+
+* class :py:class:`tffpy.tf_fading.GabMulTff` that implements the proposed
+  solver for reconstructing a source from a mixture and a time-frequency
+  binary mask.
+
+* class :py:class:`tffpy.experiments.exp_solve_tff.SolveTffExperiment` to
+  conduct the main experiment on mixtures of real sounds, with time-frequency
+  masks generated automatically, using the proposed and baseline solvers.
+  Script `tffpy.scripts.script_exp_solve_tff` provides example code to handle
+  the experiment (configuring it, running it on a computing grid or a single
+  computer, displaying results); a short usage sketch is given below.
+
+Similar and complementary code is available in Matlab.
+
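+A minimal sketch of how these pieces fit together is given below. The
+:py:func:`tffpy.datasets.get_mix` call mirrors the tutorial notebooks
+shipped with the package; the :py:class:`tffpy.tf_fading.GabMulTff`
+construction is only indicative and its exact signature should be checked
+in the reference documentation::
+
+    from tffpy.datasets import get_mix
+    from tffpy.tf_fading import GabMulTff
+
+    # Build a mixture of a localized and a wideband sound, together with
+    # the time-frequency binary mask covering the localized source.
+    x_mix, dgt_params, signal_params, mask, x_loc, x_wb = get_mix(
+        loc_source='bird', wideband_src='car',
+        win_dur=256 / 8000, win_type='gauss', fig_dir=None)
+
+    # Indicative solver construction (check the GabMulTff documentation
+    # for the exact parameters).
+    gmtff = GabMulTff(x_mix=x_mix, mask=mask, dgt_params=dgt_params,
+                      signal_params=signal_params)
+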
+Documentation
+=============
+
+.. only:: html
+
+    :Release: |version|
+    :Date: |today|
+
+.. toctree::
+    :maxdepth: 1
+
+    installation
+    references
+    tutorials
+    credits
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/python/doc/installation.rst b/python/doc/installation.rst
new file mode 100755
index 0000000000000000000000000000000000000000..a4eb5b80ff4bc9fe4099192b12277da9f3fd8ce8
--- /dev/null
+++ b/python/doc/installation.rst
@@ -0,0 +1,93 @@
+Installation
+############
+
+``tffpy`` requires the following packages, which will be automatically
+installed with ``tffpy`` using ``pip``:
+
+* `python >= 3.6 <https://wiki.python.org/moin/BeginnersGuide/Download>`_
+* `numpy >= 1.13 <http://www.numpy.org>`_
+* `scipy <https://www.scipy.org/>`_
+* `matplotlib <https://matplotlib.org/>`_
+* `pandas <https://pandas.pydata.org/>`_
+* `xarray <https://xarray.pydata.org/>`_
+* `ltfatpy <http://dev.pages.lis-lab.fr/ltfatpy/>`_
+* `skpomade <http://valentin.emiya.pages.lis-lab.fr/skpomade/>`_
+* `yafe <http://skmad-suite.pages.lis-lab.fr/yafe/>`_
+* `madarrays <https://gitlab.lis-lab.fr/skmad-suite/madarrays>`_
+
+Make sure your Python environment is properly configured. It is recommended to
+install ``tffpy`` in a virtual environment.
+
+Release version
+---------------
+
+First, make sure you have the latest version of pip (the Python package
+manager) installed. If you do not, refer to the `Pip documentation
+<https://pip.pypa.io/en/stable/installing/>`_ and install ``pip`` first.
+
+Install the current release with ``pip``::
+
+    pip install tffpy
+
+To upgrade to a newer release use the ``--upgrade`` flag::
+
+    pip install --upgrade tffpy
+
+If you do not have permission to install software systemwide, you can install
+into your user directory using the ``--user`` flag::
+
+    pip install --user tffpy
+
+Alternatively, you can manually download ``tffpy`` from its `GitLab project
+<https://gitlab.lis-lab.fr/skmad-suite/tff2020>`_  or `PyPI
+<https://pypi.python.org/pypi/tffpy>`_.  To install one of these versions,
+unpack it and run the following from the top-level source directory using the
+Terminal::
+
+    pip install .
+
+Dataset installation
+--------------------
+Download the data from `this link <https://gitlab.lis-lab.fr/skmad-suite/tff2020/-/archive/master/tff2020-master.zip?path=data>`_.
+
+Then run function :py:func:`tffpy.utils.generate_config` in order to create
+a configuration file and modify it to specify the path to your data folder.
+The location of the configuration file is given by function
+:py:func:`tffpy.utils.get_config_file`.
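+
+For instance, from a Python interpreter (a minimal sketch; it is assumed
+here that both functions can be called without arguments, see their
+documentation)::
+
+    from tffpy.utils import generate_config, get_config_file
+
+    generate_config()          # create a default configuration file
+    print(get_config_file())   # location of the file to edit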
+
+Development version
+-------------------
+
+If you have `Git <https://git-scm.com/>`_ installed on your system, it is also
+possible to install the development version of ``tffpy``.
+
+Before installing the development version, you may need to uninstall the
+standard version of ``tffpy`` using ``pip``::
+
+    pip uninstall tffpy
+
+Clone the Git repository::
+
+    git clone git@gitlab.lis-lab.fr:skmad-suite/tff2020.git
+    cd tff2020/python
+
+You may also need to install required packages::
+
+    pip install -r requirements/defaults.txt
+
+Then execute ``pip`` with flag ``-e`` to follow the development branch::
+
+    pip install -e .
+
+To update ``tffpy`` at any time, in the same directory do::
+
+    git pull
+
+To run unitary tests, first install required packages::
+
+    pip install -r requirements/dev.txt
+
+and execute ``pytest``::
+
+    pytest
+
diff --git a/python/doc/references.rst b/python/doc/references.rst
new file mode 100755
index 0000000000000000000000000000000000000000..b695502ae93c8e0e979c9baca7d650b63eb35379
--- /dev/null
+++ b/python/doc/references.rst
@@ -0,0 +1,62 @@
+References
+==========
+
+    :Release: |release|
+    :Date: |today|
+
+tffpy\.create_subregions module
+-------------------------------
+
+.. automodule:: tffpy.create_subregions
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+tffpy\.datasets module
+----------------------
+
+.. automodule:: tffpy.datasets
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+tffpy\.interpolation_solver module
+----------------------------------
+
+.. automodule:: tffpy.interpolation_solver
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+tffpy\.tf_fading module
+-----------------------
+
+.. automodule:: tffpy.tf_fading
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+tffpy\.tf_tools module
+----------------------
+
+.. automodule:: tffpy.tf_tools
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+tffpy\.utils module
+-------------------
+
+.. automodule:: tffpy.utils
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+tffpy\.experiments\.exp_solve_tff module
+----------------------------------------
+
+.. automodule:: tffpy.experiments.exp_solve_tff
+    :members:
+    :special-members: __call__
+    :undoc-members:
+    :show-inheritance:
diff --git a/python/doc/tutorials.rst b/python/doc/tutorials.rst
new file mode 100755
index 0000000000000000000000000000000000000000..655e76b31cba47101679144442c8256343d9d943
--- /dev/null
+++ b/python/doc/tutorials.rst
@@ -0,0 +1,9 @@
+Tutorials and demonstrations
+############################
+
+.. toctree::
+    :maxdepth: 1
+
+    _notebooks/mask_energy_estimation.ipynb
+    _notebooks/create_subregions.ipynb
+    _notebooks/baseline_interpolation_solver.ipynb
diff --git a/python/requirements/defaults.txt b/python/requirements/defaults.txt
new file mode 100755
index 0000000000000000000000000000000000000000..b7e2bc492d4be555cf14dbc17fba6f1ab38a3375
--- /dev/null
+++ b/python/requirements/defaults.txt
@@ -0,0 +1,11 @@
+--index-url https://pypi.python.org/simple/
+
+numpy>=1.13
+scipy>=1.4.1
+matplotlib>=3.1.2
+pandas
+xarray
+ltfatpy
+skpomade
+yafe
+madarrays
diff --git a/python/requirements/dev.txt b/python/requirements/dev.txt
new file mode 100755
index 0000000000000000000000000000000000000000..b287a430e410a343d9d1e4dbc663b158dbe15fa5
--- /dev/null
+++ b/python/requirements/dev.txt
@@ -0,0 +1,6 @@
+--index-url https://pypi.python.org/simple/
+
+coverage
+pytest
+pytest-cov
+pytest-randomly
diff --git a/python/requirements/doc.txt b/python/requirements/doc.txt
new file mode 100755
index 0000000000000000000000000000000000000000..3f09571e01be27274a79d20a1ca47f9f76be206f
--- /dev/null
+++ b/python/requirements/doc.txt
@@ -0,0 +1,5 @@
+--index-url https://pypi.python.org/simple/
+
+nbsphinx
+numpydoc
+sphinx
diff --git a/python/setup.cfg b/python/setup.cfg
new file mode 100755
index 0000000000000000000000000000000000000000..6f7af4bbeee5bdf7a61a820a35798c37db885179
--- /dev/null
+++ b/python/setup.cfg
@@ -0,0 +1,30 @@
+[tool:pytest]
+testpaths = tffpy
+addopts = --verbose
+          --cov-report=term-missing
+          --cov-report=html
+          --cov=tffpy
+          --doctest-modules
+
+[coverage:run]
+branch = True
+source = tffpy
+include = */tffpy/*
+omit = */tests/*
+
+[coverage:report]
+exclude_lines =
+    pragma: no cover
+    if self.debug:
+    if settings.DEBUG
+    raise AssertionError
+    raise NotImplementedError
+    if 0:
+    if __name__ == .__main__.:
+    if obj is None: return
+    if verbose > 0:
+    if self.verbose > 0:
+    if verbose > 1:
+    if self.verbose > 1:
+    pass
+    def __str__(self):
diff --git a/python/setup.py b/python/setup.py
new file mode 100755
index 0000000000000000000000000000000000000000..690309a1936e7b03cb1fb88bd986e367e3a6a7cd
--- /dev/null
+++ b/python/setup.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# ######### COPYRIGHT #########
+# Credits
+# #######
+#
+# Copyright(c) 2020-2020
+# ----------------------
+#
+# * Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+# * Université d'Aix-Marseille <http://www.univ-amu.fr/>
+# * Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+# * Université de Toulon <http://www.univ-tln.fr/>
+#
+# Contributors
+# ------------
+#
+# * `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+# * `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+#
+# This package has been created thanks to the joint work with Florent Jaillet
+# and Ronan Hamon on other packages.
+#
+# Description
+# -----------
+#
+# Time frequency fading using Gabor multipliers
+#
+# Version
+# -------
+#
+# * tffpy version = 0.1.3
+#
+# Licence
+# -------
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# ######### COPYRIGHT #########
+
+import os
+from setuptools import setup, find_packages
+import sys
+
+NAME = 'tffpy'
+DESCRIPTION = 'Time frequency fading using Gabor multipliers'
+LICENSE = 'GNU General Public License v3 (GPLv3)'
+URL = 'https://gitlab.lis-lab.fr/skmad-suite/tff2020'
+AUTHOR = 'Valentin Emiya, Ama Marina Kreme'
+AUTHOR_EMAIL = ('valentin.emiya@lis-lab.fr, '
+                'ama-marina.kreme@lis-lab.fr')
+INSTALL_REQUIRES = ['numpy', 'scipy', 'matplotlib', 'pandas', 'xarray',
+                    'ltfatpy', 'skpomade', 'yafe', 'madarrays']
+CLASSIFIERS = [
+    'Development Status :: 5 - Production/Stable',
+    'Intended Audience :: Developers',
+    'Intended Audience :: End Users/Desktop',
+    'Intended Audience :: Science/Research',
+    'Topic :: Scientific/Engineering :: Mathematics',
+    'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
+    'Natural Language :: English',
+    'Operating System :: MacOS :: MacOS X ',
+    'Operating System :: POSIX :: Linux',
+    'Programming Language :: Python :: 3.6']
+PYTHON_REQUIRES = '>=3.6'
+EXTRAS_REQUIRE = {
+    'dev': ['coverage', 'pytest', 'pytest-cov', 'pytest-randomly'],
+    'doc': ['nbsphinx', 'numpydoc', 'sphinx']}
+PROJECT_URLS = {'Bug Reports': URL + '/issues',
+                'Source': URL}
+KEYWORDS = 'time-frequency, fading, filtering, Gabor multiplier, audio'
+
+###############################################################################
+if sys.argv[-1] == 'setup.py':
+    print("To install, run 'python setup.py install'\n")
+
+if sys.version_info[:2] < (3, 6):
+    errmsg = '{0} requires Python 3.6 or later ({1[0]:d}.{1[1]:d} detected).'
+    print(errmsg.format(NAME, sys.version_info[:2]))
+    sys.exit(-1)
+
+
+def get_version():
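+    # The VERSION file is expected to contain one 'name:version' entry per
+    # line (e.g. 'tffpy:0.1.3'); it is converted into a dict literal below.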
+    v_text = open('VERSION').read().strip()
+    v_text_formted = '{"' + v_text.replace('\n', '","').replace(':', '":"')
+    v_text_formted += '"}'
+    v_dict = eval(v_text_formted)
+    return v_dict[NAME]
+
+
+def set_version(path, VERSION):
+    filename = os.path.join(path, '__init__.py')
+    buf = ""
+    for line in open(filename, "rb"):
+        if not line.decode("utf8").startswith("__version__ ="):
+            buf += line.decode("utf8")
+    f = open(filename, "wb")
+    f.write(buf.encode("utf8"))
+    f.write(('__version__ = "%s"\n' % VERSION).encode("utf8"))
+
+
+def setup_package():
+    """Setup function"""
+    # set version
+    VERSION = get_version()
+
+    here = os.path.abspath(os.path.dirname(__file__))
+    with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
+        long_description = f.read()
+
+    mod_dir = NAME
+    set_version(mod_dir, get_version())
+    setup(name=NAME,
+          version=VERSION,
+          description=DESCRIPTION,
+          long_description=long_description,
+          url=URL,
+          author=AUTHOR,
+          author_email=AUTHOR_EMAIL,
+          license=LICENSE,
+          classifiers=CLASSIFIERS,
+          keywords=KEYWORDS,
+          packages=find_packages(exclude=['doc', 'dev']),
+          install_requires=INSTALL_REQUIRES,
+          python_requires=PYTHON_REQUIRES,
+          extras_require=EXTRAS_REQUIRE,
+          project_urls=PROJECT_URLS)
+
+
+if __name__ == "__main__":
+    setup_package()
diff --git a/python/tffpy/__init__.py b/python/tffpy/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..76ef20e1ba0c156ceb3b24c160254a76342a36a6
--- /dev/null
+++ b/python/tffpy/__init__.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+""" Filtering out time-frequency using Gabor multipliers
+
+.. moduleauthor:: Valentin Emiya
+"""
+# from .tf_tools import GaborMultiplier, get_dgt_params, get_signal_params, dgt
+
+# __all__ = ['GaborMultiplier', 'get_dgt_params', 'get_signal_params', 'dgt']
+
+# TODO minimal documentation
+__version__ = "0.1.3"
diff --git a/python/tffpy/create_subregions.py b/python/tffpy/create_subregions.py
new file mode 100644
index 0000000000000000000000000000000000000000..87494c930c58366217a1245e927dd77700a613e9
--- /dev/null
+++ b/python/tffpy/create_subregions.py
@@ -0,0 +1,309 @@
+# -*- coding: utf-8 -*-
+# ######### COPYRIGHT #########
+# Credits
+# #######
+#
+# Copyright(c) 2020-2020
+# ----------------------
+#
+# * Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+# * Université d'Aix-Marseille <http://www.univ-amu.fr/>
+# * Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+# * Université de Toulon <http://www.univ-tln.fr/>
+#
+# Contributors
+# ------------
+#
+# * `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+# * `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+#
+# This package has been created thanks to the joint work with Florent Jaillet
+# and Ronan Hamon on other packages.
+#
+# Description
+# -----------
+#
+# Time frequency fading using Gabor multipliers
+#
+# Version
+# -------
+#
+# * tffpy version = 0.1.3
+#
+# Licence
+# -------
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# ######### COPYRIGHT #########
+"""
+
+.. moduleauthor:: Valentin Emiya
+"""
+# TODO check if eigs(, 1) can be replaced by Halko to run faster
+from pathlib import Path
+import warnings
+import numpy as np
+import matplotlib.pyplot as plt
+from scipy.ndimage import label
+from scipy.sparse.linalg import eigs
+
+from tffpy.utils import plot_mask
+from tffpy.tf_tools import GaborMultiplier
+
+
+def create_subregions(mask_bool, dgt_params, signal_params, tol,
+                      fig_dir=None, return_norms=False):
+    """
+    Create sub-regions from boolean mask and tolerance on sub-region distance.
+
+    See Algorithm 3 *Finding sub-regions for TFF-P* in the reference paper.
+
+    Parameters
+    ----------
+    mask_bool : nd-array
+        Time-frequency boolean mask
+    dgt_params : dict
+        DGT parameters
+    signal_params : dict
+        Signal parameters
+    tol : float
+        Tolerance on sub-region distance (spectral norm of the composition
+        of the Gabor multipliers related to two candidate sub-regions).
+    fig_dir : Path
+        If not None, folder where figures are stored. If None, figures are
+        not plotted.
+    return_norms : bool
+        If True, the final distance matrix is returned as a second output.
+
+    Returns
+    -------
+    mask_labeled : nd-array
+        Time-frequency mask with one positive integer for each sub-region
+        and zeros outside sub-regions.
+    pq_norms : nd-array
+        Matrix of distances between sub-regions.
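+
+    Examples
+    --------
+    A minimal sketch, assuming `mask`, `dgt_params` and `signal_params`
+    have been obtained beforehand, e.g. from
+    :py:func:`tffpy.datasets.get_mix` (the tolerance value is the one used
+    in the main experiment)::
+
+        mask_labeled = create_subregions(
+            mask_bool=mask, dgt_params=dgt_params,
+            signal_params=signal_params, tol=1e-5, fig_dir=None)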
+    """
+    mask_labeled, n_labels = label(mask_bool)
+    pq_norms = _get_pq_norms(mask=mask_labeled,
+                             dgt_params=dgt_params, signal_params=signal_params)
+
+    if fig_dir is not None:
+        fig_dir = Path(fig_dir)
+        fig_dir.mkdir(parents=True, exist_ok=True)
+
+        plt.figure()
+        plot_mask(mask=mask_labeled, hop=dgt_params['hop'],
+                  n_bins=dgt_params['n_bins'], fs=signal_params['fs'])
+        plt.set_cmap('nipy_spectral')
+        plt.title('Initial subregions')
+        plt.savefig(fig_dir / 'initial_subregions.pdf')
+
+        # from matplotlib.colors import LogNorm
+        plt.figure()
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+            plt.imshow(np.log10(pq_norms+pq_norms.T), origin='lower')
+        plt.ylabel('Sub-region index')
+        plt.xlabel('Sub-region index')
+        plt.colorbar()
+        plt.set_cmap('viridis')
+        plt.title('Initial norms of Gabor multiplier composition')
+        plt.savefig(fig_dir / 'initial_norms.pdf')
+        n_labels_max = n_labels
+
+    while pq_norms.max() > tol:
+        # Merge each pair (p, q), q < p, such that pq_norms[p, q] > tol
+        to_be_updated = [False] * n_labels
+        while pq_norms.max() > tol:
+            i_p, i_q = np.unravel_index(np.argmax(pq_norms, axis=None),
+                                        pq_norms.shape)
+            mask_labeled, pq_norms = _merge_subregions(mask=mask_labeled,
+                                                       pq_norms=pq_norms,
+                                                       i_p=i_p, i_q=i_q)
+            to_be_updated[i_q] = True
+            to_be_updated[i_p] = to_be_updated[-1]
+            to_be_updated = to_be_updated[:-1]
+            n_labels -= 1
+        for i_p in range(n_labels):
+            if to_be_updated[i_p]:
+                _update_pq_norms(mask=mask_labeled,
+                                 pq_norms=pq_norms, i_p=i_p,
+                                 dgt_params=dgt_params,
+                                 signal_params=signal_params)
+        # print('Merge sub-region p={}'.format(i_p))
+
+        if fig_dir is not None:
+            plt.figure()
+            plot_mask(mask=mask_labeled, hop=dgt_params['hop'],
+                      n_bins=dgt_params['n_bins'], fs=signal_params['fs'])
+            plt.title('subregions')
+            plt.set_cmap('nipy_spectral')
+            plt.savefig(fig_dir / 'subregions_i{}.pdf'
+                        .format(n_labels_max-n_labels))
+
+            plt.figure()
+            with warnings.catch_warnings():
+                warnings.simplefilter("ignore")
+                plt.imshow(np.log10(pq_norms+pq_norms.T), origin='lower')
+            plt.ylabel('Sub-region index')
+            plt.xlabel('Sub-region index')
+            plt.colorbar()
+            plt.set_cmap('viridis')
+            plt.title('norms of Gabor multiplier composition')
+            plt.savefig(fig_dir / 'norms__i{}.pdf'
+                        .format(n_labels_max-n_labels))
+
+    if fig_dir is not None:
+        plt.figure()
+        plot_mask(mask=mask_labeled, hop=dgt_params['hop'],
+                  n_bins=dgt_params['n_bins'], fs=signal_params['fs'])
+        plt.title('Final subregions')
+        plt.set_cmap('nipy_spectral')
+        plt.savefig(fig_dir / 'final_subregions.pdf')
+
+        plt.figure()
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+            plt.imshow(np.log10(pq_norms+pq_norms.T), origin='lower')
+        plt.ylabel('Sub-region index')
+        plt.xlabel('Sub-region index')
+        plt.colorbar()
+        plt.set_cmap('viridis')
+        plt.title('Final norms of Gabor multiplier composition')
+        plt.savefig(fig_dir / 'final_norms.pdf')
+
+    if return_norms:
+        return mask_labeled, pq_norms
+    else:
+        return mask_labeled
+
+
+def _get_pq_norms(mask, dgt_params, signal_params):
+    """
+    Compute distance matrix between sub-regions.
+
+    Parameters
+    ----------
+    mask : nd-array
+        Time-frequency mask with one positive integer for each sub-region
+        and zeros outside sub-regions.
+    dgt_params : dict
+        DGT parameters
+    signal_params : dict
+        Signal parameters
+
+    Returns
+    -------
+    pq_norms : nd-array
+        Matrix of distances between sub-regions.
+    """
+    n_labels = np.unique(mask).size - 1
+    pq_norms = np.zeros((n_labels, n_labels))
+    for i_p in range(n_labels):
+        for i_q in range(i_p):
+            gabmul_p = GaborMultiplier(mask=(mask == i_p + 1),
+                                       dgt_params=dgt_params,
+                                       signal_params=signal_params)
+            gabmul_q = GaborMultiplier(mask=(mask == i_q + 1),
+                                       dgt_params=dgt_params,
+                                       signal_params=signal_params)
+            gabmul_pq = gabmul_p @ gabmul_q
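+            # Distance between sub-regions p and q: largest-magnitude
+            # eigenvalue of the composition of their Gabor multipliers.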
+            pq_norms[i_p, i_q] = \
+                np.real(eigs(A=gabmul_pq, k=1, return_eigenvectors=False)[0])
+    return pq_norms
+
+
+def _update_pq_norms(mask, pq_norms, i_p, dgt_params, signal_params):
+    """
+    Update (in-place) distance between one particular sub-region and all
+    sub-regions in distance matrix.
+
+    Parameters
+    ----------
+    mask : nd-array
+        Time-frequency mask with one positive integer for each sub-region
+        and zeros outside sub-regions.
+    pq_norms : nd-array
+        Matrix of distances between sub-regions, updated in-place.
+    i_p : int
+        Index of sub-region to be updated
+    dgt_params : dict
+        DGT parameters
+    signal_params : dict
+        Signal parameters
+
+    """
+    n_labels = pq_norms.shape[0]
+    gabmul_p = GaborMultiplier(mask=(mask == i_p + 1),
+                               dgt_params=dgt_params,
+                               signal_params=signal_params)
+    for i_q in range(n_labels):
+        if i_p == i_q:
+            continue
+        gabmul_q = GaborMultiplier(mask=(mask == i_q + 1),
+                                   dgt_params=dgt_params,
+                                   signal_params=signal_params)
+        gabmul_pq = gabmul_p @ gabmul_q
+        gabmul_pq_norm = \
+            np.real(eigs(A=gabmul_pq, k=1, return_eigenvectors=False)[0])
+        if i_q < i_p:
+            pq_norms[i_p, i_q] = gabmul_pq_norm
+        else:
+            pq_norms[i_q, i_p] = gabmul_pq_norm
+
+
+def _merge_subregions(mask, pq_norms, i_p, i_q):
+    """
+    Merge two sub-regions indexed by `i_p` and `i_q`
+
+
+    In the time-frequency mask, the label of the region indexed by `i_p`
+    is replaced by the label of the region indexed by `i_q`, and index
+    `i_p` is reused to relabel the region with the highest label.
+
+    In the distance matrix, rows and columns are moved accordingly. The
+    distance between the new, merged sub-region and all other sub-regions is
+    not updated; it can be done by calling :py:func:`_update_pq_norms`.
+
+    Parameters
+    ----------
+    mask : nd-array
+        Time-frequency mask with one positive integer for each sub-region
+        and zeros outside sub-regions.
+    pq_norms : nd-array
+        Matrix of distances between sub-regions.
+    i_p : int
+        Index of sub-region that will be removed after merging.
+    i_q : int
+        Index of sub-region that will receive the result.
+
+    Returns
+    -------
+    mask : nd-array
+        Updated time-frequency mask with one positive integer for each
+        sub-region and zeros outside sub-regions.
+    pq_norms : nd-array
+        Updated distance matrix (except for distance with the new sub-region).
+
+    """
+    p = i_p + 1
+    q = i_q + 1
+
+    n_labels = pq_norms.shape[0]
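+    # Merge p into q, then reuse label p for the (formerly) highest label
+    # so that labels remain contiguous.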
+    mask[mask == p] = q
+    mask[mask == n_labels] = p
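+    # Move the distances of the formerly last sub-region into the row and
+    # column freed by i_p, then drop the last row and column.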
+    pq_norms[i_p, :i_p - 1] = pq_norms[-1, :i_p - 1]
+    pq_norms[i_p:, i_p] = pq_norms[-1, i_p:]
+    pq_norms = pq_norms[:-1, :-1]
+    return mask, pq_norms
diff --git a/python/tffpy/datasets.py b/python/tffpy/datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..900d444f38f051d8058e318205bdc318797493ef
--- /dev/null
+++ b/python/tffpy/datasets.py
@@ -0,0 +1,292 @@
+# -*- coding: utf-8 -*-
+# ######### COPYRIGHT #########
+# Credits
+# #######
+#
+# Copyright(c) 2020-2020
+# ----------------------
+#
+# * Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+# * Université d'Aix-Marseille <http://www.univ-amu.fr/>
+# * Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+# * Université de Toulon <http://www.univ-tln.fr/>
+#
+# Contributors
+# ------------
+#
+# * `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+# * `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+#
+# This package has been created thanks to the joint work with Florent Jaillet
+# and Ronan Hamon on other packages.
+#
+# Description
+# -----------
+#
+# Time frequency fading using Gabor multipliers
+#
+# Version
+# -------
+#
+# * tffpy version = 0.1.3
+#
+# Licence
+# -------
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# ######### COPYRIGHT #########
+"""
+
+.. moduleauthor:: Valentin Emiya
+"""
+from pathlib import Path
+
+import numpy as np
+from matplotlib import pyplot as plt
+from scipy.ndimage import \
+    binary_opening, binary_closing, generate_binary_structure
+
+from madarrays import Waveform
+from ltfatpy import plotdgtreal
+
+from tffpy.tf_tools import get_signal_params, get_dgt_params, GaborMultiplier
+from tffpy.utils import dgt, db, plot_spectrogram, plot_mask, get_data_path
+
+
+default_data_root_dir = get_data_path()
+default_data_dir = default_data_root_dir / 'data_8000Hz_16384samples'
+
+
+def get_dataset():
+    """
+    Get dataset for isolated wideband and localized sources before mixing.
+
+    Returns
+    -------
+    dataset : dict
+        dataset['wideband'] (resp. dataset['localized']) is a dictionary
+        containing the :py:class:`~pathlib.Path` object for all the wideband
+        (resp. localized) sounds.
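+
+    Examples
+    --------
+    A short sketch; the available keys depend on the data folder
+    configured for :py:mod:`tffpy`::
+
+        dataset = get_dataset()
+        print(sorted(dataset['wideband'].keys()))
+        print(sorted(dataset['localized'].keys()))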
+    """
+    dataset = dict()
+    dataset['wideband'] = {
+        x.stem: x
+        for x in (default_data_dir / 'wide_band_sources').glob('*.wav')
+    }
+    dataset['localized'] = {
+        x.stem: x
+        for x in (default_data_dir / 'localized_sources').glob('*.wav')
+    }
+    return dataset
+
+
+def get_mix(loc_source, wideband_src, crop=None,
+            wb_to_loc_ratio_db=0, win_dur=128 / 8000, win_type='gauss',
+            hop_ratio=1/4, n_bins_ratio=4, n_iter_closing=2,
+            n_iter_opening=2, delta_mix_db=0, delta_loc_db=30,
+            closing_first=True, or_mask=False,
+            fig_dir=None, prefix=''):
+    """
+    Build the mix of two sounds and the related time-frequency boolean mask.
+
+    Parameters
+    ----------
+    loc_source : Path
+        Localized sound file.
+    wideband_src : Path
+        Wideband sound file.
+    crop : int or None
+        If not None, a cropped, centered portion of the sound will be
+        extracted with the specified length, in samples.
+    wb_to_loc_ratio_db : float
+        Wideband source to localized source energy ratio to be adjusted in
+        the mix.
+    win_dur : float
+        Window duration, in seconds.
+    win_type : str
+        Window name
+    hop_ratio : float
+        Ratio of the window length that will be set as hop size for the DGT.
+    n_bins_ratio : float
+        Factor that will be applied to the window length to compute the
+        number of bins in the DGT.
+    delta_mix_db : float
+        Coefficient energy ratio, in dB, between the wideband source and the
+        localized source in the mixture in order to select coefficients in
+        the mask.
+    delta_loc_db : float
+        Dynamic range, in dB, for the localized source in order to select
+        coefficients in the mask.
+    or_mask : bool
+        If True, the mask is built by taking the union of the two masks
+        obtained using thresholds `delta_mix_db` and `delta_loc_db`. If
+        False, the intersection is taken.
+    n_iter_closing : int
+        Number of successive morphological closings with radius 1
+        (equivalently, the radius of a single closing).
+    n_iter_opening : int
+        Number of successive morphological openings with radius 1
+        (equivalently, the radius of a single opening).
+    closing_first : bool
+        If True, morphological closings are applied first, followed by
+        openings. If False, the reverse way is used.
+    fig_dir : None or str or Path
+        If not None, folder where figures are stored. If None, figures are
+        not plotted.
+    prefix : str
+        If not None, this prefix is used when saving the figures.
+
+    Returns
+    -------
+    x_mix : Waveform
+        Mix signal (sum of outputs `x_loc` and `x_wb`)
+    dgt_params : dict
+        DGT parameters
+    signal_params : dict
+        Signal parameters
+    mask : nd-array
+        Time-frequency binary mask
+    x_loc : Waveform
+        Localized source signal
+    x_wb : Waveform
+        Wideband source signal
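+
+    Examples
+    --------
+    The call below mirrors the one used in the tutorial notebook on mask
+    energy estimation; set `fig_dir` to a folder name to also save the
+    related figures::
+
+        x_mix, dgt_params, signal_params, mask, x_loc, x_wb = get_mix(
+            loc_source='bird', wideband_src='car', crop=None,
+            win_dur=256 / 8000, win_type='gauss', hop_ratio=1 / 4,
+            n_bins_ratio=4, n_iter_closing=3, n_iter_opening=3,
+            closing_first=True, delta_mix_db=0, delta_loc_db=40,
+            wb_to_loc_ratio_db=8, or_mask=True, fig_dir=None)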
+    """
+    dataset = get_dataset()
+
+    x_loc = Waveform.from_wavfile(dataset['localized'][loc_source])
+    x_wb = Waveform.from_wavfile(dataset['wideband'][wideband_src])
+    np.testing.assert_array_equal(x_loc.shape, x_wb.shape)
+    if crop is not None:
+        x_len = crop
+        i_start = (x_loc.shape[0] - x_len) // 2
+        x_loc = x_loc[i_start:i_start+x_len]
+        x_wb = x_wb[i_start:i_start+x_len]
+    signal_params = get_signal_params(sig_len=x_loc.shape[0], fs=x_loc.fs)
+
+    # Unit energy
+    x_loc /= np.linalg.norm(x_loc)
+    x_wb /= np.linalg.norm(x_wb)
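+    # Scale the unit-energy sources so that their energy ratio matches
+    # wb_to_loc_ratio_db (in dB).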
+    gain_wb = 1 / (1 + 10 ** (-wb_to_loc_ratio_db / 20))
+    x_loc *= (1 - gain_wb)
+    x_wb *= gain_wb
+
+    # Build mix
+    x_mix = x_loc + x_wb
+
+    # Build dgt
+    fs = x_loc.fs
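+    # Window length in samples, rounded to the nearest power of two.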
+    approx_win_len = int(2 ** np.round(np.log2(win_dur * fs)))
+    hop = int(approx_win_len * hop_ratio)
+    n_bins = int(approx_win_len * n_bins_ratio)
+    sig_len = x_loc.shape[0]
+    dgt_params = get_dgt_params(win_type=win_type,
+                                approx_win_len=approx_win_len,
+                                hop=hop, n_bins=n_bins, sig_len=sig_len)
+
+    tf_mat_loc_db = db(np.abs(dgt(x_loc, dgt_params=dgt_params)))
+    tf_mat_wb_db = db(np.abs(dgt(x_wb, dgt_params=dgt_params)))
+
+    # Build mask_raw
+    mask_mix = tf_mat_loc_db > tf_mat_wb_db + delta_mix_db
+    mask_loc = tf_mat_loc_db > tf_mat_loc_db.max() - delta_loc_db
+
+    if or_mask:
+        mask_raw = np.logical_or(mask_mix, mask_loc)
+    else:
+        mask_raw = np.logical_and(mask_mix, mask_loc)
+
+    struct = generate_binary_structure(2, 1)
+    if n_iter_closing > 0:
+        if closing_first:
+            mask = binary_opening(
+                binary_closing(input=mask_raw, structure=struct,
+                               iterations=n_iter_closing, border_value=1),
+                iterations=n_iter_opening, structure=struct, border_value=0)
+        else:
+            mask = binary_closing(
+                binary_opening(input=mask_raw, structure=struct,
+                               iterations=n_iter_opening, border_value=0),
+                iterations=n_iter_closing, structure=struct, border_value=1)
+    else:
+        mask = mask_raw
+
+
+    if fig_dir is not None:
+        fig_dir = Path(fig_dir)
+        fig_dir.mkdir(exist_ok=True, parents=True)
+        if len(prefix) > 0:
+            prefix = prefix + '_'
+
+        plt.figure()
+        plot_mask(mask=mask_mix, hop=dgt_params['hop'],
+                  n_bins=dgt_params['n_bins'], fs=signal_params['fs'])
+        plt.title('Mask Mix - Area: {} ({:.1%})'.format(mask_mix.sum(),
+                                                        np.average(mask_mix)))
+        plt.tight_layout()
+        plt.savefig(fig_dir / 'mask_mix.pdf')
+
+        plt.figure()
+        plot_mask(mask=mask_loc, hop=dgt_params['hop'],
+                  n_bins=dgt_params['n_bins'], fs=signal_params['fs'])
+        plt.title('Mask Loc - Area: {} ({:.1%})'.format(mask_loc.sum(),
+                                                        np.average(mask_loc)))
+        plt.tight_layout()
+        plt.savefig(fig_dir / 'mask_loc.pdf')
+
+        plt.figure()
+        plot_spectrogram(x=x_mix, dgt_params=dgt_params, fs=fs)
+        plt.title('Mix')
+        plt.tight_layout()
+        plt.savefig(fig_dir / 'mix_spectrogram.pdf')
+
+        plt.figure()
+        plot_mask(mask=mask_raw, hop=dgt_params['hop'],
+                  n_bins=dgt_params['n_bins'], fs=fs)
+        plt.title('Raw mask')
+        plt.tight_layout()
+        plt.savefig(fig_dir / 'raw_mask.pdf')
+
+        plt.figure()
+        plot_mask(mask=mask, hop=dgt_params['hop'],
+                  n_bins=dgt_params['n_bins'], fs=fs)
+        plt.tight_layout()
+        plt.title('Smoothed mask')
+        plt.savefig(fig_dir / 'smoothed_mask.pdf')
+
+        plt.figure()
+        plot_spectrogram(x=x_loc, dgt_params=dgt_params, fs=fs)
+        plt.title('Loc')
+        plt.tight_layout()
+        plt.savefig(fig_dir / 'loc_source.pdf')
+
+        plt.figure()
+        tf_mat = dgt(x_loc, dgt_params=dgt_params) * mask
+        plotdgtreal(coef=tf_mat, a=dgt_params['hop'], M=dgt_params['n_bins'],
+                    fs=fs, dynrange=100)
+        plt.title('Masked loc')
+        plt.tight_layout()
+        plt.savefig(fig_dir / 'masked_loc.pdf')
+
+        plt.figure()
+        gabmul = GaborMultiplier(mask=~mask,
+                                 dgt_params=dgt_params,
+                                 signal_params=signal_params)
+        x_est = gabmul @ x_wb
+        plot_spectrogram(x=x_est, dgt_params=dgt_params, fs=fs)
+        plt.title('Filtered wb')
+        plt.tight_layout()
+        plt.savefig(fig_dir / '{}zerofill_spectrogram.pdf'.format(prefix))
+
+    return x_mix, dgt_params, signal_params, mask, x_loc, x_wb
diff --git a/python/tffpy/experiments/__init__.py b/python/tffpy/experiments/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/python/tffpy/experiments/exp_solve_tff.py b/python/tffpy/experiments/exp_solve_tff.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ca6e21af4f51bea5e479473873660e2937ba1c0
--- /dev/null
+++ b/python/tffpy/experiments/exp_solve_tff.py
@@ -0,0 +1,956 @@
+# -*- coding: utf-8 -*-
+# ######### COPYRIGHT #########
+# Credits
+# #######
+#
+# Copyright(c) 2020-2020
+# ----------------------
+#
+# * Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+# * Université d'Aix-Marseille <http://www.univ-amu.fr/>
+# * Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+# * Université de Toulon <http://www.univ-tln.fr/>
+#
+# Contributors
+# ------------
+#
+# * `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+# * `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+#
+# This package has been created thanks to the joint work with Florent Jaillet
+# and Ronan Hamon on other packages.
+#
+# Description
+# -----------
+#
+# Time frequency fading using Gabor multipliers
+#
+# Version
+# -------
+#
+# * tffpy version = 0.1.3
+#
+# Licence
+# -------
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# ######### COPYRIGHT #########
+"""
+Class `SolveTffExperiment` uses the :class:`yafe.base.Experiment`
+framework to handle the main time-frequency fading experiment: it includes
+loading the data, generating the problems, applying solvers and exploiting
+the results.
+
+See the `documentation <http://skmad-suite.pages.lis-lab.fr/yafe/>`_ of
+package :py:mod:`yafe` for the technical details.
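+
+A typical workflow is sketched below; apart from
+:meth:`SolveTffExperiment.get_experiment` and
+:meth:`SolveTffExperiment.plot_results`, the launching and collection
+steps are the generic :class:`yafe.base.Experiment` methods and should be
+checked against the :py:mod:`yafe` documentation::
+
+    exp = SolveTffExperiment.get_experiment(setting='light')
+    exp.launch_experiment()  # run all generated tasks (yafe generic method)
+    exp.collect_results()    # gather per-task results (yafe generic method)
+    exp.plot_results()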
+
+.. moduleauthor:: Valentin Emiya
+"""
+
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib
+import pandas as pd
+from pathlib import Path
+
+from yafe import Experiment
+from madarrays import Waveform
+
+from tffpy.datasets import get_mix, get_dataset
+from tffpy.tf_fading import GabMulTff, compute_lambda_oracle_sdr
+from tffpy.interpolation_solver import solve_by_interpolation
+from tffpy.utils import \
+    sdr, plot_spectrogram, is_div_spectrum, plot_mask, db, dgt
+
+
+class SolveTffExperiment(Experiment):
+    """
+    The main experiment to solve time-frequency fading problems with a
+    number of sound mixtures and solvers.
+
+    Parameters
+    ----------
+    force_reset : bool
+        If true, reset the experiment by erasing all previous results
+        in order to run it from scratch. If False, the existing results are
+        kept in order to proceed with the existing experiment.
+    suffix : str
+        Suffix that is appended to the name of the experiment, useful to
+        save results in a specific folder.
+    """
+
+    def __init__(self, force_reset=False, suffix=''):
+        Experiment.__init__(self,
+                            name='SolveTffExperiment' + suffix,
+                            get_data=get_data,
+                            get_problem=Problem,
+                            get_solver=Solver,
+                            measure=perf_measures,
+                            force_reset=force_reset,
+                            log_to_file=False,
+                            log_to_console=False)
+        self.fig_dir = self.xp_path / 'figures'
+        # a little trick to save collections when computing performance
+        self.measure = lambda **x: perf_measures(**x, exp=self)
+
+    @property
+    def n_tasks(self):
+        """
+        Number of tasks
+
+        Returns
+        -------
+        int
+        """
+        return len(list((self.xp_path / 'tasks').glob('*')))
+
+    @staticmethod
+    def get_experiment(setting='full', force_reset=False):
+        """
+        Get the experiment instance with default values in order to handle it.
+
+        Parameters
+        ----------
+        setting : {'full', 'light'}
+            If 'full', the default values are set to run the full
+            experiment. If 'light', the default values are set to have a
+            very light experiment with few tasks, running fast, for test
+            purposes.
+        force_reset : bool
+            If true, reset the experiment by erasing all previous results
+            in order to run it from scratch. If False, the existing results are
+            kept in order to proceed with the existing experiment.
+
+        Returns
+        -------
+        SolveTffExperiment
+        """
+        assert setting in ('full', 'light')
+
+        dataset = get_dataset()
+        # Set task parameters
+        data_params = dict(loc_source=list(dataset['localized'].keys()),
+                           wideband_src=list(dataset['wideband'].keys()))
+        problem_params = dict(win_choice=['gauss 256', 'hann 512'],
+                              wb_to_loc_ratio_db=8,
+                              n_iter_closing=3, n_iter_opening=3,
+                              closing_first=True,
+                              delta_mix_db=0,
+                              delta_loc_db=40,
+                              or_mask=True,
+                              crop=None,
+                              fig_dir=None)
+        solver_params = dict(tol_subregions=[None, 1e-5],
+                             tolerance_arrf=1e-3,
+                             proba_arrf=1 - 1e-4)
+        if setting == 'light':
+            data_params['loc_source'] = 'bird'
+            data_params['wideband_src'] = 'car'
+            problem_params['win_choice'] = ['gauss 64', 'hann 128']
+            problem_params['crop'] = 4096
+            problem_params['delta_loc_db'] = 20
+            problem_params['wb_to_loc_ratio_db'] = 16
+            solver_params['tolerance_arrf'] = 1e-2
+            solver_params['proba_arrf'] = 1 - 1e-2
+
+        # Create Experiment
+        suffix = '' if setting == 'full' else '_Light'
+        exp = SolveTffExperiment(force_reset=force_reset,
+                                 suffix=suffix)
+        exp.add_tasks(data_params=data_params,
+                      problem_params=problem_params,
+                      solver_params=solver_params)
+        exp.generate_tasks()
+        return exp
+
+    def export_task_params(self, csv_path=None):
+        """
+        Export task parameters to a csv file and to a
+        :class:`pandas.DataFrame` object.
+
+        Parameters
+        ----------
+        csv_path : str or Path
+            Name of the csv file to be written. If None, file is
+            located in the experiment folder with name 'task_params.csv'.
+
+        Returns
+        -------
+        pandas.DataFrame
+        """
+        if csv_path is None:
+            csv_path = self.xp_path / 'task_params.csv'
+        else:
+            csv_path = Path(csv_path)
+        task_list = []
+        for i_task in range(self.n_tasks):
+            task = self.get_task_data_by_id(idt=i_task)
+            task_list.append({k + '_' + kk: task['task_params'][k][kk]
+                              for k in task['task_params']
+                              for kk in task['task_params'][k]})
+        df = pd.DataFrame(task_list)
+        df.to_csv(csv_path)
+        print('Task params exported to', csv_path)
+        return df
+
+    def generate_tasks(self):
+        """
+        Generate tasks and export params to a csv file
+
+        See :py:meth:`yafe.Experiment.generate_tasks`
+        """
+        Experiment.generate_tasks(self)
+        self.export_task_params()
+
+    def get_misc_file(self, task_params=None, idt=None):
+        """
+        Get file with some additional task results.
+
+        This has been set up in order to pass additional data in a way that
+        could not be handled by the :py:mod:`yafe` framework.
+
+        Parameters
+        ----------
+        task_params : dict
+            Task parameters.
+        idt : int
+            Task identifier. Either `task_params` or `idt` should be given
+            in order to specify the task.
+
+        Returns
+        -------
+        Path
+            File containing additional task results.
+        """
+        if task_params is not None:
+            task = self.get_task_data_by_params(
+                data_params=task_params['data_params'],
+                problem_params=task_params['problem_params'],
+                solver_params=task_params['solver_params'])
+            idt = task['id_task']
+        elif idt is None:
+            raise ValueError('Either `task_params` or `idt` should be given.')
+        path_task = self.xp_path / 'tasks' / '{:06}'.format(idt)
+        return path_task / 'misc.npz'
+
+    def plot_results(self):
+        """
+        Plot and save results of the experiment
+        """
+        self.fig_dir.mkdir(parents=True, exist_ok=True)
+        print('Figures saved in {}'.format(self.fig_dir))
+        results = self.load_results(array_type='xarray')
+        results = results.squeeze()
+        coords_dict = results.to_dict()['coords']
+        csv_path = self.fig_dir / 'exp_solve_pd.csv'
+        results.to_series().to_csv(csv_path, header=True)
+
+        print('number of nan values:',
+              np.sum(np.isnan(results.values)))
+
+        # Scatter plot for running times : tff-1 vs. tff-P
+        plt.figure()
+        for win_type in coords_dict['problem_win_choice']['data']:
+            t_tff1 = results.sel(measure=['t_lambda_tff', 't_arrf', 't_evdn'],
+                                 problem_win_choice=win_type,
+                                 solver_tol_subregions=None)
+            t_tff1 = t_tff1.sum(dim='measure')
+
+            not_none = coords_dict['solver_tol_subregions']['data'].copy()
+            not_none.remove(None)
+            not_none = not_none[0]
+            t_tffp = results.sel(measure=['t_lambda_tff', 't_arrf',
+                                          't_evdn', 't_subreg'],
+                                 problem_win_choice=win_type,
+                                 solver_tol_subregions=not_none)
+            t_tffp = t_tffp.sum(dim='measure')
+
+            if win_type[:5] == 'gauss':
+                win_type_label = 'Gauss'
+            else:
+                win_type_label = 'Hann'
+            plt.plot(t_tff1.values.reshape(-1),
+                     t_tffp.values.reshape(-1),
+                     '+', label=win_type_label)
+        plt.xlabel(r'Running time for TFF-1 (s)')
+        plt.ylabel(r'Running time for TFF-P (s)')
+        plt.legend()
+        plt.grid()
+        plt.savefig(self.fig_dir / 'running_times_exp.pdf')
+        plt.savefig(self.fig_dir / 'running_times_exp.png')
+        plt.xscale('log')
+        plt.yscale('log')
+        plt.savefig(self.fig_dir / 'running_times_exp_loglog.pdf')
+        plt.savefig(self.fig_dir / 'running_times_exp_loglog.png')
+
+        # Scatter plot for running times: TFF-1 and TFF-P vs. mask size
+        plt.figure()
+        symbol = '+'
+        for win_type in coords_dict['problem_win_choice']['data']:
+            mask_size_tff1 = results.sel(measure=['mask_size'],
+                                         problem_win_choice=win_type,
+                                         solver_tol_subregions=None).squeeze()
+            t_tff1 = results.sel(measure=['t_lambda_tff', 't_arrf', 't_evdn'],
+                                 problem_win_choice=win_type,
+                                 solver_tol_subregions=None)
+            t_tff1 = t_tff1.sum(dim='measure')
+            plt.plot(mask_size_tff1.values.reshape(-1),
+                     t_tff1.values.reshape(-1),
+                     symbol, label='{} - {}'.format('TFF-1', win_type))
+
+        not_none = coords_dict['solver_tol_subregions']['data'].copy()
+        not_none.remove(None)
+        not_none = not_none[0]
+        for win_type in coords_dict['problem_win_choice']['data']:
+            mask_size_tffp = results.sel(
+                measure=['mask_size'],
+                problem_win_choice=win_type,
+                solver_tol_subregions=not_none).squeeze()
+            t_tffp = results.sel(measure=['t_lambda_tff', 't_arrf',
+                                          't_evdn', 't_subreg'],
+                                 problem_win_choice=win_type,
+                                 solver_tol_subregions=not_none)
+            t_tffp = t_tffp.sum(dim='measure')
+
+            plt.plot(mask_size_tffp.values.reshape(-1),
+                     t_tffp.values.reshape(-1),
+                     symbol, label='{} - {}'.format('TFF-P', win_type))
+        plt.ylabel('Running time (s)')
+        plt.xlabel('Mask size')
+        plt.legend()
+        plt.grid()
+        plt.savefig(self.fig_dir / 'running_times_masksize_exp.pdf')
+        plt.savefig(self.fig_dir / 'running_times_masksize_exp.png')
+        plt.xscale('log')
+        plt.yscale('log')
+        plt.savefig(self.fig_dir / 'running_times_masksize_exp_loglog.pdf')
+        plt.savefig(self.fig_dir / 'running_times_masksize_exp_loglog.png')
+
+        # Scatter plot: SDR vs. IS
+        plt.figure()
+        symbol = '+'
+        for k_measure in results.coords['measure'].values:
+            if k_measure[:3] == 'sdr':
+                sdr_res = results.sel(measure=[k_measure]).squeeze()
+                is_res = results.sel(measure=['is' + k_measure[3:]]).squeeze()
+                plt.plot(sdr_res.values.reshape(-1),
+                         is_res.values.reshape(-1),
+                         symbol,
+                         label=k_measure[4:])
+        plt.legend()
+        plt.grid()
+        plt.yscale('log')
+        plt.savefig(self.fig_dir / 'sdr_vs_is.pdf')
+        plt.savefig(self.fig_dir / 'sdr_vs_is.png')
+
+        # Scatter plot: SDR vs. IS with polygons
+        sdr_tff1 = results.sel(
+            measure='sdr_tff',
+            solver_tol_subregions=None).squeeze().values.reshape(-1)
+        sdr_tffp = results.sel(
+            measure='sdr_tff',
+            solver_tol_subregions=not_none).squeeze().values.reshape(-1)
+        sdr_tffo = results.sel(
+            measure='sdr_tffo',
+            solver_tol_subregions=not_none).squeeze().values.reshape(-1)
+        sdr_interp = results.sel(
+            measure='sdr_interp',
+            solver_tol_subregions=not_none).squeeze().values.reshape(-1)
+        sdr_zero = results.sel(
+            measure='sdr_zero',
+            solver_tol_subregions=not_none).squeeze().values.reshape(-1)
+        is_tff1 = results.sel(
+            measure='is_tff',
+            solver_tol_subregions=None).squeeze().values.reshape(-1)
+        is_tffp = results.sel(
+            measure='is_tff',
+            solver_tol_subregions=not_none).squeeze().values.reshape(-1)
+        is_tffo = results.sel(
+            measure='is_tffo',
+            solver_tol_subregions=not_none).squeeze().values.reshape(-1)
+        is_interp = results.sel(
+            measure='is_interp',
+            solver_tol_subregions=not_none).squeeze().values.reshape(-1)
+        is_zero = results.sel(
+            measure='is_zero',
+            solver_tol_subregions=not_none).squeeze().values.reshape(-1)
+
+        plt.figure()
+        symbol = '+'
+        for i in range(sdr_tff1.size):
+            plt.plot([sdr_tff1[i], sdr_tffp[i], sdr_tffo[i],
+                      sdr_zero[i], sdr_interp[i], sdr_tff1[i]],
+                     [is_tff1[i], is_tffp[i], is_tffo[i],
+                      is_zero[i], is_interp[i], is_tff1[i]],
+                     'k', alpha=0.2)
+        plt.plot(sdr_tff1, is_tff1, symbol, label='TFF-1')
+        plt.plot(sdr_tffp, is_tffp, symbol, label='TFF-P')
+        plt.plot(sdr_tffo, is_tffo, symbol, label='TFF-O')
+        plt.plot(sdr_interp, is_interp, symbol, label='Interp')
+        plt.plot(sdr_zero, is_zero, symbol, label='Zero fill')
+        plt.legend()
+        plt.grid()
+        plt.xlabel('SDR')
+        plt.ylabel('IS divergence')
+        plt.yscale('log')
+        plt.savefig(self.fig_dir / 'sdr_vs_is_polygons.pdf')
+        plt.savefig(self.fig_dir / 'sdr_vs_is_polygons.png')
+
+    @staticmethod
+    def _get_label(k, tol_subregions):
+        if k.endswith('tffo'):
+            label = 'TFF-O'
+        elif k.endswith('tff'):
+            label = 'TFF-1' if tol_subregions is None else 'TFF-P'
+        elif k.endswith('tffe'):
+            label = 'TFF-E'
+        elif k.endswith('interp'):
+            label = 'Interp'
+        elif k.endswith('zero'):
+            label = 'Zero fill'
+        elif k.endswith('mix'):
+            label = 'Mix'
+        else:
+            raise ValueError('Unknown key: ' + k)
+        return label
+
+    def plot_task(self, idt, fontsize=16):
+        """
+        Plot and save figures for a specific task
+
+        Parameters
+        ----------
+        idt : int
+            Task identifier
+        fontsize : int
+            Font size to be used in the figures.
+        """
+        matplotlib.rcParams.update({'font.size': fontsize})
+        fig_dir = self.xp_path / 'figures' / 'tasks' / '{:06}'.format(idt)
+        fig_dir.mkdir(parents=True, exist_ok=True)
+        print('Save figures in:', fig_dir)
+
+        task = self.get_task_data_by_id(idt=idt)
+        misc_data = np.load(self.get_misc_file(idt=idt))
+
+        mask = task['problem_data']['mask']
+        dgt_params = task['problem_data']['dgt_params']
+        signal_params = task['problem_data']['signal_params']
+        x_mix = task['problem_data']['x_mix']
+        x_wb = task['solution_data']['x_wb']
+        tol_subregions = task['task_params']['solver_params']['tol_subregions']
+        gmtff = task['solved_data']['gmtff']
+        sdr_res = dict()
+        is_res = dict()
+        for k in task['result']:
+            k_suf = k.split('_')[-1]
+            if k.startswith('sdr'):
+                sdr_res[k_suf] = task['result'][k]
+            elif k.startswith('is'):
+                is_res[k_suf] = task['result'][k]
+        lambda_res = dict()
+        for k in misc_data:
+            k_suf = k.split('_')[-1]
+            lambda_res[k_suf] = misc_data[k]
+
+        plt.figure()
+        plot_mask(mask=mask, hop=dgt_params['hop'],
+                  n_bins=dgt_params['n_bins'], fs=signal_params['fs'])
+        plt.title('Area: {} ({:.1%})'.format(mask.sum(), np.average(mask)))
+        plt.tight_layout()
+        plt.savefig(fig_dir / 'mask.pdf')
+
+        plt.figure()
+        for i_area in range(gmtff.n_areas):
+            s_vec = gmtff.s_vec_list[i_area]
+            plt.plot(s_vec, label='Sub-region {}'.format(i_area + 1))
+        plt.xlabel('k')
+        plt.ylabel('$\\sigma_k$')
+        plt.yscale('log')
+        plt.grid()
+        plt.legend()
+        plt.tight_layout()
+        plt.savefig(fig_dir / 'gabmul_eigenvalues.pdf')
+
+        # Results
+        def sdr_wb(lambda_coef):
+            return sdr(x_ref=x_wb, x_est=gmtff.compute_estimate(lambda_coef))
+
+        def is_wb(lambda_coef):
+            return is_div_spectrum(x_ref=x_wb,
+                                   x_est=gmtff.compute_estimate(lambda_coef))
+
+        def sdr_wb_1area(lambda_coef, i_area):
+            lambda_vec = np.ones(gmtff.n_areas)
+            lambda_vec[i_area] = lambda_coef
+            return sdr(x_ref=x_wb, x_est=gmtff.compute_estimate(lambda_vec))
+
+        l_range = 10 ** np.linspace(-10, 10, 100, endpoint=True)
+        if tol_subregions is None:
+            plt.figure()
+            plt.plot(l_range, [sdr_wb(i) for i in l_range], '-',
+                     label='SDR')
+            for k in lambda_res:
+                plt.plot(lambda_res[k], sdr_wb(lambda_res[k]), 'o',
+                         label=self._get_label(k=k,
+                                               tol_subregions=tol_subregions))
+        else:
+            plt.figure()
+            for i_area in range(gmtff.n_areas):
+                plt.plot(l_range,
+                         [sdr_wb_1area(i, i_area) for i in l_range],
+                         '-', label='SDR sub-reg {}'.format(i_area + 1))
+            for k in lambda_res:
+                label_prefix = self._get_label(k=k,
+                                               tol_subregions=tol_subregions)
+                if not isinstance(lambda_res[k], np.ndarray):
+                    plt.plot(lambda_res[k], sdr_wb(lambda_res[k]),
+                             'o', label=label_prefix)
+                    continue
+
+                for i_area in range(gmtff.n_areas):
+                    label = '{} {}'.format(label_prefix, i_area + 1)
+                    plt.plot(lambda_res[k][i_area],
+                             sdr_wb_1area(lambda_res[k][i_area], i_area),
+                             'o', label=label)
+        plt.xlabel('$\\lambda$')
+        plt.ylabel('SDR (dB)')
+        plt.xscale('log')
+        plt.grid()
+        plt.legend()
+        plt.tight_layout()
+        plt.savefig(fig_dir / 'tuning_lambda.pdf')
+
+        if tol_subregions is None:
+            plt.figure()
+            plt.plot(l_range, [is_wb(i) for i in l_range], '-',
+                     label='IS')
+            for k in lambda_res:
+                plt.plot(lambda_res[k], is_wb(lambda_res[k]), 'o',
+                         label=self._get_label(k=k,
+                                               tol_subregions=tol_subregions))
+            plt.xlabel('$\\lambda$')
+            plt.ylabel('IS divergence')
+            plt.xscale('log')
+            plt.grid()
+            plt.legend()
+            plt.tight_layout()
+            plt.savefig(fig_dir / 'tuning_lambda_IS.pdf')
+
+            fig, ax1 = plt.subplots()
+            color = 'tab:blue'
+            ax1.set_xlabel('$\\lambda$')
+            ax1.set_ylabel('SDR (dB)', color=color)
+            ax1.plot(l_range, [sdr_wb(i) for i in l_range], '-',
+                     color=color)
+            for k in lambda_res:
+                ax1.plot(lambda_res[k], sdr_wb(lambda_res[k]), 'o',
+                         label=self._get_label(k=k,
+                                               tol_subregions=tol_subregions))
+            ax1.tick_params(axis='y', labelcolor=color)
+            plt.xscale('log')
+            ax1.grid()
+
+            # instantiate a second axes that shares the same x-axis
+            ax2 = ax1.twinx()
+            color = 'tab:red'
+            ax2.set_xlabel('$\\lambda$')
+            ax2.set_ylabel('IS divergence', color=color)
+            ax2.plot(l_range, [is_wb(i) for i in l_range], '-',
+                     color=color)
+            for k in lambda_res:
+                ax2.plot(lambda_res[k], is_wb(lambda_res[k]), 'o',
+                         label=self._get_label(k=k,
+                                               tol_subregions=tol_subregions))
+            ax2.tick_params(axis='y', labelcolor=color)
+            fig.tight_layout()
+            ax2.legend()
+            plt.tight_layout()
+            plt.savefig(fig_dir / 'tuning_lambda_SDR_IS.pdf')
+
+        x_dict = dict()
+        for k in lambda_res:
+            x_dict[k] = gmtff.compute_estimate(lambda_res[k])
+            x_dict[k].to_wavfile(fig_dir / 'x_{}.wav'.format(k))
+        x_dict['interp'] = Waveform(solve_by_interpolation(
+            x_mix=x_mix, mask=mask, dgt_params=dgt_params,
+            signal_params=signal_params), fs=signal_params['fs'])
+        x_dict['interp'].to_wavfile(fig_dir / 'x_interp.wav')
+
+        for k in sdr_res:
+            print(self._get_label(k=k, tol_subregions=tol_subregions))
+            print('  - SDR: {:.1f}dB'.format(sdr_res[k]))
+            print('  - IS: {:.1f}'.format(is_res[k]))
+            if k in lambda_res:
+                print('  - lambda: ', lambda_res[k])
+
+        x_mix_tf = dgt(sig=x_mix, dgt_params=dgt_params)
+        x_max = db(x_mix_tf).max()
+        clim = x_max - 100, x_max
+        for k in x_dict:
+            plt.figure()
+            plot_spectrogram(x=x_dict[k], dgt_params=dgt_params,
+                             fs=signal_params['fs'], clim=clim)
+            plt.title('{} - SDR={:.1f}dB - IS={:.1f}'
+                      .format(self._get_label(k=k,
+                                              tol_subregions=tol_subregions),
+                              sdr_res[k], is_res[k]))
+            plt.tight_layout()
+            plt.savefig(fig_dir / '{}.pdf'.format(k))
+
+        plt.figure()
+        plot_spectrogram(x=x_wb, dgt_params=dgt_params,
+                         fs=signal_params['fs'],
+                         clim=clim)
+        plt.title('True source')
+        plt.tight_layout()
+        plt.savefig(fig_dir / 'spectrogram_true_wb_source.pdf')
+
+
+def get_data(loc_source, wideband_src):
+    """
+    Prepare the input data information for the :py:class:`SolveTffExperiment`
+    experiment.
+
+    This function only wraps its inputs in a dictionary.
+
+    Parameters
+    ----------
+    loc_source : Path
+        File for the source localized in time-frequency (perturbation)
+    wideband_src : Path
+        File for the source of interest.
+
+    Returns
+    -------
+    dict
+        Dictionary to be given when calling the problem (see
+        :py:meth:`Problem.__call__`), with keys `'loc_source'` and
+        `'wideband_src'`.
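+
+    Examples
+    --------
+    A minimal sketch; the source names below are only illustrative (they
+    mirror the 'bird' + 'car' setting used in the full experiment):
+
+    >>> d = get_data(loc_source='bird', wideband_src='car')
+    >>> sorted(d.keys())
+    ['loc_source', 'wideband_src']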
+    """
+    return dict(loc_source=loc_source, wideband_src=wideband_src)
+
+
+class Problem:
+    """
+    Problem generation for the :py:class:`SolveTffExperiment` experiment.
+
+    Parameters
+    ----------
+    crop : int or None
+        If not None, a cropped, centered portion of the sound will be
+        extracted with the specified length, in samples.
+    win_choice : str
+        String of the form 'name len' where 'name' is a window name and
+        'len' is a window length, e.g. 'hann 512', 'gauss 256'.
+    delta_mix_db : float
+        Coefficient energy ratio, in dB, between the wideband source and the
+        localized source in the mixture in order to select coefficients in
+        the mask.
+    delta_loc_db : float
+        Dynamic range, in dB, for the localized source in order to select
+        coefficients in the mask.
+    wb_to_loc_ratio_db : float
+        Wideband source to localized source energy ratio to be adjusted in
+        the mix.
+    or_mask : bool
+        If True, the mask is built by taking the union of the two masks
+        obtained using thresholds `delta_mix_db` and `delta_loc_db`. If
+        False, the intersection is taken.
+    n_iter_closing : int
+        Number of successive morphological closings with radius 1
+        (equivalently, the radius of a single closing).
+    n_iter_opening : int
+        Number of successive morphological openings with radius 1
+        (equivalently, the radius of a single opening).
+    closing_first : bool
+        If True, morphological closings are applied first, followed by
+        openings. If False, the reverse order is used.
+    fig_dir : None or str or Path
+        If not None, folder where figures are stored. If None, figures are
+        not plotted.
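+
+    Examples
+    --------
+    A minimal instantiation sketch; the parameter values below are only
+    illustrative (they mirror the 'bird' + 'car' task of the full
+    experiment) and calling the problem requires the dataset to be
+    available:
+
+    >>> problem = Problem(crop=None, win_choice='gauss 256',
+    ...                   delta_mix_db=0, delta_loc_db=40,
+    ...                   wb_to_loc_ratio_db=8, or_mask=True,
+    ...                   n_iter_closing=3, n_iter_opening=3,
+    ...                   closing_first=True, fig_dir=None)
+    >>> problem_data, solution_data = problem(
+    ...     loc_source='bird', wideband_src='car')  # doctest: +SKIP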
+    """
+
+    def __init__(self, crop, win_choice,
+                 delta_mix_db, delta_loc_db, wb_to_loc_ratio_db, or_mask,
+                 n_iter_closing, n_iter_opening, closing_first, fig_dir):
+        win_type, win_len_str = win_choice.split(sep=' ')
+        win_dur = int(win_len_str) / 8000
+        self.win_dur = win_dur
+        self.win_type = win_type
+        if win_type == 'gauss':
+            self.hop_ratio = 1 / 4
+            self.n_bins_ratio = 4
+        else:
+            self.hop_ratio = 1 / 8
+            self.n_bins_ratio = 2
+        self.n_iter_closing = n_iter_closing
+        self.n_iter_opening = n_iter_opening
+        self.closing_first = closing_first
+        self.delta_mix_db = delta_mix_db
+        self.delta_loc_db = delta_loc_db
+        self.wb_to_loc_ratio_db = wb_to_loc_ratio_db
+        self.or_mask = or_mask
+        self.crop = crop
+        self.fig_dir = fig_dir
+
+    def __call__(self, loc_source, wideband_src):
+        """
+        Generate the problem from input data.
+
+        Parameters
+        ----------
+        loc_source : Path
+            File for the source localized in time-frequency (perturbation)
+        wideband_src : Path
+            File for the source of interest.
+
+        Returns
+        -------
+        problem_data : dict
+            Dictionary to be given to a solver, with keys `'x_mix'` (mix
+            signal), `'mask'` (time-frequency mask), `'dgt_params'` (DGT
+            parameters) and `'signal_params'` (signal parameters).
+        solution_data : dict
+            Dictionary containing the problem solutions, with keys
+            `'x_loc'` (localized signal) and `'x_wb'` (wideband signal).
+        """
+        x_mix, dgt_params, signal_params, mask, x_loc, x_wb = \
+            get_mix(loc_source=loc_source,
+                    wideband_src=wideband_src,
+                    crop=self.crop,
+                    win_dur=self.win_dur,
+                    win_type=self.win_type,
+                    hop_ratio=self.hop_ratio,
+                    n_bins_ratio=self.n_bins_ratio,
+                    n_iter_closing=self.n_iter_closing,
+                    n_iter_opening=self.n_iter_opening,
+                    closing_first=self.closing_first,
+                    delta_mix_db=self.delta_mix_db,
+                    delta_loc_db=self.delta_loc_db,
+                    wb_to_loc_ratio_db=self.wb_to_loc_ratio_db,
+                    or_mask=self.or_mask,
+                    fig_dir=self.fig_dir)
+
+        problem_data = dict(x_mix=x_mix, mask=mask,
+                            dgt_params=dgt_params, signal_params=signal_params)
+        solution_data = dict(x_loc=x_loc, x_wb=x_wb)
+        return problem_data, solution_data
+
+
+class Solver:
+    """
+    Solver for the :py:class:`SolveTffExperiment` experiment.
+
+    This solver computes
+
+    * the `TFF-1` or `TFF-P` solution (depending on parameter `tol_subregions`)
+      using a :py:class:`~tffpy.tf_fading.GabMulTff` instance
+    * the `Interp` solution using function
+      :py:func:`~tffpy.interpolation_solver.solve_by_interpolation`
+
+    Parameters
+    ----------
+    tol_subregions : None or float
+        Tolerance to split the mask into sub-regions in
+        :py:class:`~tffpy.tf_fading.GabMulTff`.
+    tolerance_arrf : float
+        Tolerance for the randomized EVD in
+        :py:class:`~tffpy.tf_fading.GabMulTff`, see method
+        :py:meth:`~tffpy.tf_fading.GabMulTff.compute_decomposition`.
+    proba_arrf : float
+        Probability of error for the randomized EVD in
+        :py:class:`~tffpy.tf_fading.GabMulTff`, see method
+        :py:meth:`~tffpy.tf_fading.GabMulTff.compute_decomposition`.
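+
+    Examples
+    --------
+    A minimal usage sketch; the parameter values are those used for the
+    TFF-1 tasks in the interactive script and `problem_data` is assumed
+    to come from a :py:class:`Problem` instance:
+
+    >>> solver = Solver(tol_subregions=None, tolerance_arrf=0.001,
+    ...                 proba_arrf=0.9999)
+    >>> solved_data = solver(**problem_data)  # doctest: +SKIP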
+    """
+
+    def __init__(self, tol_subregions, tolerance_arrf, proba_arrf):
+        self.tol_subregions = tol_subregions
+        self.tolerance_arrf = tolerance_arrf
+        self.proba_arrf = proba_arrf
+
+    def __call__(self, x_mix, mask, dgt_params, signal_params):
+        """
+        Apply the solver to estimate solutions from the problem data.
+
+        The output dictionary is composed of data with keys:
+
+        * `'x_tff'`: solution estimated by :py:class:`~tffpy.tf_fading.GabMulTff`
+        * `'x_zero'`: solution when applying the Gabor
+          multiplier (i.e., :math:`\lambda=1`)
+        * `'x_interp'`: solution from function
+          :py:func:`~tffpy.interpolation_solver.solve_by_interpolation`
+        * `'gmtff'`: `GabMulTff` instance
+        * `'t_lambda_tff'`: running times to estimate hyperparameter in method
+          :py:meth:`~tffpy.tf_fading.GabMulTff.compute_lambda`
+        * `'t_arrf'`: running times to compute range approximation in method
+          :py:meth:`~tffpy.tf_fading.GabMulTff.compute_decomposition`
+        * `'t_evdn'`: running times to compute EVD in method
+          :py:meth:`~tffpy.tf_fading.GabMulTff.compute_decomposition`
+        * `'t_uh_x'`: running times to compute additional matrix products in
+          method :py:meth:`~tffpy.tf_fading.GabMulTff.compute_decomposition`
+        * `'t_subreg'`: running times to split mask into sub-regions in class
+          :py:class:`~tffpy.tf_fading.GabMulTff`
+        * `'lambda_tff'`: values of the hyperparameters :math:`\lambda_i`
+          estimated by :py:meth:`~tffpy.tf_fading.GabMulTff.compute_lambda`
+
+        Parameters
+        ----------
+        x_mix : nd-array
+            Mix signal
+        mask : nd-array
+            Time-frequency mask
+        dgt_params : dict
+            DGT parameters
+        signal_params : dict
+            Signal parameters
+
+        Returns
+        -------
+        dict
+            The estimated solution and additional information
+        """
+        gmtff = GabMulTff(x_mix=x_mix, mask=mask, dgt_params=dgt_params,
+                          signal_params=signal_params,
+                          tol_subregions=self.tol_subregions)
+        gmtff.compute_decomposition(tolerance_arrf=self.tolerance_arrf,
+                                    proba_arrf=self.proba_arrf)
+
+        # Estimate energy and lambda
+        lambda_tff, t_lambda_tff = gmtff.compute_lambda(x_mix=x_mix)
+        print('Running time to tune lambda (est): {} s'.format(t_lambda_tff))
+
+        x_tff = gmtff.compute_estimate(lambda_tff)
+        x_zero = gmtff.compute_estimate(1)
+        x_interp = solve_by_interpolation(
+            x_mix=x_mix, mask=mask, dgt_params=dgt_params,
+            signal_params=signal_params)
+        return dict(x_tff=x_tff, x_zero=x_zero, x_interp=x_interp, gmtff=gmtff,
+                    t_lambda_tff=t_lambda_tff, t_arrf=gmtff.t_arrf,
+                    t_evdn=gmtff.t_evdn, t_uh_x=gmtff.t_uh_x,
+                    t_subreg=gmtff.t_subreg, lambda_tff=lambda_tff)
+
+
+def perf_measures(task_params, source_data, problem_data,
+                  solution_data, solved_data, exp=None):
+    """
+    Performance measure, including computation of oracle solutions
+
+    Parameters
+    ----------
+    task_params : dict
+        Task parameters
+    source_data : dict
+        Input data
+    problem_data : dict
+        Problem data
+    solution_data : dict
+        True solution data
+    solved_data : dict
+        Solver output data
+    exp : SolveTffExperiment
+        The experiment
+
+    Returns
+    -------
+    dict
+        All data useful for result analysis including SDR and Itakura-Saito
+        performance, running times, hyperparameter values, mask size and
+        number of sub-regions.
+    """
+    x_tff = solved_data['x_tff']
+    x_zero = solved_data['x_zero']
+    gmtff = solved_data['gmtff']
+    lambda_tff = solved_data['lambda_tff']
+    x_interp = solved_data['x_interp']
+    x_mix = problem_data['x_mix']
+    x_wb = solution_data['x_wb']
+
+    # Trick for storing additional results
+    misc_file = exp.get_misc_file(task_params=task_params)
+
+    # Oracle SDR
+    lambda_tffo, t_lambda_tffo = compute_lambda_oracle_sdr(
+        gmtff=gmtff, x_wb=x_wb)
+    x_tffo = gmtff.compute_estimate(lambda_tffo)
+
+    # Oracle true energy
+    e_target_tffe = np.empty(gmtff.n_areas)
+    x_wb_tf_mat = dgt(x_wb, dgt_params=gmtff.dgt_params)
+    for i_area in range(gmtff.n_areas):
+        mask_i = gmtff.mask == i_area + 1
+        x_wb_tf_masked = mask_i * x_wb_tf_mat
+        e_target_tffe[i_area] = \
+            np.linalg.norm(x_wb_tf_masked, 'fro') ** 2
+
+    lambda_tffe, t_lambda_tffe = gmtff.compute_lambda(
+        x_mix=x_mix, e_target=e_target_tffe)
+    x_tffe = gmtff.compute_estimate(lambda_tffe)
+
+    solutions = dict(tffo=x_tffo,
+                     tff=x_tff,
+                     tffe=x_tffe,
+                     zero=x_zero,
+                     mix=x_mix,
+                     interp=x_interp)
+    sdr_res = {'sdr_' + k: sdr(x_ref=x_wb, x_est=x)
+               for k, x in solutions.items()}
+    is_res = {'is_' + k: is_div_spectrum(x_ref=x_wb, x_est=x)
+              for k, x in solutions.items()}
+
+    np.savez(misc_file,
+             lambda_tffe=lambda_tffe,
+             lambda_tffo=lambda_tffo,
+             lambda_tff=lambda_tff)
+    running_times = dict(t_lambda_tffe=np.sum(t_lambda_tffe),
+                         t_lambda_tffo=np.sum(t_lambda_tffo),
+                         t_lambda_tff=np.sum(solved_data['t_lambda_tff']),
+                         t_arrf=np.sum(solved_data['t_arrf']),
+                         t_evdn=np.sum(solved_data['t_evdn']),
+                         t_uh_x=np.sum(solved_data['t_uh_x']),
+                         t_subreg=solved_data['t_subreg']
+                         )
+    features = dict(mask_size=np.sum(gmtff.mask > 0),
+                    n_subregions=gmtff.n_areas)
+    return dict(**running_times, **sdr_res, **is_res, **features)
+
+
+def create_and_run_light_experiment():
+    """
+    Create a light experiment and run it
+    """
+    exp = SolveTffExperiment.get_experiment(setting='light', force_reset=True)
+    print('*' * 80)
+    print('Created experiment')
+    print(exp)
+    print(exp.display_status())
+
+    print('*' * 80)
+    print('Run task 0')
+    task_data = exp.get_task_data_by_id(idt=0)
+    print(task_data.keys())
+    print(task_data['task_params']['data_params'])
+
+    problem = exp.get_problem(
+        **task_data['task_params']['problem_params'])
+    print(problem)
+
+    print('*' * 80)
+    print('Run all')
+    exp.launch_experiment()
+
+    print('*' * 80)
+    print('Collect and plot results')
+    exp.collect_results()
diff --git a/python/tffpy/experiments/tests/test_exp_solve_tff.py b/python/tffpy/experiments/tests/test_exp_solve_tff.py
new file mode 100644
index 0000000000000000000000000000000000000000..018a6d47f85e202d5931896defdf2d37059958b6
--- /dev/null
+++ b/python/tffpy/experiments/tests/test_exp_solve_tff.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+# ######### COPYRIGHT #########
+# Credits
+# #######
+#
+# Copyright(c) 2020-2020
+# ----------------------
+#
+# * Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+# * Université d'Aix-Marseille <http://www.univ-amu.fr/>
+# * Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+# * Université de Toulon <http://www.univ-tln.fr/>
+#
+# Contributors
+# ------------
+#
+# * `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+# * `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+#
+# This package has been created thanks to the joint work with Florent Jaillet
+# and Ronan Hamon on other packages.
+#
+# Description
+# -----------
+#
+# Time frequency fading using Gabor multipliers
+#
+# Version
+# -------
+#
+# * tffpy version = 0.1.3
+#
+# Licence
+# -------
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# ######### COPYRIGHT #########
+"""Test of the module :module:`tffpy.experiments.exp_solve_tff`
+
+.. moduleauthor:: Valentin Emiya
+"""
+import unittest
+
+import matplotlib.pyplot as plt
+import matplotlib as mpl
+mpl.rcParams['figure.max_open_warning'] = 40
+
+from tffpy.experiments.exp_solve_tff import \
+    SolveTffExperiment, create_and_run_light_experiment
+from tffpy.tests.ci_config import create_config_files
+
+
+class TestSolveTffExperiment(unittest.TestCase):
+    def setUp(self):
+        create_config_files()
+
+    def test_light_experiment(self):
+        create_and_run_light_experiment()
+
+        light_exp = SolveTffExperiment.get_experiment(
+            setting='light', force_reset=False)
+        for idt in range(light_exp.n_tasks):
+            light_exp.plot_task(idt=idt, fontsize=16)
+            plt.close('all')
+        light_exp.plot_results()
+        plt.close('all')
+
+    def test_create_full_experiment(self):
+        experiment = SolveTffExperiment.get_experiment(
+            setting='full', force_reset=False)
+        experiment.display_status()
diff --git a/python/tffpy/interpolation_solver.py b/python/tffpy/interpolation_solver.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ef936651e67e13145f25367f95ad861ad81553f
--- /dev/null
+++ b/python/tffpy/interpolation_solver.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+# ######### COPYRIGHT #########
+# Credits
+# #######
+#
+# Copyright(c) 2020-2020
+# ----------------------
+#
+# * Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+# * Université d'Aix-Marseille <http://www.univ-amu.fr/>
+# * Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+# * Université de Toulon <http://www.univ-tln.fr/>
+#
+# Contributors
+# ------------
+#
+# * `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+# * `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+#
+# This package has been created thanks to the joint work with Florent Jaillet
+# and Ronan Hamon on other packages.
+#
+# Description
+# -----------
+#
+# Time frequency fading using Gabor multipliers
+#
+# Version
+# -------
+#
+# * tffpy version = 0.1.3
+#
+# Licence
+# -------
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# ######### COPYRIGHT #########
+"""
+
+.. moduleauthor:: Valentin Emiya
+"""
+
+import numpy as np
+import matplotlib.pyplot as plt
+from pathlib import Path
+from ltfatpy import plotdgtreal
+
+from tffpy.utils import dgt, plot_spectrogram, plot_mask, idgt
+
+
+def solve_by_interpolation(x_mix, mask, dgt_params, signal_params,
+                           fig_dir=None):
+    """
+    Time-frequency fading solver using linear interpolation and random phases
+
+    Parameters
+    ----------
+    x_mix : nd-array
+        Mix signal
+    mask : nd-array
+        Time-frequency mask
+    dgt_params : dict
+        DGT parameters
+    signal_params : dict
+        Signal parameters
+    fig_dir : str or Path
+        If not None, folder where figures are stored. If None, figures are
+        not plotted.
+
+    Returns
+    -------
+    nd-array
+        Estimated signal
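+
+    Examples
+    --------
+    A minimal sketch, assuming `x_mix`, `mask`, `dgt_params` and
+    `signal_params` have been obtained from
+    :py:func:`tffpy.datasets.get_mix`:
+
+    >>> x_est = solve_by_interpolation(x_mix, mask, dgt_params,
+    ...                                signal_params)  # doctest: +SKIP
+    >>> x_est.shape == x_mix.shape  # doctest: +SKIP
+    True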
+    """
+    x_tf = dgt(sig=x_mix, dgt_params=dgt_params)
+    mask = mask > 0
+    x_tf[mask] = np.nan
+
+    f_range = np.arange(x_tf.shape[0])
+    for j in range(x_tf.shape[1]):
+        ind_nan = np.isnan(x_tf[:, j])
+        x_tf[ind_nan, j] = np.interp(x=f_range[ind_nan],
+                                     xp=np.nonzero(~ind_nan)[0],
+                                     fp=x_tf[~ind_nan, j])
+        x_tf[ind_nan, j] *= np.exp(2 * 1j * np.pi
+                                   * np.random.rand(np.sum(ind_nan)))
+
+    x_est = idgt(tf_mat=x_tf, dgt_params=dgt_params,
+                 sig_len=signal_params['sig_len'])
+    if fig_dir is not None:
+        fig_dir = Path(fig_dir)
+        fig_dir.mkdir(exist_ok=True, parents=True)
+
+        plt.figure()
+        plot_mask(mask=mask, hop=dgt_params['hop'],
+                  n_bins=dgt_params['n_bins'], fs=signal_params['fs'])
+        plt.title('Mask')
+        plt.savefig(fig_dir / 'interp_mask.pdf')
+
+        plt.figure()
+        plotdgtreal(coef=x_tf, a=dgt_params['hop'],
+                    M=dgt_params['n_bins'], fs=signal_params['fs'])
+        plt.title('Interpolated TF matrix')
+        plt.savefig(fig_dir / 'interp_tf_est.pdf')
+
+        plt.figure()
+        plot_spectrogram(x=x_est, dgt_params=dgt_params,
+                         fs=signal_params['fs'])
+        plt.title('Reconstructed signal by interp')
+        plt.savefig(fig_dir / 'interp_sig_est.pdf')
+
+    return x_est
diff --git a/python/tffpy/scripts/script_exp_solve_tff.py b/python/tffpy/scripts/script_exp_solve_tff.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa0ae2a0691e332ec3fb4805b2fb9212ef0182fd
--- /dev/null
+++ b/python/tffpy/scripts/script_exp_solve_tff.py
@@ -0,0 +1,154 @@
+# -*- coding: utf-8 -*-
+# ######### COPYRIGHT #########
+# Credits
+# #######
+#
+# Copyright(c) 2020-2020
+# ----------------------
+#
+# * Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+# * Université d'Aix-Marseille <http://www.univ-amu.fr/>
+# * Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+# * Université de Toulon <http://www.univ-tln.fr/>
+#
+# Contributors
+# ------------
+#
+# * `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+# * `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+#
+# This package has been created thanks to the joint work with Florent Jaillet
+# and Ronan Hamon on other packages.
+#
+# Description
+# -----------
+#
+# Time frequency fading using Gabor multipliers
+#
+# Version
+# -------
+#
+# * tffpy version = 0.1.3
+#
+# Licence
+# -------
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# ######### COPYRIGHT #########
+"""
+Run this script to handle the main experiment :class:`SolveTffExperiment`.
+
+.. moduleauthor:: Valentin Emiya
+"""
+from yafe.utils import generate_oar_script
+
+import matplotlib.pyplot as plt
+
+from tffpy.experiments.exp_solve_tff import \
+    SolveTffExperiment, create_and_run_light_experiment
+
+
+try:
+    experiment = SolveTffExperiment.get_experiment(setting='full',
+                                                   force_reset=False)
+except RuntimeError:
+    experiment = None
+except FileNotFoundError:
+    experiment = None
+
+if __name__ == '__main__':
+    answer = 1
+    while answer > 0:
+        input_msg = '\n'.join(['1 - Create and run light experiment',
+                               '2 - Display results of light experiment',
+                               '3 - Full experiment: create full experiment',
+                               '4 - Generate OAR script',
+                               '5 - Full experiment: collect results',
+                               '6 - Full experiment: download results',
+                               '7 - Full experiment: display results',
+                               '8 - Figures for task 12 (bird + car, TFF-1)',
+                               '9 - Figures for task 13 (bird + car, TFF-P)',
+                               '0 - Exit',
+                               ])
+        answer = int(input(input_msg))
+        if answer == 0:
+            break
+        elif answer == 1:
+            create_and_run_light_experiment()
+        elif answer == 2:
+            light_exp = SolveTffExperiment.get_experiment(
+                setting='light', force_reset=False)
+            for idt in range(light_exp.n_tasks):
+                light_exp.plot_task(idt=idt, fontsize=16)
+                plt.close('all')
+            light_exp.plot_results()
+        elif answer == 3:
+            experiment = SolveTffExperiment.get_experiment(
+                setting='full', force_reset=True)
+            experiment.display_status()
+        elif answer == 4:
+            experiment.display_status()
+            batch_size = int(input('Batch size (#tasks per job)?'))
+            generate_oar_script(script_file_path=__file__,
+                                xp_var_name='experiment',
+                                batch_size=batch_size,
+                                oar_walltime='01:00:00',
+                                activate_env_command='source activate py36',
+                                use_gpu=False)
+        elif answer == 5:
+            experiment.collect_results()
+            experiment.display_status()
+        elif answer == 6:
+            to_dir = str(experiment.xp_path)
+            from_dir = \
+                '/data1/home/valentin.emiya/data_exp/SolveTffExperiment/'
+            print('Run:')
+            print(' '.join(['rsync', '-rv',
+                            'valentin.emiya@frontend.lidil.univ-mrs.fr:'
+                            + from_dir,
+                            to_dir]))
+            print('Or (less files):')
+            print(' '.join(['rsync', '-rv',
+                            'valentin.emiya@frontend.lidil.univ-mrs.fr:'
+                            + from_dir
+                            + '*.*',
+                            to_dir]))
+        elif answer == 7:
+            experiment.plot_results()
+            experiment.display_status()
+        elif answer in (8, 9):
+            task_params = {'data_params': {'loc_source': 'bird',
+                                           'wideband_src': 'car'},
+                           'problem_params': {'closing_first': True,
+                                              'delta_loc_db': 40,
+                                              'delta_mix_db': 0,
+                                              'fig_dir': None,
+                                              'n_iter_closing': 3,
+                                              'n_iter_opening': 3,
+                                              'or_mask': True,
+                                              'wb_to_loc_ratio_db': 8,
+                                              'win_choice': 'gauss 256',
+                                              'crop': None},
+                           'solver_params': {'proba_arrf': 0.9999,
+                                             'tolerance_arrf': 0.001}}
+            if answer == 8:
+                task_params['solver_params']['tol_subregions'] = None
+            elif answer == 9:
+                task_params['solver_params']['tol_subregions'] = 1e-05
+            task = experiment.get_task_data_by_params(**task_params)
+            experiment.run_task_by_id(idt=task['id_task'])
+            experiment.plot_task(idt=task['id_task'], fontsize=16)
+        else:
+            print('Unknown answer: ' + str(answer))
diff --git a/python/tffpy/tests/__init__.py b/python/tffpy/tests/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/python/tffpy/tests/ci_config.py b/python/tffpy/tests/ci_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ea27000b809d4b96fd9a807a2d56cc06080313f
--- /dev/null
+++ b/python/tffpy/tests/ci_config.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+# ######### COPYRIGHT #########
+# Credits
+# #######
+#
+# Copyright(c) 2020-2020
+# ----------------------
+#
+# * Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+# * Université d'Aix-Marseille <http://www.univ-amu.fr/>
+# * Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+# * Université de Toulon <http://www.univ-tln.fr/>
+#
+# Contributors
+# ------------
+#
+# * `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+# * `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+#
+# This package has been created thanks to the joint work with Florent Jaillet
+# and Ronan Hamon on other packages.
+#
+# Description
+# -----------
+#
+# Time frequency fading using Gabor multipliers
+#
+# Version
+# -------
+#
+# * tffpy version = 0.1.3
+#
+# Licence
+# -------
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# ######### COPYRIGHT #########
+"""
+Create configuration files for continuous integration.
+
+.. moduleauthor:: Valentin Emiya
+"""
+from configparser import ConfigParser
+from pathlib import Path
+import os
+
+from yafe.utils import ConfigParser as YafeConfigParser
+
+from tffpy.utils import get_config_file, generate_config
+
+
+def create_config_files():
+    config_file = get_config_file()
+    if not config_file.exists():
+        generate_config()
+        config = ConfigParser()
+        config.read(config_file)
+        data_path = Path(__file__).absolute().parents[3] / 'data'
+        print('Data path:', str(data_path))
+        config.set('DATA', 'data_path', str(data_path))
+        with open(config_file, 'w') as config_fd:
+            config.write(config_fd)
+
+    yafe_config_file = YafeConfigParser._config_path
+    print('Yafe configuration file:', yafe_config_file)
+    if not yafe_config_file.exists():
+        yafe_user_path = Path(os.path.expanduser('~')) / 'yafe_user_path'
+        yafe_logger_path = Path(os.path.expanduser('~')) / 'yafe_logger_path'
+        print(yafe_user_path)
+        print(yafe_logger_path)
+        yafe_user_path.mkdir(parents=True, exist_ok=True)
+        yafe_logger_path.mkdir(parents=True, exist_ok=True)
+        YafeConfigParser.generate_config()
+        yafe_config_parser = YafeConfigParser()
+        yafe_config_parser.set('USER', 'data_path', str(yafe_user_path))
+        yafe_config_parser.set('LOGGER', 'path', str(yafe_logger_path))
+        with open(yafe_config_file, 'w') as yafe_config_fd:
+            yafe_config_parser.write(yafe_config_fd)
+
+
+if __name__ == '__main__':
+    create_config_files()
\ No newline at end of file
diff --git a/python/tffpy/tests/test_create_subregions.py b/python/tffpy/tests/test_create_subregions.py
new file mode 100644
index 0000000000000000000000000000000000000000..6436ef19ea7abe6da075c7608b93cd8f1ecfe37d
--- /dev/null
+++ b/python/tffpy/tests/test_create_subregions.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+# ######### COPYRIGHT #########
+# Credits
+# #######
+#
+# Copyright(c) 2020-2020
+# ----------------------
+#
+# * Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+# * Université d'Aix-Marseille <http://www.univ-amu.fr/>
+# * Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+# * Université de Toulon <http://www.univ-tln.fr/>
+#
+# Contributors
+# ------------
+#
+# * `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+# * `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+#
+# This package has been created thanks to the joint work with Florent Jaillet
+# and Ronan Hamon on other packages.
+#
+# Description
+# -----------
+#
+# Time frequency fading using Gabor multipliers
+#
+# Version
+# -------
+#
+# * tffpy version = 0.1.3
+#
+# Licence
+# -------
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# ######### COPYRIGHT #########
+import unittest
+from tffpy.datasets import get_mix
+from tffpy.create_subregions import create_subregions
+
+
+class TestCreateSubregions(unittest.TestCase):
+    def test_create_subregions(self):
+        fig_dir = 'fig_create_subregions'
+        x_mix, dgt_params, signal_params, mask, x_loc, x_wb = \
+            get_mix(loc_source='bird',
+                    wideband_src='car',
+                    crop=4096,
+                    win_dur=256 / 8000,
+                    win_type='gauss',
+                    hop_ratio=1 / 4,
+                    n_bins_ratio=4,
+                    n_iter_closing=3,
+                    n_iter_opening=3,
+                    closing_first=True,
+                    delta_mix_db=0,
+                    delta_loc_db=20,
+                    wb_to_loc_ratio_db=16,
+                    or_mask=True,
+                    fig_dir=None)
+        tol = 1e-9
+        mask_with_subregions, norms = create_subregions(
+            mask_bool=mask, dgt_params=dgt_params, signal_params=signal_params,
+            tol=tol, fig_dir=fig_dir, return_norms=True)
+
+        tol = 1e-5
+        mask_with_subregions = create_subregions(
+            mask_bool=mask, dgt_params=dgt_params, signal_params=signal_params,
+            tol=tol, fig_dir=None, return_norms=False)
diff --git a/python/tffpy/tests/test_datasets.py b/python/tffpy/tests/test_datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c1c894b02b1822d2ff326ef13158f135e61c884
--- /dev/null
+++ b/python/tffpy/tests/test_datasets.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+# ######### COPYRIGHT #########
+# Credits
+# #######
+#
+# Copyright(c) 2020-2020
+# ----------------------
+#
+# * Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+# * Université d'Aix-Marseille <http://www.univ-amu.fr/>
+# * Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+# * Université de Toulon <http://www.univ-tln.fr/>
+#
+# Contributors
+# ------------
+#
+# * `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+# * `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+#
+# This package has been created thanks to the joint work with Florent Jaillet
+# and Ronan Hamon on other packages.
+#
+# Description
+# -----------
+#
+# Time frequency fading using Gabor multipliers
+#
+# Version
+# -------
+#
+# * tffpy version = 0.1.3
+#
+# Licence
+# -------
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# ######### COPYRIGHT #########
+"""Test of the module :module:`tffpy.datasets`
+
+.. moduleauthor:: Valentin Emiya
+"""
+import unittest
+import numpy as np
+
+from tffpy.datasets import get_mix, get_dataset
+from tffpy.utils import snr
+
+
+class TestGetMix(unittest.TestCase):
+
+    def setUp(self):
+        dataset = get_dataset()
+        self.loc_source_list = list(dataset['localized'].keys())
+        self.wb_source_list = list(dataset['wideband'].keys())
+
+    def test_snr(self):
+        loc_source = self.loc_source_list[0]
+        wb_source = self.wb_source_list[0]
+        for wb_to_loc_ratio_db in [-10, -3, 0, 6]:
+            x_mix, dgt_params, signal_params, mask, x_loc, x_wb = \
+                get_mix(loc_source=loc_source, wideband_src=wb_source,
+                        wb_to_loc_ratio_db=wb_to_loc_ratio_db,
+                        win_dur=128 / 8000, win_type='gauss',
+                        hop_ratio=1 / 4, n_bins_ratio=4, n_iter_closing=2,
+                        n_iter_opening=2, delta_mix_db=0, delta_loc_db=30,
+                        closing_first=True, fig_dir=None)
+            np.testing.assert_array_almost_equal(x_mix, x_loc + x_wb)
+            np.testing.assert_almost_equal(snr(x_signal=x_wb, x_noise=x_loc),
+                                           wb_to_loc_ratio_db)
+
diff --git a/python/tffpy/tests/test_interpolation_solver.py b/python/tffpy/tests/test_interpolation_solver.py
new file mode 100644
index 0000000000000000000000000000000000000000..e18a321928b4b5a92a3c7b31b81ea08779df7557
--- /dev/null
+++ b/python/tffpy/tests/test_interpolation_solver.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# ######### COPYRIGHT #########
+# Credits
+# #######
+#
+# Copyright(c) 2020-2020
+# ----------------------
+#
+# * Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+# * Université d'Aix-Marseille <http://www.univ-amu.fr/>
+# * Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+# * Université de Toulon <http://www.univ-tln.fr/>
+#
+# Contributors
+# ------------
+#
+# * `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+# * `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+#
+# This package has been created thanks to the joint work with Florent Jaillet
+# and Ronan Hamon on other packages.
+#
+# Description
+# -----------
+#
+# Time frequency fading using Gabor multipliers
+#
+# Version
+# -------
+#
+# * tffpy version = 0.1.3
+#
+# Licence
+# -------
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# ######### COPYRIGHT #########
+import unittest
+
+import numpy as np
+
+from tffpy.datasets import get_mix
+from tffpy.interpolation_solver import solve_by_interpolation
+
+
+class TestInterpolationSolver(unittest.TestCase):
+    def test_interpolation_solver(self):
+        win_type = 'gauss'
+        win_dur = 256 / 8000
+        hop_ratio = 1 / 4
+        n_bins_ratio = 4
+        delta_mix_db = 0
+        delta_loc_db = 30
+        n_iter_closing = n_iter_opening = 3
+        wb_to_loc_ratio_db = 8
+        closing_first = True
+        or_mask = True
+
+        fig_dir = 'test_fig_interpolation'
+
+        x_mix, dgt_params, signal_params, mask, x_bird, x_engine = \
+            get_mix(loc_source='bird', wideband_src='car', crop=4096,
+                    wb_to_loc_ratio_db=wb_to_loc_ratio_db,
+                    win_dur=win_dur, win_type=win_type,
+                    hop_ratio=hop_ratio, n_bins_ratio=n_bins_ratio,
+                    n_iter_closing=n_iter_closing,
+                    n_iter_opening=n_iter_opening,
+                    closing_first=closing_first,
+                    delta_mix_db=delta_mix_db, delta_loc_db=delta_loc_db,
+                    or_mask=or_mask, fig_dir=fig_dir)
+
+        x_est = solve_by_interpolation(x_mix, mask, dgt_params, signal_params,
+                                       fig_dir)
+        np.testing.assert_array_equal(x_est.shape, x_mix.shape)
+
+        x_est = solve_by_interpolation(x_mix, mask, dgt_params, signal_params)
+        np.testing.assert_array_equal(x_est.shape, x_mix.shape)
diff --git a/python/tffpy/tests/test_tf_fading.py b/python/tffpy/tests/test_tf_fading.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee8616971a73d3e89ec5ea82fa8d98461d878f1f
--- /dev/null
+++ b/python/tffpy/tests/test_tf_fading.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+# ######### COPYRIGHT #########
+# Credits
+# #######
+#
+# Copyright(c) 2020-2020
+# ----------------------
+#
+# * Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+# * Université d'Aix-Marseille <http://www.univ-amu.fr/>
+# * Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+# * Université de Toulon <http://www.univ-tln.fr/>
+#
+# Contributors
+# ------------
+#
+# * `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+# * `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+#
+# This package has been created thanks to the joint work with Florent Jaillet
+# and Ronan Hamon on other packages.
+#
+# Description
+# -----------
+#
+# Time frequency fading using Gabor multipliers
+#
+# Version
+# -------
+#
+# * tffpy version = 0.1.3
+#
+# Licence
+# -------
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# ######### COPYRIGHT #########
+import unittest
+
+import matplotlib.pyplot as plt
+
+from tffpy.datasets import get_mix
+from tffpy.tf_fading import estimate_energy_in_mask
+
+
+class TestEstimateEnergyInMask(unittest.TestCase):
+    def test_estimate_energy_in_mask(self):
+        fig_dir = 'fig_energy_estimation'
+        x_mix, dgt_params, signal_params, mask, x_loc, x_wb = \
+            get_mix(loc_source='bird',
+                    wideband_src='car',
+                    crop=None,
+                    win_dur=256 / 8000,
+                    win_type='gauss',
+                    hop_ratio=1 / 4,
+                    n_bins_ratio=4,
+                    n_iter_closing=3,
+                    n_iter_opening=3,
+                    closing_first=True,
+                    delta_mix_db=0,
+                    delta_loc_db=40,
+                    wb_to_loc_ratio_db=8,
+                    or_mask=True,
+                    fig_dir=fig_dir)
+        plt.close('all')
+
+        estimated_energy = estimate_energy_in_mask(
+            x_mix=x_mix, mask=mask, dgt_params=dgt_params,
+            signal_params=signal_params, fig_dir=fig_dir, prefix=None)
+        plt.close('all')
diff --git a/python/tffpy/tests/test_utils.py b/python/tffpy/tests/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..035f7bf6816da80f2e412bb15f5b7917972c7d3f
--- /dev/null
+++ b/python/tffpy/tests/test_utils.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+import unittest
+from unittest.mock import patch
+import tempfile
+# ######### COPYRIGHT #########
+# Credits
+# #######
+#
+# Copyright(c) 2020-2020
+# ----------------------
+#
+# * Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+# * Université d'Aix-Marseille <http://www.univ-amu.fr/>
+# * Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+# * Université de Toulon <http://www.univ-tln.fr/>
+#
+# Contributors
+# ------------
+#
+# * `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+# * `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+#
+# This package has been created thanks to the joint work with Florent Jaillet
+# and Ronan Hamon on other packages.
+#
+# Description
+# -----------
+#
+# Time frequency fading using Gabor multipliers
+#
+# Version
+# -------
+#
+# * tffpy version = 0.1.3
+#
+# Licence
+# -------
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# ######### COPYRIGHT #########
+from pathlib import Path
+from configparser import ConfigParser
+
+from tffpy.utils import generate_config, get_data_path
+
+
+class TestGenerateConfig(unittest.TestCase):
+    def test_generate_config(self):
+        with patch('tffpy.utils.get_config_file') as mock:
+            mock.return_value = Path(tempfile.mkdtemp()) / 'tffpy.conf'
+            config_file = mock.return_value
+            self.assertFalse(config_file.exists())
+            generate_config()
+            self.assertTrue(config_file.exists())
+
+
+class TestGetDataPath(unittest.TestCase):
+    def test_get_data_path(self):
+        with patch('tffpy.utils.get_config_file') as mock:
+            mock.return_value = Path(tempfile.mkdtemp()) / 'tffpy.conf'
+            config_file = mock.return_value
+
+            self.assertFalse(config_file.exists())
+            with self.assertRaises(Exception):
+                get_data_path()
+
+            generate_config()
+            with self.assertRaises(Exception):
+                get_data_path()
+
+            config = ConfigParser()
+            config.read(config_file)
+            true_data_path = Path(__file__).absolute().parents[3] / 'data'
+            self.assertTrue(true_data_path.exists())
+            print('Data path:', str(true_data_path))
+            config.set('DATA', 'data_path', str(true_data_path))
+            with open(config_file, 'w') as file:
+                config.write(file)
+            tested_data_path = get_data_path()
+            self.assertEqual(tested_data_path, true_data_path)
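For reference, the configuration exercised by this test can also be set up by hand. The sketch below is illustrative and not part of the patch; it creates the configuration file and points the `data_path` entry of the `DATA` section to a local data folder (the path used here is a hypothetical placeholder, adjust it to your checkout):

```python
from configparser import ConfigParser
from pathlib import Path

from tffpy.utils import generate_config, get_config_file, get_data_path

# Create ~/.config/tffpy.conf with a placeholder data_path entry
generate_config()

# Point the 'data_path' key of the 'DATA' section to the actual data folder
config_file = get_config_file()
config = ConfigParser()
config.read(config_file)
config.set('DATA', 'data_path', str(Path('data').absolute()))  # hypothetical path
with open(config_file, 'w') as file:
    config.write(file)

print('Data folder:', get_data_path())  # raises if the path does not exist
```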
diff --git a/python/tffpy/tf_fading.py b/python/tffpy/tf_fading.py
new file mode 100644
index 0000000000000000000000000000000000000000..7051633272272dc5e2f211f5b400245a8f89d6b6
--- /dev/null
+++ b/python/tffpy/tf_fading.py
@@ -0,0 +1,445 @@
+# -*- coding: utf-8 -*-
+# ######### COPYRIGHT #########
+# Credits
+# #######
+#
+# Copyright(c) 2020-2020
+# ----------------------
+#
+# * Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+# * Université d'Aix-Marseille <http://www.univ-amu.fr/>
+# * Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+# * Université de Toulon <http://www.univ-tln.fr/>
+#
+# Contributors
+# ------------
+#
+# * `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+# * `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+#
+# This package has been created thanks to the joint work with Florent Jaillet
+# and Ronan Hamon on other packages.
+#
+# Description
+# -----------
+#
+# Time frequency fading using Gabor multipliers
+#
+# Version
+# -------
+#
+# * tffpy version = 0.1.3
+#
+# Licence
+# -------
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# ######### COPYRIGHT #########
+"""
+Class :class:`GabMulTff` is the main object to solve a time-frequency fading
+problem.
+
+.. moduleauthor:: Valentin Emiya
+"""
+from time import perf_counter
+from pathlib import Path
+
+import numpy as np
+from scipy.optimize import minimize_scalar, minimize
+from matplotlib import pyplot as plt
+from ltfatpy import plotdgtreal
+
+from skpomade.range_approximation import \
+    adaptive_randomized_range_finder, randomized_range_finder
+from skpomade.factorization_construction import evd_nystrom
+
+from tffpy.tf_tools import GaborMultiplier
+from tffpy.create_subregions import create_subregions
+from tffpy.utils import dgt, plot_spectrogram, db
+
+
+class GabMulTff:
+    """
+    Time-frequency fading using Gabor multipliers
+
+    Main object to solve the TFF problem
+
+    Parameters
+    ----------
+    x_mix : nd-array
+        Mix signal
+    mask : nd-array
+        Time-frequency mask
+    dgt_params : dict
+        DGT parameters
+    signal_params : dict
+        Signal parameters
+    tol_subregions : None or float
+        If None, the mask is considered as a single region. If float,
+        tolerance to split the mask into sub-regions using
+        :py:func:`~tffpy.create_subregions.create_subregions`.
+    fig_dir : str or Path
+        If not None, folder where figures are stored. If None, figures are
+        not plotted.
+    """
+
+    def __init__(self, x_mix, mask, dgt_params, signal_params,
+                 tol_subregions=None, fig_dir=None):
+        self.x_mix = x_mix
+        self.dgt_params = dgt_params
+        self.signal_params = signal_params
+        self.tol_subregions = tol_subregions
+        self.t_subreg = None
+        if tol_subregions is not None:
+            if np.issubdtype(mask.dtype, np.bool_):
+                t0 = perf_counter()
+                mask = create_subregions(mask_bool=mask,
+                                         dgt_params=dgt_params,
+                                         signal_params=signal_params,
+                                         tol=tol_subregions,
+                                         fig_dir=fig_dir)
+                self.t_subreg = perf_counter() - t0
+            n_areas = np.unique(mask).size - 1
+            self.mask = mask
+        else:
+            n_areas = 1
+            self.mask = mask > 0
+        self.gabmul_list = [GaborMultiplier(mask=(mask == i + 1),
+                                            dgt_params=dgt_params,
+                                            signal_params=signal_params)
+                            for i in range(n_areas)]
+        self.s_vec_list = [None for i in range(n_areas)]
+        self.u_mat_list = [None for i in range(n_areas)]
+        self.uh_x_list = [None for i in range(n_areas)]
+        self.t_arrf = [None for i in range(n_areas)]
+        self.t_evdn = [None for i in range(n_areas)]
+        self.t_uh_x = [None for i in range(n_areas)]
+        self.fig_dir = fig_dir
+        if fig_dir is not None:
+            fig_dir = Path(fig_dir)
+            fig_dir.mkdir(parents=True, exist_ok=True)
+
+    @property
+    def n_areas(self):
+        """
+        Number of sub-regions
+        """
+        return len(self.u_mat_list)
+
+    def compute_decomposition(self, tolerance_arrf, proba_arrf):
+        """
+        Decompose each Gabor multiplier using a random EVD
+
+        The decomposition is obtained using
+        :py:func:`skpomade.range_approximation.adaptive_randomized_range_finder`
+        followed by :py:func:`skpomade.factorization_construction.evd_nystrom`.
+        The rank of each decomposition is estimated using parameters
+        `tolerance_arrf` and `proba_arrf`.
+        Running times to compute the range approximation, the
+        EVD itself and the additional matrix products for subsequent
+        computations are stored in attributes `t_arrf`, `t_evdn` and
+        `t_uh_x`, respectively.
+
+        Parameters
+        ----------
+        tolerance_arrf : float
+            Tolerance for
+            :py:func:`~skpomade.range_approximation.adaptive_randomized_range_finder`
+        proba_arrf : float
+            Probability of error for
+            :py:func:`~skpomade.range_approximation.adaptive_randomized_range_finder`
+
+        """
+        for i in range(self.n_areas):
+            print('Random EVD of Gabor multiplier #{}'.format(i))
+            print('#coefs in mask: {} ({:.1%} missing)'
+                  .format(np.sum(self.gabmul_list[i].mask),
+                          np.sum(self.gabmul_list[i].mask)
+                          / self.gabmul_list[i].mask.size))
+            t0 = perf_counter()
+            q_mat = adaptive_randomized_range_finder(a=self.gabmul_list[i],
+                                                     tolerance=tolerance_arrf,
+                                                     proba=proba_arrf, r=None,
+                                                     rand_state=0, n_cols_Q=32)
+            self.t_arrf[i] = perf_counter() - t0
+            print('Q shape:', q_mat.shape)
+            t0 = perf_counter()
+            self.s_vec_list[i], self.u_mat_list[i] = \
+                evd_nystrom(a=self.gabmul_list[i], q_mat=q_mat)
+            self.t_evdn[i] = perf_counter() - t0
+            print('Running times:')
+            print('   - adaptive_randomized_range_finder: {} s'.format(
+                self.t_arrf[i]))
+            print('   - evd_nystrom: {} s'.format(self.t_evdn[i]))
+
+            t0 = perf_counter()
+            self.uh_x_list[i] = self.u_mat_list[i].T.conj() @ self.x_mix
+            self.t_uh_x[i] = perf_counter() - t0
+
+    def compute_decomposition_fixed_rank(self, rank):
+        """
+        Decompose each Gabor multiplier using a random EVD with given rank
+
+        The decomposition is obtained using
+        :py:func:`skpomade.range_approximation.randomized_range_finder`
+        followed by :py:func:`skpomade.factorization_construction.evd_nystrom`.
+        Running times are stored in attributes `t_rrf`, `t_evdn` and `t_uh_x`.
+
+        Parameters
+        ----------
+        rank : int
+            Rank of the decomposition
+        """
+        self.t_rrf = [None for i in range(self.n_areas)]
+        self.t_evdn = [None for i in range(self.n_areas)]
+        self.t_uh_x = [None for i in range(self.n_areas)]
+        for i in range(self.n_areas):
+            print('Random EVD of Gabor multiplier #{}'.format(i))
+            print('#coefs in mask: {} ({:.1%})'
+                  .format(np.sum(self.gabmul_list[i].mask),
+                          np.sum(self.gabmul_list[i].mask)
+                          / self.gabmul_list[i].mask.size))
+            t0 = perf_counter()
+            q_mat = randomized_range_finder(a=self.gabmul_list[i],
+                                            n_l=rank,
+                                            rand_state=0)
+            self.t_rrf[i] = perf_counter() - t0
+            print('Q shape:', q_mat.shape)
+            t0 = perf_counter()
+            self.s_vec_list[i], self.u_mat_list[i] = \
+                evd_nystrom(a=self.gabmul_list[i], q_mat=q_mat)
+            self.t_evdn[i] = perf_counter() - t0
+            print('Running times:')
+            print('   - randomized_range_finder: {} s'.format(self.t_rrf[i]))
+            print('   - evd_nystrom: {} s'.format(self.t_evdn[i]))
+
+            t0 = perf_counter()
+            self.uh_x_list[i] = self.u_mat_list[i].T.conj() @ self.x_mix
+            self.t_uh_x[i] = perf_counter() - t0
+
+    def compute_estimate(self, lambda_coef):
+        """
+        Compute the signal estimate for a given hyperparameter
+        :math:`\lambda_i` for each sub-region :math:`i`.
+
+        Prior decomposition should have been performed using
+        :meth:`compute_decomposition` or
+        :meth:`compute_decomposition_fixed_rank`.
+
+        Parameters
+        ----------
+        lambda_coef : nd-array or float
+            If nd-array, hyperparameters :math:`\lambda_i` for each sub-region
+            :math:`i`. If float, the same value :math:`\lambda` is used
+            for each sub-region.
+
+        Returns
+        -------
+        nd-array
+            Reconstructed signal
+        """
+        if isinstance(lambda_coef, np.ndarray):
+            assert lambda_coef.size == self.n_areas
+        else:
+            lambda_coef = np.full(self.n_areas, fill_value=lambda_coef)
+        x = self.x_mix.copy()
+        for i in range(self.n_areas):
+            gamma_vec = lambda_coef[i] * self.s_vec_list[i] \
+                        / (1 - (1 - lambda_coef[i]) * self.s_vec_list[i])
+            x -= self.u_mat_list[i] @ (gamma_vec * self.uh_x_list[i])
+        return x
+
+    def compute_lambda(self, x_mix, e_target=None):
+        """
+        Estimate hyperparameters :math:`\lambda_i` from target energy in each
+        sub-region :math:`i`.
+
+        Parameters
+        ----------
+        x_mix : nd-array
+            Mix signal
+
+        e_target : nd-array or None
+            Target energy for each sub-region. If None, function
+            :py:func:`estimate_energy_in_mask` is used to estimate the
+            target energies.
+
+        Returns
+        -------
+        lambda_est : nd-array
+            Hyperparameters :math:`\lambda_i` for each sub-region :math:`i`.
+        t_est : nd-array
+            Running time to estimate each hyperparameter.
+        """
+        if e_target is None:
+            e_target = estimate_energy_in_mask(
+                x_mix=x_mix, mask=self.mask,
+                dgt_params=self.dgt_params, signal_params=self.signal_params,
+                fig_dir=self.fig_dir)
+        t_est = np.empty(self.n_areas)
+        lambda_est = np.empty(self.n_areas)
+        for i_area in range(self.n_areas):
+            mask_i = self.mask == i_area + 1
+
+            def obj_fun_est(lambda_coef):
+                x = self.compute_estimate(lambda_coef)
+                x_tf_masked = mask_i * self.gabmul_list[i_area].dgt(x)
+                e_mask = np.linalg.norm(x_tf_masked, 'fro') ** 2
+                return np.abs(e_target[i_area] - e_mask)
+
+            t0 = perf_counter()
+            sol_est = minimize_scalar(obj_fun_est, bracket=[0, 1],
+                                      method='brent')
+            t_est[i_area] = perf_counter() - t0
+            lambda_est[i_area] = sol_est.x
+        return lambda_est, t_est
+
+
+def reconstruction(x_mix, lambda_coef, u_mat, s_vec):
+    return GabMulTff(x_mix=x_mix, u_mat=u_mat, s_vec=s_vec)(lambda_coef)
+
+
+def estimate_energy_in_mask(x_mix, mask, dgt_params, signal_params,
+                            fig_dir=None, prefix=None):
+    """
+    Estimate energy in time-frequency mask
+
+    Parameters
+    ----------
+    x_mix : nd-array
+        Mix signal
+    mask : nd-array
+        Time-frequency mask for each sub-region
+    dgt_params : dict
+        DGT parameters
+    signal_params : dict
+        Signal parameters
+    fig_dir : str or Path
+        If not None, folder where figures are stored. If None, figures are
+        not plotted.
+    prefix : str
+        If not None, this prefix is used when saving the figures.
+
+    Returns
+    -------
+    nd-array
+        Estimated energy in each sub-region.
+    """
+    x_tf_mat = dgt(sig=x_mix, dgt_params=dgt_params)
+    x_tf_mat[mask > 0] = np.nan
+    e_f_mean = np.nanmean(np.abs(x_tf_mat) ** 2, axis=1)
+
+    mask = mask.astype(int)
+    n_areas = np.unique(mask).size - 1
+    estimated_energy = np.empty(n_areas)
+    e_mat = e_f_mean[:, None] @ np.ones((1, x_tf_mat.shape[1]))
+    e_mat[mask == 0] = 0
+    for i_area in range(n_areas):
+        mask_i = mask == i_area + 1
+        estimated_energy[i_area] = np.sum(e_mat * mask_i)
+
+    if fig_dir is not None:
+        fig_dir = Path(fig_dir)
+        fig_dir.mkdir(parents=True, exist_ok=True)
+        if prefix is None:
+            prefix = ''
+        else:
+            prefix = prefix + '_'
+        dynrange = 100
+        c_max = np.nanmax(db(x_tf_mat))
+        clim = c_max - dynrange, c_max
+
+        fs = signal_params['fs']
+        plt.figure()
+        plot_spectrogram(x=x_mix, dgt_params=dgt_params, fs=fs, clim=clim)
+        plt.title('Mix')
+        plt.savefig(fig_dir / '{}mix.pdf'.format(prefix))
+
+        plt.figure()
+        plotdgtreal(coef=np.sqrt(e_mat), a=dgt_params['hop'],
+                    M=dgt_params['n_bins'], fs=fs, clim=clim)
+        plt.title('Mask filled with average energy (total: {})'
+                  .format(estimated_energy))
+        plt.savefig(fig_dir / '{}filled_mask.pdf'.format(prefix))
+
+        x_tf_mat[mask > 0] = np.sqrt(e_mat[mask > 0])
+        plt.figure()
+        plotdgtreal(coef=x_tf_mat, a=dgt_params['hop'],
+                    M=dgt_params['n_bins'], fs=fs, clim=clim)
+        plt.title('Mix filled with average energy (total: {})'
+                  .format(estimated_energy))
+        plt.savefig(fig_dir / '{}filled_mix.pdf'.format(prefix))
+
+        plt.figure()
+        plt.plot(db(e_f_mean) / 2)
+        plt.xlabel('Frequency index')
+        plt.ylabel('Average energy (dB)')
+        plt.title('Average energy per frequency bin in mix')
+        plt.savefig(fig_dir / '{}average_energy.pdf'.format(prefix))
+
+        e_f_mean_check = np.mean(np.abs(x_tf_mat) ** 2, axis=1)
+        plt.figure()
+        plt.plot(db(e_f_mean) / 2, label='Before filling')
+        plt.plot(db(e_f_mean_check) / 2, '--', label='After filling')
+        plt.xlabel('Frequency index')
+        plt.ylabel('Average energy (dB)')
+        plt.title('Average energy per frequency bin in mix')
+        plt.legend()
+        plt.savefig(fig_dir / '{}average_energy_check.pdf'.format(prefix))
+
+    return estimated_energy
+
+
+def compute_lambda_oracle_sdr(gmtff, x_wb):
+    """
+    Compute oracle value for hyperparameter :math:`\lambda_i` from true
+    solution.
+
+    If only one region is considered, Brent's algorithm is used (see
+    :py:func:`scipy.optimize.minimize_scalar`). If multiple sub-regions are
+    considered, the BFGS algorithm is used (see
+    :py:func:`scipy.optimize.minimize`).
+
+    Parameters
+    ----------
+    gmtff : GabMulTff
+    x_wb : nd-array
+        True signal for the wideband source.
+
+    Returns
+    -------
+    lambda_oracle : nd-array
+        Oracle values for hyperparameters :math:`\lambda_i` for each
+        sub-region :math:`i`.
+    t_oracle : float
+        Running time to compute the oracle hyperparameters.
+    """
+    t0 = perf_counter()
+
+    def obj_fun_oracle(lambda_coef):
+        return np.linalg.norm(x_wb - gmtff.compute_estimate(lambda_coef))
+
+    if gmtff.tol_subregions is None:
+        sol_oracle = minimize_scalar(obj_fun_oracle,
+                                     bracket=[0, 1], method='brent')
+        lambda_oracle = np.array([sol_oracle.x])
+    else:
+        sol_oracle = minimize(obj_fun_oracle,
+                              np.ones(gmtff.n_areas),
+                              method='BFGS',
+                              options={'disp': True})
+        lambda_oracle = sol_oracle.x
+    t_oracle = perf_counter() - t0
+    return lambda_oracle, t_oracle
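Taken together, the class above is meant to be used as a small pipeline: decompose the Gabor multipliers, estimate the hyperparameters, then reconstruct. The sketch below illustrates this on a synthetic signal with a rectangular mask; the tolerance, probability, window and mask bounds are illustrative values, not those of the paper's experiments:

```python
import numpy as np

from tffpy.tf_fading import GabMulTff
from tffpy.tf_tools import get_dgt_params, get_signal_params, \
    generate_rectangular_mask

fs, sig_len = 8000, 16384
signal_params = get_signal_params(sig_len=sig_len, fs=fs)
dgt_params = get_dgt_params(win_type='gauss', approx_win_len=256, hop=64,
                            n_bins=256, sig_len=sig_len)
# Rectangular time-frequency region to be faded (relative bounds)
mask = generate_rectangular_mask(n_bins=256, hop=64, sig_len=sig_len,
                                 t_lim=[0.4, 0.6], f_lim=[0.2, 0.4])

x_mix = np.random.RandomState(0).randn(sig_len)
gmtff = GabMulTff(x_mix=x_mix, mask=mask, dgt_params=dgt_params,
                  signal_params=signal_params)
gmtff.compute_decomposition(tolerance_arrf=1e-2, proba_arrf=0.999)
lambda_est, t_est = gmtff.compute_lambda(x_mix=x_mix)
x_est = gmtff.compute_estimate(lambda_est)
```

With `tol_subregions` left to `None`, the whole mask is treated as a single region, so `compute_lambda` returns a single hyperparameter value.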
diff --git a/python/tffpy/tf_tools.py b/python/tffpy/tf_tools.py
new file mode 100755
index 0000000000000000000000000000000000000000..52c57985372192d55ba1bc3a7951ce4d5315c944
--- /dev/null
+++ b/python/tffpy/tf_tools.py
@@ -0,0 +1,309 @@
+# -*- coding: utf-8 -*-
+# ######### COPYRIGHT #########
+# Credits
+# #######
+#
+# Copyright(c) 2020-2020
+# ----------------------
+#
+# * Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+# * Université d'Aix-Marseille <http://www.univ-amu.fr/>
+# * Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+# * Université de Toulon <http://www.univ-tln.fr/>
+#
+# Contributors
+# ------------
+#
+# * `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+# * `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+#
+# This package has been created thanks to the joint work with Florent Jaillet
+# and Ronan Hamon on other packages.
+#
+# Description
+# -----------
+#
+# Time frequency fading using Gabor multipliers
+#
+# Version
+# -------
+#
+# * tffpy version = 0.1.3
+#
+# Licence
+# -------
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# ######### COPYRIGHT #########
+""" Base functions and classes.
+
+.. moduleauthor:: Valentin Emiya
+
+"""
+import warnings
+
+import numpy as np
+from ltfatpy import dgtreal, idgtreal, arg_firwin, gabwin, plotdgtreal
+from scipy.sparse.linalg import LinearOperator
+
+from tffpy.utils import plot_mask, plot_win
+
+
+def get_dgt_params(win_type, approx_win_len, hop, n_bins,
+                   phase_conv='freqinv', sig_len=None):
+    """
+    Build dictionary of DGT parameters
+
+    The output dictionary `dgt_params` is composed of:
+
+    * `dgt_params['win']`: the window array (nd-array)
+    * `dgt_params['hop']`: the hop size (int)
+    * `dgt_params['n_bins']`: the number of frequency bins (int)
+    * `dgt_params['input_win_len']`: the effective window length (input window
+      length rounded to the nearest power of two).
+    * `dgt_params['phase_conv']`: the phase convention `'freqinv'` or
+      `'timeinv'`, see `pt` argument in :py:func:`ltfatpy.gabor.dgtreal`
+
+    Parameters
+    ----------
+    win_type : str
+        Window name, e.g. 'hann', 'gauss' (see :py:func:`ltfatpy.arg_firwin`)
+    approx_win_len : int
+        Approximate window length
+    hop : int
+        Hop size
+    n_bins : int
+        Number of frequency bins
+    phase_conv : 'freqinv' or 'timeinv'
+        Phase convention
+    sig_len : int
+        Signal length
+
+    Returns
+    -------
+    dict
+        DGT parameters (see above)
+    """
+    supported_wins = arg_firwin() | {'gauss'}
+    msg = '{} not supported, try {}'.format(win_type, supported_wins)
+    assert win_type in supported_wins, msg
+    msg = 'Signal length should be given if win_type is "gauss"'
+    assert win_type != 'gauss' or sig_len is not None, msg
+
+    input_win_len = int(2 ** np.round(np.log2(approx_win_len)))
+    if input_win_len != approx_win_len:
+        warnings.warn('Input window length {} has been changed to {}.'
+                      .format(approx_win_len, input_win_len))
+
+    if win_type == 'gauss':
+        tfr = float((np.pi * input_win_len ** 2) / (4 * sig_len * np.log(2)))
+        win, info = gabwin(g={'name': ('tight', 'gauss'), 'tfr': tfr},
+                           a=hop, M=n_bins, L=sig_len)
+    else:
+        win, info = gabwin(g={'name': ('tight', win_type), 'M': input_win_len},
+                           a=hop, M=n_bins, L=sig_len)
+    return dict(win=win, hop=hop, n_bins=n_bins, input_win_len=input_win_len,
+                phase_conv=phase_conv)
+
+
+def get_signal_params(sig_len, fs):
+    """
+    Build dictionary of signal parameters
+
+    The output dictionary `signal_params` is composed of:
+
+    * `signal_params['sig_len']` : the signal length
+    * `signal_params['fs']` : the sampling frequency
+
+    This function only embeds the input parameters into a dictionary
+    without changing their values.
+
+    Parameters
+    ----------
+    sig_len : int
+        Signal length
+    fs : int
+        Sampling frequency
+
+    Returns
+    -------
+    dict
+        See above
+    """
+    return dict(sig_len=sig_len, fs=fs)
+
+
+class GaborMultiplier(LinearOperator):
+    """
+    Gabor multipliers
+
+    Parameters
+    ----------
+    mask : nd-array
+        Time-frequency mask
+    dgt_params : dict
+        DGT parameters
+    signal_params : dict
+        Signal parameters
+    """
+
+    def __init__(self, mask, dgt_params, signal_params):
+        self.sig_len = signal_params['sig_len']
+        LinearOperator.__init__(self,
+                                dtype=np.float64,
+                                shape=(self.sig_len, self.sig_len))
+        self.win = dgt_params['win']
+        self.hop = dgt_params['hop']
+        self.n_bins = dgt_params['n_bins']
+        self.fs = signal_params['fs']
+        self.phase_conv = dgt_params['phase_conv']
+        assert mask.shape[0] == self.n_bins // 2 + 1
+        assert mask.shape[1] == self.sig_len // self.hop
+        self.mask = mask
+
+    # @property
+    # def shape(self):
+    #     return self.sig_len, self.sig_len
+
+    def _adjoint(self):
+        """
+        Adjoint of the Gabor multiplier
+
+        Note that since the Gabor multiplier is self-adjoint, this method
+        returns the object itself.
+
+        Returns
+        -------
+        GaborMultiplier
+        """
+        return self
+
+    def _matvec(self, x):
+        if x.ndim == 2:
+            x = x.reshape(-1)
+        return self.idgt(tf_mat=self.dgt(sig=x) * self.mask)
+
+    def dgt(self, sig):
+        """
+        Apply the DGT related to the Gabor multiplier
+
+        Parameters
+        ----------
+        sig : nd-array
+            Real signal to be transformed
+
+        Returns
+        -------
+        nd-array
+            DGT coefficients
+        """
+        return dgtreal(f=sig, g=self.win, a=self.hop, M=self.n_bins,
+                       L=self.sig_len, pt=self.phase_conv)[0]
+
+    def idgt(self, tf_mat):
+        """
+        Apply the inverse DGT related to the Gabor multiplier
+
+        Parameters
+        ----------
+        tf_mat : nd-array
+            Time-frequency coefficients (non-negative frequencies only)
+        Returns
+        -------
+        nd-array
+            Real signal
+        """
+        return idgtreal(coef=tf_mat, g=self.win, a=self.hop, M=self.n_bins,
+                        Ls=self.sig_len, pt=self.phase_conv)[0]
+
+    def plot_win(self, label=None):
+        """
+        Plot the window in the current figure.
+
+        Parameters
+        ----------
+        label : str or None
+            If not None, label to be assigned to the curve.
+        """
+        plot_win(win=self.win, fs=self.fs, label=label)
+
+    def plot_mask(self):
+        """
+        Plot the time-frequency mask
+        """
+        plot_mask(mask=self.mask, hop=self.hop, n_bins=self.n_bins, fs=self.fs)
+
+    def compute_ambiguity_function(self, fftshift=True):
+        """
+        Compute the ambiguity function of the window
+
+        Parameters
+        ----------
+        fftshift : bool
+            If true, shift the window in time before computing its DGT.
+        """
+        if fftshift:
+            w = self.win.copy()
+            return self.dgt(np.fft.fftshift(w))
+        else:
+            return self.dgt(self.win)
+
+    def plot_ambiguity_function(self, dynrange=100, fftshift=True):
+        """
+        Plot the ambiguity function of the window in the current figure.
+
+        Parameters
+        ----------
+        dynrange : float
+            Dynamic range to be displayed
+        fftshift : bool
+            If true, shift the window in time before computing its DGT.
+        """
+        plotdgtreal(
+            coef=self.compute_ambiguity_function(fftshift=fftshift),
+            a=self.hop, M=self.n_bins, fs=self.fs, dynrange=dynrange)
+
+
+def generate_rectangular_mask(n_bins, hop, sig_len, t_lim, f_lim):
+    """
+    Generate a rectangular time-frequency mask
+
+    Parameters
+    ----------
+    n_bins : int
+        Number of frequency bins
+    hop : int
+        Hop size
+    sig_len : int
+        Signal length
+    t_lim : sequence (2,)
+        Time boundaries of the mask
+    f_lim : sequence (2,)
+        Frequency boundaries of the mask
+
+    Returns
+    -------
+    nd-array
+        The boolean 2D array containing the time-frequency mask (True values)
+    """
+    f_lim = np.array(f_lim)
+    t_lim = np.array(t_lim)
+    mask = np.zeros((n_bins // 2 + 1, sig_len // hop), dtype=bool)
+    if np.issubdtype(f_lim.dtype, np.dtype(float).type):
+        f_lim = np.round(f_lim * mask.shape[0]).astype(int)
+    if np.issubdtype(t_lim.dtype, np.dtype(float).type):
+        t_lim = np.round(t_lim * mask.shape[1]).astype(int)
+    mask[f_lim[0]:f_lim[1], t_lim[0]:t_lim[1]] = True
+    return mask
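As a quick illustration of how these helpers fit together, the sketch below builds a Gabor multiplier from a rectangular mask and applies it to white noise; the window type, sizes and mask bounds are arbitrary illustration values:

```python
import numpy as np

from tffpy.tf_tools import get_dgt_params, get_signal_params, \
    GaborMultiplier, generate_rectangular_mask

fs, sig_len = 8000, 8192
signal_params = get_signal_params(sig_len=sig_len, fs=fs)
dgt_params = get_dgt_params(win_type='hann', approx_win_len=256, hop=32,
                            n_bins=256, sig_len=sig_len)
mask = generate_rectangular_mask(n_bins=256, hop=32, sig_len=sig_len,
                                 t_lim=[0.25, 0.75], f_lim=[0.1, 0.3])

gabmul = GaborMultiplier(mask=mask, dgt_params=dgt_params,
                         signal_params=signal_params)
x = np.random.RandomState(0).randn(sig_len)

# Analysis, masking and synthesis in a single operator application
y = gabmul.matvec(x)

# The DGT/IDGT pair uses a tight window, so the round trip is
# exact up to numerical precision
tf_mat = gabmul.dgt(x)       # shape (n_bins // 2 + 1, sig_len // hop)
x_back = gabmul.idgt(tf_mat)
```

Since `GaborMultiplier` is self-adjoint, its adjoint is the operator itself, which matches the assumptions of the randomized Nyström EVD used in `tf_fading.py`.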
diff --git a/python/tffpy/utils.py b/python/tffpy/utils.py
new file mode 100755
index 0000000000000000000000000000000000000000..67b36ec609d962fec16061c7e6fda9ee7b5886e3
--- /dev/null
+++ b/python/tffpy/utils.py
@@ -0,0 +1,310 @@
+# -*- coding: utf-8 -*-
+# ######### COPYRIGHT #########
+# Credits
+# #######
+#
+# Copyright(c) 2020-2020
+# ----------------------
+#
+# * Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>
+# * Université d'Aix-Marseille <http://www.univ-amu.fr/>
+# * Centre National de la Recherche Scientifique <http://www.cnrs.fr/>
+# * Université de Toulon <http://www.univ-tln.fr/>
+#
+# Contributors
+# ------------
+#
+# * `Valentin Emiya <mailto:valentin.emiya@lis-lab.fr>`_
+# * `Ama Marina Krémé <mailto:ama-marina.kreme@lis-lab.fr>`_
+#
+# This package has been created thanks to the joint work with Florent Jaillet
+# and Ronan Hamon on other packages.
+#
+# Description
+# -----------
+#
+# Time frequency fading using Gabor multipliers
+#
+# Version
+# -------
+#
+# * tffpy version = 0.1.3
+#
+# Licence
+# -------
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# ######### COPYRIGHT #########
+"""Utils classes and functions for tffpy.
+
+.. moduleauthor:: Valentin Emiya
+"""
+import os
+from pathlib import Path
+from configparser import ConfigParser
+import numpy as np
+import matplotlib.pyplot as plt
+
+from ltfatpy import plotdgtreal, dgtreal, idgtreal
+
+
+def plot_mask(mask, hop, n_bins, fs):
+    """
+    Plot time-frequency mask
+
+    Parameters
+    ----------
+    mask : nd-array
+        Time-frequency mask
+    hop : int
+        Hop size
+    n_bins : int
+        Number of frequency bins
+    fs : int
+        Sampling frequency
+    """
+    plotdgtreal(coef=mask.astype(float), a=hop, M=n_bins, fs=fs,
+                normalization='lin')
+
+
+def plot_win(win, fs, label=None):
+    """
+    Plot window
+
+    Parameters
+    ----------
+    win : nd-array
+        Window array
+    fs : int
+        Sampling frequency
+    label : str or None
+        If not None, label to be assigned to the curve.
+    """
+    x_range = np.fft.fftshift(np.arange(win.size) / fs)
+    x_range[x_range > x_range[-1]] -= x_range.size / fs
+    if label is None:
+        plt.plot(x_range, np.fft.fftshift(win))
+    else:
+        plt.plot(x_range, np.fft.fftshift(win), label=str(label))
+        plt.legend()
+    plt.xlabel('Time (s)')
+    plt.grid()
+
+
+def plot_spectrogram(x, dgt_params, fs, dynrange=100, clim=None):
+    """
+    Plot spectrogram of a signal
+
+    Parameters
+    ----------
+    x : nd-array
+        Signal
+    dgt_params : dict
+        DGT parameters (see `tffpy.tf_tools.get_dgt_params`)
+    fs : int
+        Sampling frequency
+    dynrange : float
+        Dynamic range to be displayed.
+    clim : sequence
+        Min and max values for the colorbar. If both `clim` and `dynrange` are
+        specified, then clim takes precedence.
+    """
+    tf_mat = dgt(x, dgt_params=dgt_params)
+    plotdgtreal(coef=tf_mat, a=dgt_params['hop'], M=dgt_params['n_bins'],
+                fs=fs, dynrange=dynrange, clim=clim)
+
+
+def db(x):
+    """
+    Linear to decibel (dB) conversion
+
+    Parameters
+    ----------
+    x : scalar or nd-array
+        Values to be converted
+
+    Returns
+    -------
+    scalar or nd-array
+        Conversion of input `x` in dB.
+    """
+    return 20 * np.log10(np.abs(x))
+
+
+def sdr(x_ref, x_est):
+    """
+    Signal to distortion ratio
+
+    Parameters
+    ----------
+    x_ref : nd-array
+        Reference signal
+    x_est : nd-array
+        Estimation of the reference signal
+
+    Returns
+    -------
+    float
+    """
+    return snr(x_signal=x_ref, x_noise=x_est - x_ref)
+
+
+def snr(x_signal, x_noise):
+    """
+    Signal to noise ratio
+
+    Parameters
+    ----------
+    x_signal : nd-array
+        Signal of interest
+    x_noise : nd-array
+        Noise signal
+
+    Returns
+    -------
+    float
+    """
+    return db(np.linalg.norm(x_signal)) - db(np.linalg.norm(x_noise))
+
+
+def is_div_spectrum(x_ref, x_est):
+    """
+    Itakura-Saito divergence computed via discrete Fourier transform
+
+    Parameters
+    ----------
+    x_ref : nd-array
+        Reference signal
+    x_est : nd-array
+        Estimation of the reference signal
+
+    Returns
+    -------
+    float
+    """
+    return is_div(x_ref=np.abs(np.fft.fft(x_ref)),
+                  x_est=np.abs(np.fft.fft(x_est)))
+
+
+def is_div(x_ref, x_est):
+    """
+    Itakura-Saito divergence
+
+    Parameters
+    ----------
+    x_ref : nd-array
+        Reference array
+    x_est : nd-array
+        Estimation of the reference array
+
+    Returns
+    -------
+    float
+    """
+    x_ratio = x_ref / x_est
+    return np.sum(x_ratio - np.log(x_ratio)) - np.size(x_ratio)
+
+
+def dgt(sig, dgt_params):
+    """
+    Discrete Gabor transform of a signal
+
+    Parameters
+    ----------
+    sig : nd-array
+        Input signal
+    dgt_params : dict
+        DGT parameters (see `tffpy.tf_tools.get_dgt_params`)
+
+    Returns
+    -------
+    nd-array
+        DGT coefficients
+    """
+    return dgtreal(f=sig, g=dgt_params['win'], a=dgt_params['hop'],
+                   M=dgt_params['n_bins'], L=sig.shape[0],
+                   pt=dgt_params['phase_conv'])[0]
+
+
+def idgt(tf_mat, dgt_params, sig_len):
+    """
+    Inverse discrete Gabor transform
+
+    Parameters
+    ----------
+    tf_mat : nd-array
+        DGT coefficients
+    dgt_params : dict
+        DGT parameters (see `tffpy.tf_tools.get_dgt_params`)
+    sig_len : int
+        Signal length
+
+    Returns
+    -------
+    nd-array
+        Reconstructed signal
+    """
+    return idgtreal(coef=tf_mat, g=dgt_params['win'], a=dgt_params['hop'],
+                    M=dgt_params['n_bins'], Ls=sig_len,
+                    pt=dgt_params['phase_conv'])[0]
+
+
+def get_config_file():
+    """
+    User configuration file
+
+    Returns
+    -------
+    Path
+    """
+    return Path(os.path.expanduser('~')) / '.config' / 'tffpy.conf'
+
+
+def generate_config():
+    """
+    Generate an empty configuration file.
+    """
+
+    config = ConfigParser(allow_no_value=True)
+
+    config.add_section('DATA')
+    config.set('DATA', '# path to data')
+    config.set('DATA', 'data_path', '/to/be/completed/tffpy/data')
+    config_file = get_config_file()
+    with open(config_file, 'w') as file:
+        config.write(file)
+    print('Configuration file created: {}. Please update it with your data '
+          'path.'.format(config_file))
+
+
+def get_data_path():
+    """
+    Read data folder from user configuration file.
+
+    Returns
+    -------
+    Path
+    """
+    config_file = get_config_file()
+    if not config_file.exists():
+        raise Exception('Configuration file does not exist. To create it, '
+                        'execute tffpy.utils.generate_config()')
+    config = ConfigParser()
+    config.read(config_file)
+    data_path = Path(config['DATA']['data_path'])
+    if not data_path.exists():
+        raise Exception('Invalid data path: {}. Update configuration file {}'
+                        .format(data_path, config_file))
+    return data_path
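Finally, a short sketch of the transform and metric helpers on a synthetic signal (all values are illustrative); it computes a DGT/IDGT round trip and scores it with the provided metrics:

```python
import numpy as np

from tffpy.tf_tools import get_dgt_params
from tffpy.utils import dgt, idgt, sdr, snr, is_div_spectrum

fs, sig_len = 8000, 4096
dgt_params = get_dgt_params(win_type='hann', approx_win_len=128, hop=16,
                            n_bins=128, sig_len=sig_len)

x = np.random.RandomState(0).randn(sig_len)
tf_mat = dgt(sig=x, dgt_params=dgt_params)
x_rec = idgt(tf_mat=tf_mat, dgt_params=dgt_params, sig_len=sig_len)

noise = 0.1 * np.random.RandomState(1).randn(sig_len)
print('SDR of the round trip: {:.1f} dB'.format(sdr(x_ref=x, x_est=x_rec)))
print('SNR against noise: {:.1f} dB'.format(snr(x_signal=x, x_noise=noise)))
print('IS divergence of the round trip:', is_div_spectrum(x_ref=x, x_est=x_rec))
```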