From f52062e92bb51ec1dffbb21583ea340fde6db5fc Mon Sep 17 00:00:00 2001
From: Baptiste Bauvin <baptiste.bauvin@lis-lab.fr>
Date: Mon, 20 Apr 2020 06:35:50 -0400
Subject: [PATCH] Removed old files

---
 ...ltiviews_datasets_generator.cpython-36.pyc |  Bin 7621 -> 0 bytes
 late/execute.py                               |  286 -----
 late/multiviews_datasets_generator.py         |  220 ----
 late/parameters.py                            |   60 -
 late/test_generator.py                        | 1140 -----------------
 .../_old_multiviews_datasets.py               |  194 ---
 multiview_generator/_old_parameters.py        |   83 --
 multiview_generator/_old_result.py            |   88 --
 multiview_generator/_old_update_baptiste.py   |  635 ---------
 .../_old_use_generator_baptiste.py            |   41 -
 .../tests/_old_test_classifier.py             |  823 ------------
 .../tests/_old_unit_test_update.py            |   52 -
 12 files changed, 3622 deletions(-)
 delete mode 100644 late/__pycache__/multiviews_datasets_generator.cpython-36.pyc
 delete mode 100644 late/execute.py
 delete mode 100644 late/multiviews_datasets_generator.py
 delete mode 100644 late/parameters.py
 delete mode 100644 late/test_generator.py
 delete mode 100644 multiview_generator/_old_multiviews_datasets.py
 delete mode 100644 multiview_generator/_old_parameters.py
 delete mode 100644 multiview_generator/_old_result.py
 delete mode 100644 multiview_generator/_old_update_baptiste.py
 delete mode 100644 multiview_generator/_old_use_generator_baptiste.py
 delete mode 100644 multiview_generator/tests/_old_test_classifier.py
 delete mode 100644 multiview_generator/tests/_old_unit_test_update.py

diff --git a/late/__pycache__/multiviews_datasets_generator.cpython-36.pyc b/late/__pycache__/multiviews_datasets_generator.cpython-36.pyc
deleted file mode 100644
index 828277d0901732be8b003cb85e93cd846ecc6115..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 7621
[base85 binary payload omitted: 80 lines encoding the deleted compiled .pyc blob]

diff --git a/late/execute.py b/late/execute.py
deleted file mode 100644
index a3810c0..0000000
--- a/late/execute.py
+++ /dev/null
@@ -1,286 +0,0 @@
-import os
-import yaml
-import numpy as np
-from sklearn.datasets import make_classification
-from random import gauss
-from math import ceil, floor
-import pandas as pd
-import shutil
-import h5py
-
-class MultiviewDatasetGenetator():
-
-    def __init__(self, n_samples=100, n_views=2, n_classes=2,
-                                Z_factor=2,
-                                R=0,
-                                n_clusters_per_class=1,
-                                class_sep_factor=10,
-                                n_informative_divid=2,
-                                d=4,
-                                D=10,
-                                standard_deviation=2,
-                                weights=None,
-                                flip_y=0.0,
-                                random_state=42, config_path=None):
-        if config_path is not None:
-            with open(config_path) as config_file:
-                args = yaml.safe_load(config_file)
-                self.__init__(**args)
-        else:
-            self.n_samples = n_samples
-            self.n_views = n_views
-            self.n_classes = n_classes
-            self.Z_factor = Z_factor
-            self.R = R
-            self.n_clusters_per_class = n_clusters_per_class
-            self.class_sep_factor = class_sep_factor
-            self.n_informative_divid = n_informative_divid
-            self.d = d
-            self.D = D
-            self.standard_deviation = standard_deviation
-            self.weights = weights
-            self.flip_y = flip_y
-            self.random_state = random_state
-
-    def generate(self):
-        if self.n_views < 2:
-            raise ValueError("n_views >= 2")
-        if self.n_classes < 2:
-            raise ValueError("n_classes >= 2")
-        if self.Z_factor < 1:
-            raise ValueError(
-                "Z_factor must be >= 1 for the algorithm to work properly")
-        if (self.R < 0) or (self.R > 1):
-            raise ValueError("0 <= R <= 1")
-        if self.n_clusters_per_class < 1:
-            raise ValueError("n_clusters_per_class >= 1")
-        if self.class_sep_factor < 0:
-            raise ValueError("class_sep_factor >= 0")
-        if self.n_informative_divid < 1:
-            raise ValueError("n_informative_divid >= 1")
-        if self.d < 1:
-            raise ValueError("d >= 1")
-        if (self.d + self.D) / 2 - 3 * self.standard_deviation < 1:
-            raise ValueError(
-                "(d+D)/2 - 3*standard_deviation must be >= 1 so that the normal distribution only yields strictly positive values")
-
-        # draw the n_views view dimensions from N((d+D)/2, standard_deviation^2)
-        d_v = np.random.normal(loc=(self.d + self.D) / 2,
-                               scale=self.standard_deviation,
-                               size=self.n_views)
-        d_v = list(d_v)
-        remove_list, add_list = [], []
-        for dim_view in d_v:
-            if dim_view < self.d or dim_view > self.D:  # 1 <= d <= dim_view <= D
-                remove_list.append(dim_view)
-                add = -1
-                while add < self.d or add > self.D:
-                    add = gauss((self.d + self.D) / 2, self.standard_deviation)
-                add_list.append(add)
-        d_v = [view for view in d_v if view not in remove_list] + add_list
-        d_v = [int(view) for view in d_v]  # dimension of views = integer
-        # d_v = list of view dimensions sorted from highest to lowest
-        d_v.sort(reverse=True)
-        # Dimension of latent space Z (multiplied by Z_factor)
-        self.dim_Z = self.Z_factor * self.latent_space_dimension(d_v)
-        # Number of informative features
-        self.n_informative = round(self.dim_Z / self.n_informative_divid)
-        # Generation of latent space Z
-        self.Z, self.y = make_classification(n_samples=self.n_samples, n_features=self.dim_Z,
-                                   n_informative=self.n_informative, n_redundant=0,
-                                   n_repeated=0, n_classes=self.n_classes,
-                                   n_clusters_per_class=self.n_clusters_per_class,
-                                   weights=self.weights,
-                                   flip_y=self.flip_y,
-                                   class_sep=self.n_clusters_per_class * self.class_sep_factor,
-                                   random_state=self.random_state, shuffle=False)
-        I_q = np.arange(self.Z.shape[1])
-        meta_I_v = []
-        self.results = []
-        for view in range(self.n_views):
-            # draw d_v[view] column indices of Z uniformly from I_q
-            I_v = np.random.choice(I_q, size=d_v[view],
-                                   replace=False)  # sampled from I_q without replacement, size d_v[view]
-            meta_I_v += list(I_v)
-            # projection of Z along the columns in I_v
-            X_v = self.projection(I_v)
-            self.results.append((X_v, I_v))
-            # remove floor(R*d_v[view]) column indices of I_v from I_q
-            elements_to_remove = np.random.choice(I_v,
-                                                  size=floor(self.R * d_v[view]),
-                                                  replace=False)  # sampled from I_v without replacement, size floor(R*d_v[view])
-            I_q = np.setdiff1d(I_q,
-                               elements_to_remove)  # I_q without the elements of elements_to_remove
-        self.unsued_dimensions_list = [column for column in I_q if
-                                  column not in meta_I_v]
-        self.unsued_dimensions_percent = round(
-            (len(self.unsued_dimensions_list) / self.dim_Z) * 100, 2)
-
-    def projection(self, chosen_columns_list):
-        """
-        Returns the projection of the latent space self.Z on the columns of chosen_columns_list (in chosen_columns_list order)
-
-        Parameters:
-        -----------
-        chosen_columns_list : list
-
-        Returns:
-        --------
-        an array of dimension (number of rows of latent_space, length of chosen_columns_list)
-        """
-        return self.Z[:, chosen_columns_list]
-
-    def latent_space_dimension(self, views_dimensions_list):
-        """
-        Returns the minimal latent-space dimension (large enough to build the dataset) required by the generator for the view dimensions in views_dimensions_list
-
-        Parameters:
-        -----------
-        views_dimensions_list : list
-
-        Returns:
-        --------
-        an int
-        """
-        max_view_dimension = max(views_dimensions_list)
-        dimension = ceil(self.R * sum(views_dimensions_list))
-
-        if dimension < max_view_dimension:
-            dimension = max_view_dimension
-
-        reduced_dimension = dimension
-        remove_sum = 0
-
-        for num_view in range(1, len(views_dimensions_list)):
-            view_prec = views_dimensions_list[num_view - 1]
-            view_current = views_dimensions_list[num_view]
-            remove = floor(self.R * view_prec)
-            remove_sum += remove
-            if reduced_dimension - remove < view_current:
-                dimension += view_current - (reduced_dimension - remove)
-            reduced_dimension = dimension - remove_sum
-
-        return dimension
-
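-    # Worked example (illustrative, not from the original file): with
-    # views_dimensions_list = [10, 8, 6] and R = 0.5, the initial dimension is
-    # max(10, ceil(0.5 * 24)) = 12; the overlap corrections for views 8 and 6
-    # raise it to 13 and then 15, so latent_space_dimension returns 15.
-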
-    def to_csv(self, saving_path="."):
-        """
-        Writes len(self.results) + 2 csv files to the indicated path
-        File names :
-            latent_space.csv for the latent space
-            integer_labels.csv for the labels
-            view0.csv for the first view, view1.csv for the second, ...
-
-        Parameters:
-        -----------
-        saving_path : str
-
-        Returns:
-        --------
-        None
-        """
-        df_latent_space = pd.DataFrame(self.Z)
-        df_latent_space.to_csv(os.path.join(saving_path, 'latent_space.csv')
-                               , index=False)
-
-        df_labels = pd.DataFrame(self.y)
-        df_labels.to_csv(os.path.join(saving_path, 'integer_labels.csv'),
-                         index=False)
-
-        for view_index, view_tuple in enumerate(self.results):
-            df_view = pd.DataFrame(view_tuple[0], columns=view_tuple[1])
-            df_view.to_csv(os.path.join(saving_path,
-                                        'view'+str(view_index)+'.csv'),
-                           index=False)
-
-    def to_hdf5(self, saving_path=".", name="generated_dset"):
-
-        dataset_file = h5py.File(os.path.join(saving_path, name+".hdf5"), 'w')
-
-        labels_dataset = dataset_file.create_dataset("Labels",
-                                                     shape=self.y.shape,
-                                                     data=self.y)
-
-        labels_names = ["Label_1", "Label_0"]
-
-        labels_dataset.attrs["names"] = [
-            label_name.encode() if not isinstance(label_name, bytes)
-            else label_name for label_name in labels_names]
-
-        for view_index, (data, feature_indices) in enumerate(self.results):
-            df_dataset = dataset_file.create_dataset("View" + str(view_index),
-                                                     shape=data.shape,
-                                                     data=data)
-
-            df_dataset.attrs["sparse"] = False
-            df_dataset.attrs["name"] = "GeneratedView"+str(view_index)
-
-        meta_data_grp = dataset_file.create_group("Metadata")
-
-        meta_data_grp.attrs["nbView"] = len(self.results)
-        meta_data_grp.attrs["nbClass"] = len(np.unique(self.y))
-        meta_data_grp.attrs["datasetLength"] = \
-        self.results[0][0].shape[0]
-
-        meta_data_grp.create_dataset("example_ids", data=np.array(
-            ["gen_example_" + str(ex_indx) for ex_indx in
-             range(self.results[0][0].shape[0])]).astype(
-            np.dtype("S100")), dtype=np.dtype("S100"))
-
-        dataset_file.close()
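-
-    # Layout written above (for reference): a "Labels" dataset, one "View<i>"
-    # dataset per view, and a "Metadata" group carrying the nbView, nbClass
-    # and datasetLength attributes plus the "example_ids" string dataset.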
-
-if __name__ == "__main__":
-    n_samples = 100  # Number of samples in the dataset
-    n_views = 4  # Number of views in the dataset
-    n_classes = 2  # Number of classes in the dataset
-    Z_factor = 2  # Z dim = latent_space_dim * Z_factor
-    R = 0  # Percentage of non-redundant features in the views
-    n_clusters_per_class = 1  # Number of clusters for each class
-    class_sep_factor = 10000  # Separation between the different classes
-    n_informative_divid = 2  # Divides the number of informative features in the latent space
-    standard_deviation = 2
-    d = 4
-    D = 10
-    flip_y = 0.00
-    random_state = 42
-    weights = None # The proportions of examples in each class
-
-    path = "/home/baptiste/Documents/Datasets/Generated/metrics_dset/"
-    name = "metrics"
-    if not os.path.exists(path):
-        os.mkdir(path)
-
-    multiview_generator = MultiviewDatasetGenetator(n_samples=n_samples,
-                                                    n_views=n_views,
-                                                    n_classes=n_classes,
-                                                    Z_factor=Z_factor,
-                                                    R=R,
-                                                    n_clusters_per_class=n_clusters_per_class,
-                                                    class_sep_factor=class_sep_factor,
-                                                    n_informative_divid=n_informative_divid,
-                                                    d=d,
-                                                    D=D,
-                                                    standard_deviation=standard_deviation,
-                                                    flip_y=flip_y,
-                                                    weights=weights,
-                                                    random_state=random_state)
-
-    multiview_generator.generate()
-    multiview_generator.to_hdf5(saving_path=path, name=name)
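-
-    # Reading the generated file back (sketch, plain h5py only; not part of
-    # the original script):
-    # with h5py.File(os.path.join(path, name + ".hdf5"), "r") as hdf_file:
-    #     labels = hdf_file["Labels"][...]
-    #     view0 = hdf_file["View0"][...]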
-
-    # for filename in os.listdir(path):
-    #     file_path = os.path.join(path, filename)
-    #     try:
-    #         if os.path.isfile(file_path) or os.path.islink(file_path):
-    #             os.unlink(file_path)
-    #         elif os.path.isdir(file_path):
-    #             shutil.rmtree(file_path)
-    #     except Exception as e:
-    #         print('Failed to delete %s. Reason: %s' % (file_path, e))
-    # changing_labels_indices = np.random.RandomState(random_state).choice(np.arange(y.shape[0]), n_outliers)
-    # print(changing_labels_indices)
-    # y[changing_labels_indices] = np.invert(y[changing_labels_indices].astype(bool)).astype(int)
-    # results_to_csv(path, Z, y, results)
\ No newline at end of file
diff --git a/late/multiviews_datasets_generator.py b/late/multiviews_datasets_generator.py
deleted file mode 100644
index 2f4e5e7..0000000
--- a/late/multiviews_datasets_generator.py
+++ /dev/null
@@ -1,220 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Nov 26 15:38:38 2019
-
-@author: bernardet
-"""
-
-from sklearn.datasets import make_classification
-from random import gauss
-from math import ceil, floor
-import numpy as np
-import pandas as pd
-
-
-def latent_space_dimension(views_dimensions_list, R):
-    """
-    Returns the minimal latent-space dimension (large enough to build the dataset) required by generator_multiviews_dataset for the view dimensions in views_dimensions_list
-    
-    Parameters:
-    -----------
-    views_dimensions_list : list
-    R : float
-        
-    Returns:
-    --------
-    an int
-    """
-    max_view_dimension = max(views_dimensions_list)
-    dimension = ceil(R*sum(views_dimensions_list))
-    
-    if dimension < max_view_dimension:
-        dimension = max_view_dimension
-            
-    reduced_dimension = dimension
-    remove_sum = 0
-    
-    for num_view in range(1, len(views_dimensions_list)):
-        view_prec = views_dimensions_list[num_view - 1]
-        view_current = views_dimensions_list[num_view]
-        remove = floor(R*view_prec)
-        remove_sum += remove
-        if reduced_dimension - remove < view_current:
-            dimension += view_current - (reduced_dimension - remove)
-        reduced_dimension = dimension - remove_sum
-            
-    return dimension
-  
-
-def projection(latent_space, chosen_columns_list):
-    """
-    Returns the projection of latent_space on the columns of chosen_columns_list (in chosen_columns_list order)
-    
-    Parameters:
-    -----------
-    latent_space : array
-    chosen_columns_list : list
-        
-    Returns:
-    --------
-    an array of dimension (number of rows of latent_space, length of chosen_columns_list)
-    """
-    return latent_space[:, chosen_columns_list]
-
-
-def generator_multiviews_dataset(n_samples=1000, n_views=3, n_classes=2,
-                                 Z_factor=250, R=2/3, n_clusters_per_class=1,
-                                 class_sep_factor=2, n_informative_divid=2,
-                                 d=2, D=12, standard_deviation=2, weights=None,
-                                 random_state=42):
-    """
-    Returns a multiview_generator multiviews dataset
-    
-    Parameters:
-    -----------
-    n_samples : int
-                dataset number of samples (number of rows of dataset)
-    n_views : int >= 2
-              dataset number of views
-              one view is a set of some features (columns) of the latent space
-    n_classes : int >= 2
-                dataset number of classes 
-    Z_factor : float >= 1
-               the minimal dimension of the latent space (enough to build the dataset) is computed then multiplied by Z_factor
-    R : 0 <= float <= 1
-        R = 1 <> no possibility of redundancy between views
-        R = 0 <> maximal possibility of redundancy between views
-    n_clusters_per_class : int >= 1
-    class_sep_factor : float >= 0
-                       class_sep = n_clusters_per_class*class_sep_factor
-    n_informative_divid : float >= 1
-                          increasing n_informative_divid increases the number of non-informative features
-                          n_informative_divid = 1 <> no non-informative features, number of informative features = dimension of latent space
-                          number of informative features = round(dimension of latent space / n_informative_divid)
-    d : float >= 1
-        minimal dimension of views
-        dimension of views (int) chosen randomly from N((d+D)/2, standard_deviation^2) with d <= dimension of views <= D
-    D : float >= d
-        maximal dimension of views
-        dimension of views (int) chosen randomly from N((d+D)/2, standard_deviation^2) with d <= dimension of views <= D
-    standard_deviation : float
-                         standard deviation of the gaussian distribution N((d+D)/2, standard_deviation^2)
-                         dimension of views (int) chosen randomly from N((d+D)/2, standard_deviation^2) with d <= dimension of views <= D
-        
-    Returns:
-    --------
-    Z : an array of dimension (n_samples, dim_Z) = the generated samples, where dim_Z = Z_factor * minimal latent space dimension
-    y : an array of dimension (n_samples) = the integer labels for class membership of each sample
-    a list of n_views tuples (X_v, I_v) with :
-        X_v = Z projected along the d_v (= dimension of the v-th view) columns in I_v
-        I_v = the indices of X_v's columns in Z's column numbering
-    unsued_dimensions_percent : percentage of latent-space columns unused in any view
-    n_informative : number of informative features (dimension of latent space - n_informative = number of non informative features)
-    """
-    
-    if n_views < 2:
-        raise ValueError("n_views >= 2")
-    if n_classes < 2:
-        raise ValueError("n_classes >= 2")
-    if Z_factor < 1:
-        raise ValueError("Z_factor must be >= 1 for the algorithm to work properly")
-    if (R < 0) or (R > 1):
-        raise ValueError("0 <= R <= 1")
-    if n_clusters_per_class < 1:
-        raise ValueError("n_clusters_per_class >= 1")
-    if class_sep_factor < 0:
-        raise ValueError("class_sep_factor >= 0")
-    if n_informative_divid < 1:
-        raise ValueError("n_informative_divid >= 1")
-    if d < 1:
-        raise ValueError("d >= 1")
-    if (d+D)/2 - 3*standard_deviation < 1:
-        raise ValueError("(d+D)/2 - 3*standard_deviation must be >= 1 so that the normal distribution only yields strictly positive values")
-    
-    # draw the n_views view dimensions from N((d+D)/2, standard_deviation^2)
-    d_v = np.random.normal(loc=(d+D)/2, scale=standard_deviation, size=n_views)
-    d_v = list(d_v)
-    remove_list, add_list = [], []
-    for dim_view in d_v:
-        if dim_view < d or dim_view > D:  # 1 <= d <= dim_view <= D
-            remove_list.append(dim_view)
-            add = -1
-            while add < d or add > D:
-                add = gauss((d+D)/2, standard_deviation)
-            add_list.append(add)
-    d_v = [view for view in d_v if view not in remove_list] + add_list
-    d_v = [int(view) for view in d_v]  # dimension of views = integer
-    # d_v = list of view dimensions sorted from highest to lowest
-    d_v.sort(reverse=True)
-    # Dimension of latent space Z (multiplied by Z_factor)
-    dim_Z = Z_factor*latent_space_dimension(d_v, R)
-    print(dim_Z)
-    # Number of informative features
-    n_informative = round(dim_Z/n_informative_divid)
-    # Generation of latent space Z
-    print("n_samples :", n_samples)
-    print("dim_Z :", dim_Z)
-    print("n_informative :", n_informative)
-    print("n_redundant :", 0)
-    print("n_repeated :", 0)
-    print("n_classes :", n_classes)
-    print("n_clusters_per_class :", n_clusters_per_class)
-    print("class_sep :", n_clusters_per_class*class_sep_factor)
-
-
-    Z, y = make_classification(n_samples=n_samples, n_features=dim_Z, n_informative=n_informative, n_redundant=0,
-                               n_repeated=0, n_classes=n_classes, n_clusters_per_class=n_clusters_per_class, weights=weights,
-                               flip_y=0.00, class_sep=n_clusters_per_class*class_sep_factor, random_state=random_state, shuffle=False)
-    # Z, y = make_classification(n_samples=200, n_features=10, n_informative=2, n_redundant=0,
-    #                            n_repeated=0, n_classes=2, n_clusters_per_class=1, weights=None,
-    #                            flip_y=0, class_sep=100, random_state=random_state, shuffle=False)
-        
-    I_q = np.arange(Z.shape[1])  # 1D array of Z column indices
-    meta_I_v = []
-    results = []
-    for view in range(n_views):
-        # draw d_v[view] column indices of Z uniformly from I_q
-        I_v = np.random.choice(I_q, size=d_v[view], replace=False)  # sampled from I_q without replacement, size d_v[view]
-        meta_I_v += list(I_v)
-        # projection of Z along the columns in I_v
-        X_v = projection(Z, I_v)
-        results.append((X_v, I_v))
-        # remove floor(R*d_v[view]) column indices of I_v from I_q
-        elements_to_remove = np.random.choice(I_v, size=floor(R*d_v[view]), replace=False)  # sampled from I_v without replacement, size floor(R*d_v[view])
-        I_q = np.setdiff1d(I_q, elements_to_remove)  # I_q without the elements of elements_to_remove
-    unsued_dimensions_list = [column for column in I_q if column not in meta_I_v]
-    unsued_dimensions_percent = round((len(unsued_dimensions_list) / dim_Z)*100, 2)
-    return Z, y, results, unsued_dimensions_percent, n_informative
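-
-# Minimal usage sketch (values are illustrative assumptions, not from this
-# module):
-# Z, y, views, unused_percent, n_informative = generator_multiviews_dataset(
-#     n_samples=500, n_views=3, n_classes=2, Z_factor=10, R=0.5)
-# views[0][0] holds the data of the first view, views[0][1] its column indices.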
-
-
-def results_to_csv(path, latent_space, integer_labels, multiviews_list):
-    """
-    Writes len(multiviews_list) + 2 csv files to the indicated path
-    File names :
-        latent_space.csv for latent_space
-        integer_labels.csv for integer_labels
-        view0.csv for multiviews_list[0], view1.csv for multiviews_list[1], ...
-    
-    Parameters:
-    -----------
-    path : str
-    latent_space : array
-    integer_labels : 1D array
-    multiviews_list : list of tuples
-        
-    Returns:
-    --------
-    None
-    """
-    df_latent_space = pd.DataFrame(latent_space)
-    df_latent_space.to_csv(path+'latent_space.csv', index=False)
-    
-    df_labels = pd.DataFrame(integer_labels)
-    df_labels.to_csv(path+'integer_labels.csv', index=False)
-    
-    cpt = 0
-    for view_tuple in multiviews_list:
-        df_view = pd.DataFrame(view_tuple[0], columns=view_tuple[1])
-        df_view.to_csv(path+'view'+str(cpt)+'.csv', index=False)
-        cpt += 1
diff --git a/late/parameters.py b/late/parameters.py
deleted file mode 100644
index 2b2f536..0000000
--- a/late/parameters.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Nov 26 13:53:05 2019
-
-@author: bernardet
-"""
-from sklearn.svm import SVC
-from sklearn.naive_bayes import GaussianNB
-import numpy as np
-
-# General parameters
-n_samples = 1000
-# number of samples (int)
-n_views = 3
-# number of views >= 2 (int)
-n_classes = 2
-# number of classes >= 2 (int)
-Z_factor = 250
-# multiplication factor of the latent space dimension (float >= 1)
-R = 2/3
-# redundancy parameter (float)
-cv = 10
-# number of cross-validation splits (int)
-n_clusters_per_class = 1
-# number of clusters per class >= 1 (int)
-class_sep_factor = 2
-# factor such that class_sep = n_clusters_per_class*class_sep_factor
-n_informative_divid = 2
-# factor >= 1 such that number of informative features = round(dimension of latent space / n_informative_divid)
-classifier = "SVM"
-# name of classifier (str)
-classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}
-# dictionary of classifiers
-n_samples_list = [100, 500, 1000, 1500, 2000]
-# list of sample counts to test the generator with
-R_list = list(np.arange(0, 1.05, 0.05))
-# list of R values to test
-Z_factor_list = [1, 3, 10, 25, 100, 250, 1000]
-# list of Z_factor values to test
-n_views_list = [n_view for n_view in range(2, 10)]
-# list of n_views values to test
-class_sep_factor_list = [2, 5, 10]
-# list of class_sep_factor values to test
-n_informative_divid_list = [1, 2, 3]
-# list of n_informative_divid values to test
-path_data = "/home/bernardet/Documents/StageL3/Data/"
-# path where the multiview dataset is saved
-path_graph = "/home/bernardet/Documents/StageL3/Graph/"
-# path where the score graphs are saved
-
-# Parameters of the gaussian distribution N((d+D)/2, standard_deviation^2) :
-# d <= dim[v] <= D for all v
-# (d+D)/2 - 3*standard_deviation >= 1
-d = 4
-# < D, > 0
-D = 12
-# > d
-standard_deviation = 2
-# standard deviation of the gaussian distribution
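-
-# Sanity check of the values above (illustrative): (d+D)/2 - 3*standard_deviation
-# = (4+12)/2 - 3*2 = 8 - 6 = 2 >= 1, so the generator's positivity constraint holds.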
diff --git a/late/test_generator.py b/late/test_generator.py
deleted file mode 100644
index dff19b2..0000000
--- a/late/test_generator.py
+++ /dev/null
@@ -1,1140 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Thu Nov 28 14:14:46 2019
-
-@author: bernardet
-"""
-
-from multiviews_datasets_generator import generator_multiviews_dataset
-from sklearn.svm import SVC
-from sklearn.naive_bayes import GaussianNB
-from sklearn.model_selection import cross_val_score, StratifiedKFold
-from sklearn.metrics import accuracy_score
-from collections import Counter
-from mpl_toolkits.mplot3d import Axes3D
-from math import sqrt
-import numpy as np
-import matplotlib.pyplot as plt
-import pandas as pd
-from multimodalboost.mumbo import MumboClassifier
-
-
-def majority_list(predictions_list):
-    """
-    Returns an array whose i-th entry is the majority class among the i-th
-    entries of the arrays in predictions_list
-    
-    Parameters:
-    -----------
-    predictions_list : list of 1D array
-        
-    Returns:
-    --------
-    a 1D array
-    """
-    n_samples = len(predictions_list[0])
-    # majority_prediction[i] = the prediction that appears most often among
-    # the views' predictions for sample i
-    majority_prediction = np.array([-1]*n_samples)
-    # row i of concatenate_predictions_list gathers the predictions of all
-    # views for sample i
-    reshape_predictions_list = [predictions_list[i].reshape(len(predictions_list[i]), 1) for i in range(len(predictions_list))]
-    concatenate_predictions_list = np.hstack(reshape_predictions_list)
-    for sample in range(n_samples):
-        # dictionary mapping each prediction (key) to its number of
-        # occurrences in concatenate_predictions_list[sample]
-        count = Counter(concatenate_predictions_list[sample])
-        maj_value = max(count.values())  # highest occurrence count
-        for key in count.keys():  # find the prediction with the
-            # highest occurrence count
-            if count[key] == maj_value:
-                majority_prediction[sample] = key
-                break
-        
-    return majority_prediction
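-
-# Illustrative example (not part of the original file): with three views
-# predicting [0, 1, 1], [1, 1, 0] and [1, 0, 0] over three samples,
-# majority_list returns [1, 1, 0]; ties go to the first prediction
-# encountered, since Counter preserves insertion order.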
-
-
-def majority_score(views_dictionary, integer_labels, cv=10, classifier="SVM",
-                   classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}):
-    """
-    Returns the mean and the standard deviation of the accuracy score when
-    predictions are obtained by majority voting over the views' predictions
-    
-    Parameters:
-    -----------
-    views_dictionary : dict
-    integer_labels : array
-    cv : int
-    classifier : str
-    classifier_dictionary : dict    
-        
-    Returns:
-    --------
-    Two floats
-    """   
-    skf = StratifiedKFold(n_splits=cv, random_state=1, shuffle=True)  
-    # provides cv train/test indices to split data in cv train/test sets.
-    prediction_list = [[] for i in range(cv)]  # for majority_list function
-    test_list =  [[] for i in range(cv)]  # for score
-    
-    for key in views_dictionary.keys():
-        i = 0
-        for train_index, test_index in skf.split(views_dictionary[key], integer_labels):
-            # splits data and integer label of one view in test and train sets
-            X = views_dictionary[key]
-            train, test = X[train_index], X[test_index]    
-            y_train = integer_labels[train_index]
-            y_test = integer_labels[test_index]
-            # trains the classifier and tests it with test set
-            clf = classifier_dictionary[classifier]
-            clf.fit(train, y_train.ravel())
-            y_pred = clf.predict(test)
-            
-            prediction_list[i].append(y_pred)
-            if len(test_list[i]) == 0:  # same y_test for all views
-                test_list[i] = y_test
-            i += 1
-            
-    score = []
-    for i in range(len(prediction_list)):
-        y_pred_majority = majority_list(prediction_list[i])  
-        # majority of views predictions
-        score.append(accuracy_score(test_list[i].ravel(), y_pred_majority))  
-        # score of majority of views predictions vs expected predictions
-    score = np.array(score)
-    return score.mean(), score.std()
-
-
-def score_one_multiview_dataset(cv=10, classifier="SVM",
-                                classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}, 
-                                n_samples=1000, n_views=3, n_classes=2, 
-                                Z_factor=1, R=2/3, n_clusters_per_class=2, 
-                                class_sep_factor=2, n_informative_divid=1, 
-                                d=4, D=10, standard_deviation=2):
-    """
-    Returns 3 Series (the first with the dimensions of the latent space and
-    the views plus the percentage of latent-space dimensions unused in the
-    views, the second with the accuracy score and the third with the standard
-    deviation of the accuracy score) for the latent space, the views, early
-    fusion predictions (concatenated views) and late fusion predictions
-    (majority vote over the views)
-    
-    Parameters:
-    -----------
-    cv : int
-    classifier : str
-    classifier_dictionary : dict
-    n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, 
-    class_sep_factor, n_informative_divid, d, D, standard_deviation : parameters 
-    of generator_multiviews_dataset
-            
-    Returns:
-    --------
-    3 Series
-    """
-    # dictionary containing the percentage of unused latent-space dimensions
-    # and the dimensions of the latent space and views
-    dimensions = {'unused dimension of latent space':0, "number of informative features":0, 'latent space':0}
-    dimensions.update({'view'+str(i):0 for i in range(n_views)})
-    # dictionary containing the mean of accuracy scores
-    dict_scores_means = {'latent space':0}
-    dict_scores_means.update({'view'+str(i):0 for i in range(n_views)})
-    dict_scores_means.update({'early fusion':0, 'late fusion':0})
-    # dictionary contains standard deviation of accuracy scores
-    dict_scores_std = {'latent space':[]}
-    dict_scores_std.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_std.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains data of each view
-    dict_views = {'view'+str(i):0 for i in range(n_views)}
-    
-    Z, y, multiviews_list, unsued_dimensions_percent, n_informative = generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-    dimensions["unused dimension of latent space"] = unsued_dimensions_percent
-    dimensions["number of informative features"] = n_informative
-    dimensions["latent space"] = Z.shape
-
-    
-    for i in range(n_views):
-        # multiviews_list[i] = (data of view i, column indices of view i)
-        dict_views['view'+str(i)] = multiviews_list[i][0]
-        dimensions['view'+str(i)] = multiviews_list[i][0].shape
-        
-    early_fusion = np.concatenate([dict_views[key] for key in dict_views.keys()], axis=1)  # = concatenation of all views
-    # dictionary of data
-    dict_data_df = {'latent space':Z}
-    dict_data_df.update({'view'+str(i):dict_views['view'+str(i)] for i in range(n_views)})
-    dict_data_df.update({'early fusion':early_fusion})
-            
-    for key in dict_data_df.keys():
-        clf = classifier_dictionary[classifier]
-        score = cross_val_score(clf, dict_data_df[key], y, scoring='accuracy', cv=cv)
-        dict_scores_means[key] = score.mean()
-        dict_scores_std[key] = score.std()
-    
-    mean_majority, std_majority = majority_score(dict_views, y, cv, classifier, classifier_dictionary)
-    dict_scores_means['late fusion'] = mean_majority
-    dict_scores_std['late fusion'] = std_majority
-    
-    df_dimensions = pd.Series(dimensions)
-    df_scores_means = pd.Series(dict_scores_means)
-    df_scores_std = pd.Series(dict_scores_std)
-            
-    return df_dimensions, df_scores_means, df_scores_std
- 
-
-def score_multiviews_n_samples(n_samples_list, path_graph, cv=10, classifier="SVM", 
-                               classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}, 
-                               n_views=3, n_classes=2, Z_factor=1, R=2/3, 
-                               n_clusters_per_class=2, class_sep_factor=2, 
-                               n_informative_divid=1, d=4, D=10, standard_deviation=2):
-    """
-    Returns 2 DataFrames (the first with the accuracy score and the second
-    with the standard deviation of the accuracy score) of the latent space,
-    views, early fusion predictions (concatenated views) and late fusion
-    predictions (majority vote over the views), indexed by n_samples_list,
-    for the indicated classifier
-    Creates and saves (at the indicated path path_graph) a graph representing
-    the accuracy score (with confidence interval) vs n_samples_list
-    
-    Parameters:
-    -----------
-    n_samples_list : list
-                     each element of n_samples_list defines a new dataset
-                     with that many samples
-    path_graph : str
-                 path to save graphics
-    cv : int
-    classifier : str
-    classifier_dictionary : dict
-    n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, 
-    n_informative_divid, d, D, standard_deviation : parameters of generator_multiviews_dataset
-        
-    Returns:
-    --------
-    2 DataFrames with n_samples_list as index
-    """
-    # n_samples_list = list of sample counts sorted from lowest to highest
-    n_samples_list.sort(reverse=False)
-    # list of percentages of latent-space columns unused in the views
-    unsued_dimensions_percent_list = []
-    # list of numbers of informative features of the latent space
-    n_informative_list = []
-    # dictionary contains mean of accuracy scores per n_samples
-    dict_scores_means = {'latent space':[]}
-    dict_scores_means.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_means.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains standard deviation of accuracy scores per n_samples
-    dict_scores_std = {'latent space':[]}
-    dict_scores_std.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_std.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains data of each view
-    dict_views = {'view'+str(i):0 for i in range(n_views)}
-    
-    for n_samples in n_samples_list:
-        Z, y, multiviews_list, unsued_dimensions_percent, n_informative = generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-        unsued_dimensions_percent_list.append(unsued_dimensions_percent)
-        n_informative_list.append(n_informative)
-        
-
-        for i in range(n_views):
-            # multiviews_list[i] = (data of view i, column indices of view i)
-            dict_views['view'+str(i)] = multiviews_list[i][0]
-                    
-        early_fusion = np.concatenate([dict_views[key] for key in dict_views.keys()], axis=1)  
-        # = concatenation of all views
-        # dictionary of data
-        dict_data = {'latent space':Z}
-        dict_data.update({'view'+str(i):dict_views['view'+str(i)] for i in range(n_views)})
-        dict_data.update({'early fusion':early_fusion})
-        
-        for key in dict_data.keys():
-            clf = classifier_dictionary[classifier]
-            score = cross_val_score(clf, dict_data[key], y, scoring='accuracy', cv=cv)
-            dict_scores_means[key].append(score.mean())
-            dict_scores_std[key].append(score.std())
-                
-        mean_majority, std_majority = majority_score(dict_views, y, cv, classifier, classifier_dictionary)
-        dict_scores_means['late fusion'].append(mean_majority)
-        dict_scores_std['late fusion'].append(std_majority)
-            
-    df_scores_means = pd.DataFrame(dict_scores_means, index=n_samples_list)
-    df_scores_std = pd.DataFrame(dict_scores_std, index=n_samples_list)
-
-    plt.figure()
-    for key in dict_scores_means.keys():
-        plt.errorbar(n_samples_list, dict_scores_means[key], 1.96*np.array(dict_scores_std[key])/sqrt(cv), label=key)
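-    # (The half-width 1.96*std/sqrt(cv) is a normal-approximation 95%
-    # confidence interval for the mean cross-validation score.)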
-    # index and label for graphic
-    label_index = []
-    for n_samples, percent, n_informative in zip(n_samples_list, unsued_dimensions_percent_list, n_informative_list):
-        label_index.append(str(n_samples)+'\n'+str(percent)+'\n'+str(n_informative))
-
-    plt.xticks(n_samples_list, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.xlabel("Number of samples\nPercentage of dimensions of latent space unused in views\nNumber of informative features")
-    plt.ylabel("Accuracy score for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04,0.5), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - R = "+str(round(R, 4))+"\nfactor of latent space dimension = "+str(Z_factor)+" - number of classes = "+str(n_classes)+"\nAccuracy score vs number of samples for classifier "+classifier)
-    plt.savefig(path_graph+"score_samples_"+str(n_views)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-
-    return df_scores_means, df_scores_std
-
-
-def graph_comparaison_classifier_scores_n_samples(classifier1, classifier2, 
-                                                  n_samples_list, path_graph, 
-                                                  cv=10, classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}, 
-                                                  n_views=3, n_classes=2, 
-                                                  Z_factor=1, R=2/3, 
-                                                  n_clusters_per_class=2, 
-                                                  class_sep_factor=2, 
-                                                  n_informative_divid=1, 
-                                                  d=4, D=10, standard_deviation=2):
-    """
-    Creates and saves (at the indicated path path_graph) multiple graphs 
-    representing the scores of classifier2 vs the scores of classifier1 (one 
-    graph per column of the result of score_multiviews_n_samples)
-    
-    Parameters:
-    -----------
-    classifier1 : str
-    classifier2 : str
-    n_samples_list : list
-                     each element of n_samples_list defines a new dataset
-                     with that many samples
-    path_graph : str
-                 path to save graphics
-    cv : int
-    classifier_dictionary : dict
-    n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, 
-    n_informative_divid, d, D, standard_deviation : parameters of generator_multiviews_dataset
-        
-    Returns:
-    --------
-    None
-    """    
-    df_scores_clf1_means, df_scores_clf1_std = score_multiviews_n_samples(n_samples_list, path_graph, cv, classifier1, classifier_dictionary, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-    df_scores_clf2_means, df_scores_clf2_std = score_multiviews_n_samples(n_samples_list, path_graph, cv, classifier2, classifier_dictionary, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-    
-    n_samples_list = df_scores_clf1_means.index
-    keys = df_scores_clf1_means.keys()
-
-    for key in keys:
-        plt.figure()
-        plt.scatter(df_scores_clf1_means[key].values, df_scores_clf2_means[key].values, c=df_scores_clf1_means[key].values)
-        plt.plot([0.0, 1.1], [0.0, 1.1], "--", c=".7")  # diagonal
-        plt.xlabel("Accuracy score for "+classifier1)
-        plt.ylabel("Accuracy score for "+classifier2)
-        plt.xlim(0, 1)
-        plt.ylim(0, 1)
-        plt.title("number of views = "+str(n_views)+" - R = "+str(round(R, 4))+" - number of classes = "+str(n_classes)+"\nAccuracy score of "+key+" for "+classifier2+" vs "+classifier1)
-        plt.savefig(path_graph+classifier1+"_"+classifier2+"_"+str(n_views)+"_"+key+".png")
-        plt.show()
-        plt.close()
-    
-    
-def score_multiviews_R(R_list, path_graph, cv=10, classifier="SVM", 
-                       classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}, 
-                       n_samples=1000, n_views=3, n_classes=2, Z_factor=1, 
-                       n_clusters_per_class=2, class_sep_factor=2, 
-                       n_informative_divid=1, d=4, D=10, standard_deviation=2):
-    """
-    Returns 2 DataFrames (the first with the accuracy score and the second
-    with the standard deviation of the accuracy score) of the latent space,
-    views, early fusion predictions (concatenated views) and late fusion
-    predictions (majority vote over the views), indexed by R_list, for the
-    indicated classifier
-    Creates and saves (at the indicated path path_graph) a graph representing
-    the accuracy score (with confidence interval) vs R_list
-    
-    Parameters:
-    -----------
-    R_list : list
-             each element of R_list defines a new dataset with that value as R
-    path_graph : str
-                 path to save graphics
-    cv : int
-    classifier : str
-    classifier_dictionary : dict
-    n_samples, n_views, n_classes, Z_factor, n_clusters_per_class, 
-    class_sep_factor, n_informative_divid, d, D, standard_deviation : parameters 
-    of generator_multiviews_dataset
-            
-    Returns:
-    --------
-    2 DataFrames with R_list as index
-    """
-    # R_list = list of R values sorted from lowest to highest
-    R_list.sort(reverse=False)
-    # list of percentages of latent-space columns unused in the views
-    unsued_dimensions_percent_list = []
-    # list of numbers of informative features of the latent space
-    n_informative_list = []
-    # dictionary contains mean of accuracy scores per R
-    dict_scores_means = {'latent space':[]}
-    dict_scores_means.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_means.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains standard deviation of accuracy scores per R
-    dict_scores_std = {'latent space':[]}
-    dict_scores_std.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_std.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains data of each view
-    dict_views = {'view'+str(i):0 for i in range(n_views)}
-    
-    for R in R_list:
-        Z, y, multiviews_list, unsued_dimensions_percent, n_informative = generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-        unsued_dimensions_percent_list.append(unsued_dimensions_percent)
-        n_informative_list.append(n_informative)
-        
-        for i in range(n_views):
-            # multiviews_list[i] = (data of view i, column indices of view i)
-            dict_views['view'+str(i)] = multiviews_list[i][0]
-            
-        early_fusion = np.concatenate([dict_views[key] for key in dict_views.keys()], axis=1)  
-        # = concatenation of all views
-        # dictionary of data
-        dict_data_df = {'latent space':Z}
-        dict_data_df.update({'view'+str(i):dict_views['view'+str(i)] for i in range(n_views)})
-        dict_data_df.update({'early fusion':early_fusion})
-                
-        for key in dict_data_df.keys():
-            clf = classifier_dictionary[classifier]
-            score = cross_val_score(clf, dict_data_df[key], y, scoring='accuracy', cv=cv)
-            dict_scores_means[key].append(score.mean())
-            dict_scores_std[key].append(score.std())
-        
-        mean_majority, std_majority = majority_score(dict_views, y, cv, classifier, classifier_dictionary)
-        dict_scores_means['late fusion'].append(mean_majority)
-        dict_scores_std['late fusion'].append(std_majority)
-    
-    df_scores_means = pd.DataFrame(dict_scores_means, index=R_list)
-    df_scores_std = pd.DataFrame(dict_scores_std, index=R_list)
-    
-    plt.figure()
-    for key in dict_scores_means.keys():
-        plt.errorbar(R_list, dict_scores_means[key], 1.96*np.array(dict_scores_std[key])/sqrt(cv), label=key)
-    # index and label for graphic
-    label_index = []
-    R_label = []
-    for i in range(0, len(R_list), 4):
-        R_label.append(R_list[i])
-        label_index.append(str(round(R_list[i], 2))+'\n'+str(unsued_dimensions_percent_list[i])+'\n'+str(n_informative_list[i]))
-    
-    plt.xticks(R_label, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.xlabel("R\nPercentage of dimensions of latent space unused in views\nNumber of informative features")
-    plt.ylabel("Accuracy score for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04,0.5), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - number of samples = "+str(n_samples)+"\nfactor of latent space dimension = "+str(Z_factor)+" - number of classes = "+str(n_classes)+"\nAccuracy score vs R for classifier "+classifier)
-    plt.savefig(path_graph+"score_R_"+str(n_views)+"_"+str(n_samples)+"_"+str(Z_factor)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-        
-    return df_scores_means, df_scores_std
-
-def score_multiviews_Z_factor(Z_factor_list, path_graph, cv=10, classifier="SVM", 
-                              classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}, 
-                              n_samples=1000, n_views=3, n_classes=2, R=2/3, 
-                              n_clusters_per_class=2, class_sep_factor=2, 
-                              n_informative_divid=1, d=4, D=10, standard_deviation=2):
-    """
-    Returns 3 DataFrames (the first with the accuracy score, the second with
-    the standard deviation of the accuracy score and the third with the error
-    rate) of the latent space, views, early fusion predictions (concatenated
-    views) and late fusion predictions (majority vote over the views), indexed
-    by the sum of the view dimensions divided by the latent space dimension
-    for each Z_factor, for the indicated classifier
-    Creates and saves (at the indicated path path_graph) a graph representing
-    the accuracy score, and another representing the error rate (1 - accuracy
-    score), vs the sum of the view dimensions divided by the latent space
-    dimension
-    
-    Parameters:
-    -----------
-    Z_factor_list : list
-                    each element of Z_factor_list defines a new dataset with
-                    that value as Z_factor
-    path_graph : str
-                 path to save graphics
-    cv : int
-    classifier : str
-    classifier_dictionary : dict
-    n_samples, n_views, n_classes, R, n_clusters_per_class, class_sep_factor, 
-    n_informative_divid, d, D, standard_deviation : parameters of generator_multiviews_dataset
-            
-    Returns:
-    --------
-    3 DataFrames indexed by the sum of view dimensions divided by the latent space dimension
-    """
-    # Z_factor_list = list of Z_factor values sorted from highest to lowest
-    Z_factor_list.sort(reverse=True)
-    # list of the summed view dimensions for each Z_factor_list item
-    d_v = []
-    # list of the Z dimension for each Z_factor_list item
-    Z_dim_list = []
-    # list of percentages of latent-space columns unused in the views
-    unsued_dimensions_percent_list = []
-    # list of numbers of informative features of the latent space
-    n_informative_list = []
-    # same views have same colors on each graph
-    dict_colors = {'latent space':0}
-    dict_colors.update({'view'+str(i):0 for i in range(n_views)})
-    prop_cycle = plt.rcParams['axes.prop_cycle']
-    colors = prop_cycle.by_key()['color']
-    for key, c in zip(dict_colors.keys(), colors):
-        dict_colors[key] = c    
-    dict_colors.update({'early fusion':'purple', 'late fusion':'maroon'})
-    # dictionary contains mean of accuracy scores per Z_factor
-    dict_scores_means = {'latent space':[]}
-    dict_scores_means.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_means.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains error rate per Z_factor
-    dict_scores_error = {'latent space':[]}
-    dict_scores_error.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_error.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains standard deviation of accuracy scores per Z_factor
-    dict_scores_std = {'latent space':[]}
-    dict_scores_std.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_std.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains data of each view
-    dict_views = {'view'+str(i):0 for i in range(n_views)}
-        
-    for Z_factor in Z_factor_list:
-        Z, y, multiviews_list, unsued_dimensions_percent, n_informative = generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-        unsued_dimensions_percent_list.append(unsued_dimensions_percent)
-        n_informative_list.append(n_informative)
-        
-        for i in range(n_views):
-            # multiviews_list[i] = (data of view i, indices of the latent space columns used by view i)
-            dict_views['view'+str(i)] = multiviews_list[i][0]
-            
-        early_fusion = np.concatenate([dict_views[key] for key in dict_views.keys()], axis=1)  
-        # = concatenation of all views        
-        # dimension = number of columns
-        d_v.append(early_fusion.shape[1])
-        Z_dim_list.append(Z.shape[1])
-        # dictionary of data
-        dict_data_df = {'latent space':Z}
-        dict_data_df.update({'view'+str(i):dict_views['view'+str(i)] for i in range(n_views)})
-        dict_data_df.update({'early fusion':early_fusion})
-                
-        for key in dict_data_df.keys():
-            clf = classifier_dictionary[classifier]
-            score = cross_val_score(clf, dict_data_df[key], y, scoring='accuracy', cv=cv)
-            dict_scores_means[key].append(score.mean())
-            dict_scores_error[key].append(1 - score.mean())
-            dict_scores_std[key].append(score.std())
-        
-        mean_majority, std_majority = majority_score(dict_views, y, cv, classifier, classifier_dictionary)
-        dict_scores_means['late fusion'].append(mean_majority)
-        dict_scores_error['late fusion'].append(1 - mean_majority)
-        dict_scores_std['late fusion'].append(std_majority)
-        
-    d_v_divid_Z = np.divide(np.array(d_v), np.array(Z_dim_list))
-    
-    df_scores_means = pd.DataFrame(dict_scores_means, index=d_v_divid_Z)
-    df_scores_error = pd.DataFrame(dict_scores_error, index=d_v_divid_Z)
-    df_scores_std = pd.DataFrame(dict_scores_std, index=d_v_divid_Z)
-    
-    # index and label for graphics
-    label_index = [chr(i) for i in range(ord('a'),ord('z')+1)]
-    label_index = label_index[0:len(d_v)]
-    label_value = ""
-    for label, v_Z, dim_v, dim_Z, Z_factor, percent, n_informative in zip(label_index, d_v_divid_Z, d_v, Z_dim_list, Z_factor_list, unsued_dimensions_percent_list, n_informative_list):
-        label_value = label_value + label+" : V/Z = "+str(round(v_Z, 4))+", V = "+str(dim_v)+", Z = "+str(dim_Z)+", Z_factor = "+str(Z_factor)+", % ="+str(percent)+", n_informative = "+str(n_informative)+'\n'
-
-    x_label = "V/Z = sum of views dimension divided by latent space dimension with :\nV = sum of views dimension\nZ = latent space dimension multiplied by Z_factor\n% = percentage of dimensions of latent space unsued in views\nn_informative = number of informative features"
-    
-    plt.figure(figsize=(10, 10))  # accuracy score vs d_v_divid_Z
-    for key in dict_scores_means.keys():
-        plt.semilogx(d_v_divid_Z, dict_scores_means[key], '.-', color=dict_colors[key], label=key)
-
-    plt.xticks(d_v_divid_Z, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.text(plt.xlim()[1]+0.05, plt.ylim()[1]-(plt.ylim()[1]-plt.ylim()[0])/2, label_value)
-    plt.xlabel(x_label)
-    plt.ylabel("Accuracy score for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04, 1), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - number of samples = "+str(n_samples)+"\nR = "+str(round(R, 4))+" - number of classes = "+str(n_classes)+"\nAccuracy score vs ratio sum of views dimension / latent space dimension for classifier "+classifier)    
-    plt.savefig(path_graph+"score_Z_factor_"+str(n_views)+"_"+str(n_samples)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-    
-    plt.figure(figsize=(10, 10))  # error rate vs d_v_divid_Z
-    for key in dict_scores_means.keys():
-        plt.semilogx(d_v_divid_Z, dict_scores_error[key], '.-', color=dict_colors[key], label=key)
-    plt.xticks(d_v_divid_Z, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.text(plt.xlim()[1]+0.05, plt.ylim()[1]-(plt.ylim()[1]-plt.ylim()[0])/2, label_value)
-    plt.xlabel(x_label)
-    plt.ylabel("Error rate for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04, 1), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - number of samples = "+str(n_samples)+"\nR = "+str(round(R, 4))+" - number of classes = "+str(n_classes)+"\nError rate vs ratio sum of views dimension / latent space dimension for classifier "+classifier)    
-    plt.savefig(path_graph+"error_Z_factor_"+str(n_views)+"_"+str(n_samples)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-    """
-    plt.figure(figsize=(10, 10))
-    
-    for key in dict_scores_means.keys():
-        plt.errorbar(d_v_divid_Z, dict_scores_means[key], 1.96*np.array(dict_scores_std[key])/sqrt(cv), label=key)
-    plt.xticks(d_v_divid_Z, label_index, fontsize='medium', multialignment='center')
-    plt.text(plt.xlim()[1]+0.05, plt.ylim()[1]-(plt.ylim()[1]-plt.ylim()[0])/2, label_value)
-    plt.xlabel(x_label)
-    plt.ylabel("Accuracy score for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04, 1), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - R = "+str(round(R, 4))+"\nAccuracy score vs ratio sum of views dimension / latent space dimension for classifier "+classifier)
-    plt.savefig(path_graph+"score_Z_factor_errorbar_"+str(n_views)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-    
-    plt.figure(figsize=(10, 10))  # accuracy score of early fusion divided by 
-    # accuracy score of each view vs d_v_divid_Z
-    for view in dict_views.keys():
-        plt.semilogx(d_v_divid_Z, dict_scores_means['early fusion']/df_scores_means[view], '.-', label='early fusion score divided by '+view+' score')
-    plt.xticks(d_v_divid_Z, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.text(plt.xlim()[1]+0.05, plt.ylim()[1]-(plt.ylim()[1]-plt.ylim()[0])/2, label_value)
-    plt.xlabel(x_label)
-    plt.ylabel("Ratio accuracy score for early fusion / accuracy score for each view for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04, 1), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - R = "+str(round(R, 4))+"\nRatio accuracy score for early fusion / accuracy score for each view \nvs ratio sum of views dimension / latent space dimension for classifier "+classifier)
-    plt.savefig(path_graph+"score_Z_factor_majority_view_divid_"+str(n_views)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-    
-    plt.figure(figsize=(10, 10))  # accuracy score of late fusion divided by 
-    # accuracy score of each view vs d_v_divid_Z
-    for view in dict_views.keys():
-        plt.semilogx(d_v_divid_Z, dict_scores_means['late fusion']/df_scores_means[view], '.-', label='late fusion score divided by '+view+' score')
-    plt.xticks(d_v_divid_Z, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.text(plt.xlim()[1]+0.05, plt.ylim()[1]-(plt.ylim()[1]-plt.ylim()[0])/2, label_value)
-    plt.xlabel(x_label)
-    plt.ylabel("Ratio accuracy score for late fusion / accuracy score for each view for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04, 1), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - R = "+str(round(R, 4))+"\nRation accuracy score for late fusion / accuracy score for each view \nvs ratio sum of views dimension / latent space dimension for classifier "+classifier)
-    plt.savefig(path_graph+"score_Z_factor_all_view_divid_"+str(n_views)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-    """
-    return df_scores_means, df_scores_std, df_scores_error
-
-
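-# A minimal usage sketch (hypothetical): sweep Z_factor over several orders of
-# magnitude, as parameters.Z_factor_list does; the returned DataFrames are
-# indexed by the ratio (sum of views dimensions) / (latent space dimension),
-# and "/tmp/graphs/" is a placeholder output directory.
-def _demo_score_Z_factor():
-    means, stds, errors = score_multiviews_Z_factor([1, 10, 100], "/tmp/graphs/", cv=5)
-    print(errors)
-
-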
-def score_multiviews_Z_factor_Mumbo(Z_factor_list, path_graph, cv=10, classifier="SVM", 
-                                    classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()},
-                                    n_samples=1000, n_views=3, n_classes=2, 
-                                    R=2/3, n_clusters_per_class=2, 
-                                    class_sep_factor=2, n_informative_divid=1, 
-                                    d=4, D=10, standard_deviation=2):
-    """
-    Returns 3 DataFrames (the first with the accuracy score, the second with 
-    the standard deviation of the accuracy score and the third with the error 
-    rate) of latent space, views, early fusion predictions (concatenated 
-    views) and late fusion predictions (majority vote over views), indexed by 
-    the sum of views dimensions divided by the latent space dimension, for 
-    the indicated classifier and for the Mumbo classifier
-    Creates and saves (at the indicated path path_graph) a graph representing 
-    accuracy score vs this ratio and a graph representing error rate 
-    (1 - accuracy score) vs this ratio
-    
-    Parameters:
-    -----------
-    Z_factor_list : list
-                    each element of Z_factor_list defines a new dataset 
-                    generated with that element as Z_factor
-    path_graph : str
-                 path to save graphics
-    cv : int
-    classifier : str
-    classifier_dictionary : dict
-    n_samples, n_views, n_classes, R, n_clusters_per_class, class_sep_factor, 
-    n_informative_divid, d, D, standard_deviation : parameters of generator_multiviews_dataset
-            
-    Returns:
-    --------
-    3 DataFrames indexed by the ratio sum of views dimensions / latent space dimension
-    """
-    # sort Z_factor_list from the highest to the lowest value
-    Z_factor_list.sort(reverse=True)
-    # list of the sum of views dimensions for each Z_factor_list item
-    d_v = []
-    # list of the latent space dimension for each Z_factor_list item
-    Z_dim_list = []
-    # list of the percentage of unused latent space columns for each Z_factor_list item
-    unsued_dimensions_percent_list = []
-    # list of the number of informative latent space features for each Z_factor_list item
-    n_informative_list = []
-    # same views have same colors on each graph
-    dict_colors = {'latent space':0}
-    dict_colors.update({'view'+str(i):0 for i in range(n_views)})
-    prop_cycle = plt.rcParams['axes.prop_cycle']
-    colors = prop_cycle.by_key()['color']
-    for key, c in zip(dict_colors.keys(), colors):
-        dict_colors[key] = c    
-    dict_colors.update({'early fusion':'purple', 'late fusion':'maroon', 'Mumbo':'midnightblue'})
-    # dictionary contains mean of accuracy scores per Z_factor
-    dict_scores_means = {'latent space':[]}
-    dict_scores_means.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_means.update({'early fusion':[], 'late fusion':[], 'Mumbo':[]})
-    # dictionary contains error rate per Z_factor
-    dict_scores_error = {'latent space':[]}
-    dict_scores_error.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_error.update({'early fusion':[], 'late fusion':[], 'Mumbo':[]})
-    # dictionary contains standard deviation of accuracy scores per Z_factor
-    dict_scores_std = {'latent space':[]}
-    dict_scores_std.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_std.update({'early fusion':[], 'late fusion':[], 'Mumbo':[]})
-    # dictionary contains data of each view
-    dict_views = {'view'+str(i):0 for i in range(n_views)}
-        
-    for Z_factor in Z_factor_list:
-        Z, y, multiviews_list, unsued_dimensions_percent, n_informative = generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-        unsued_dimensions_percent_list.append(unsued_dimensions_percent)
-        n_informative_list.append(n_informative)
-        
-        view_index = [0]  # for Mumbo
-        for i in range(n_views):
-            # multiviews_list[i] = (data of view i, indices of the latent space columns used by view i)
-            dict_views['view'+str(i)] = multiviews_list[i][0]
-            view_index.append(len(multiviews_list[i][1])+view_index[i])
-            
-        concat = np.concatenate([dict_views[key] for key in dict_views.keys()], axis=1)  
-        # = concatenation of all views        
-        # dimension = number of columns
-        d_v.append(concat.shape[1])
-        Z_dim_list.append(Z.shape[1])
-        # dictionary of data
-        dict_data_df = {'latent space':Z}
-        dict_data_df.update({'view'+str(i):dict_views['view'+str(i)] for i in range(n_views)})
-        dict_data_df.update({'early fusion':concat})
-        
-        for key in dict_data_df.keys():
-            clf = classifier_dictionary[classifier]
-            score = cross_val_score(clf, dict_data_df[key], y, scoring='accuracy', cv=cv)
-            dict_scores_means[key].append(score.mean())
-            dict_scores_error[key].append(1 - score.mean())
-            dict_scores_std[key].append(score.std())
-        
-        mean_majority, std_majority = majority_score(dict_views, y, cv, classifier, classifier_dictionary)
-        dict_scores_means['late fusion'].append(mean_majority)
-        dict_scores_error['late fusion'].append(1 - mean_majority)
-        dict_scores_std['late fusion'].append(std_majority)
-        # Mumbo        
-        skf = StratifiedKFold(n_splits=cv, random_state=1, shuffle=True)  
-        # provides cv train/test indices to split data in cv train/test sets
-        score = []
-        for train_index, test_index in skf.split(concat, y):
-            # splits data and integer label of one view in test and train sets
-            train, test = concat[train_index], concat[test_index]
-            y_train, y_test = y[train_index], y[test_index]
-            # trains the classifier and tests it with test set               
-            clf = MumboClassifier()
-            clf.fit(train, y_train, view_index)
-            y_pred = clf.predict(test)
-            score.append(accuracy_score(y_test, y_pred))
-            
-        score = np.array(score)
-        dict_scores_means['Mumbo'].append(score.mean())
-        dict_scores_error['Mumbo'].append(1 - score.mean())
-        dict_scores_std['Mumbo'].append(score.std())
-
-    d_v_divid_Z = np.divide(np.array(d_v), np.array(Z_dim_list))
-    
-    df_scores_means = pd.DataFrame(dict_scores_means, index=d_v_divid_Z)
-    df_scores_error = pd.DataFrame(dict_scores_error, index=d_v_divid_Z)
-    df_scores_std = pd.DataFrame(dict_scores_std, index=d_v_divid_Z)
-    
-    # index and label for graphics
-    label_index = [chr(i) for i in range(ord('a'),ord('z')+1)]
-    label_index = label_index[0:len(d_v)]
-    label_value = ""
-    for label, v_Z, dim_v, dim_Z, Z_factor, percent, n_informative in zip(label_index, d_v_divid_Z, d_v, Z_dim_list, Z_factor_list, unsued_dimensions_percent_list, n_informative_list):
-        label_value = label_value + label+" : V/Z = "+str(round(v_Z, 4))+", V = "+str(dim_v)+", Z = "+str(dim_Z)+", Z_factor = "+str(Z_factor)+", % ="+str(percent)+", n_informative = "+str(n_informative)+'\n'
-
-    x_label = "V/Z = sum of views dimension divided by latent space dimension with :\nV = sum of views dimension\nZ = latent space dimension multiplied by Z_factor\n% = percentage of dimensions of latent space unsued in views\nn_informative = number of informative features"
-    
-    plt.figure(figsize=(10, 10))  # accuracy score vs d_v_divid_Z
-    for key in dict_scores_means.keys():
-        plt.semilogx(d_v_divid_Z, dict_scores_means[key], '.-', color=dict_colors[key], label=key)
-
-    plt.xticks(d_v_divid_Z, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.text(plt.xlim()[1]+0.05, plt.ylim()[1]-(plt.ylim()[1]-plt.ylim()[0])/2, label_value)
-    plt.xlabel(x_label)
-    plt.ylabel("Accuracy score for "+classifier+" and Mumbo")
-    plt.legend(bbox_to_anchor=(1.04, 1), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - number of samples = "+str(n_samples)+"\nR = "+str(round(R, 4))+" - number of classes = "+str(n_classes)+"\nAccuracy score vs ratio sum of views dimension / latent space dimension for classifiers "+classifier+" and Mumbo")    
-    plt.savefig(path_graph+"score_Z_factor_"+str(n_views)+"_"+str(n_samples)+"_Mumbo_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-    
-    plt.figure(figsize=(10, 10))  # error rate vs d_v_divid_Z
-    for key in dict_scores_means.keys():
-        plt.semilogx(d_v_divid_Z, dict_scores_error[key], '.-', color=dict_colors[key], label=key)
-    plt.xticks(d_v_divid_Z, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.text(plt.xlim()[1]+0.05, plt.ylim()[1]-(plt.ylim()[1]-plt.ylim()[0])/2, label_value)
-    plt.xlabel(x_label)
-    plt.ylabel("Error rate for "+classifier+" and Mumbo")
-    plt.legend(bbox_to_anchor=(1.04, 1), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - number of samples = "+str(n_samples)+"\nR = "+str(round(R, 4))+" - number of classes = "+str(n_classes)+"\nError rate vs ratio sum of views dimension / latent space dimension for classifiers "+classifier+" and Mumbo")    
-    plt.savefig(path_graph+"error_Z_factor_"+str(n_views)+"_"+str(n_samples)+"_Mumbo_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-    """
-    plt.figure(figsize=(10, 10))  # accuracy score of early fusion divided by 
-    # accuracy score of each view vs d_v_divid_Z
-    for view in dict_views.keys():
-        plt.semilogx(d_v_divid_Z, dict_scores_means['early fusion']/df_scores_means[view], '.-', label='early fusion score divided by '+view+' score')
-    plt.xticks(d_v_divid_Z, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.text(plt.xlim()[1]+0.05, plt.ylim()[1]-(plt.ylim()[1]-plt.ylim()[0])/2, label_value)
-    plt.xlabel(x_label)
-    plt.ylabel("Ratio accuracy score for early fusion / accuracy score for each view for "+classifier+" and Mumbo")
-    plt.legend(bbox_to_anchor=(1.04, 1), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - R = "+str(round(R, 4))+"\nRatio accuracy score for early fusion / accuracy score for each view \nvs ratio sum of views dimension / latent space dimension for classifiers "+classifier+" and Mumbo")
-    plt.savefig(path_graph+"score_Z_factor_majority_view_divid_"+str(n_views)+"_Mumbo_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-    
-    plt.figure(figsize=(10, 10))  # accuracy score of late fusion divided by 
-    # accuracy score of each view vs d_v_divid_Z
-    for view in dict_views.keys():
-        plt.semilogx(d_v_divid_Z, dict_scores_means['late fusion']/df_scores_means[view], '.-', label='late fusion score divided by '+view+' score')
-    plt.xticks(d_v_divid_Z, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.text(plt.xlim()[1]+0.05, plt.ylim()[1]-(plt.ylim()[1]-plt.ylim()[0])/2, label_value)
-    plt.xlabel(x_label)
-    plt.ylabel("Ratio accuracy score for late fusion / accuracy score for each view for "+classifier+" and Mumbo")
-    plt.legend(bbox_to_anchor=(1.04, 1), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - R = "+str(round(R, 4))+"\nRation accuracy score for late fusion / accuracy score for each view \nvs ratio sum of views dimension / latent space dimension for classifiers "+classifier+" and Mumbo")
-    plt.savefig(path_graph+"score_Z_factor_all_view_divid_"+str(n_views)+"_"+str(round(R, 4))+"_Mumbo_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-    """
-    return df_scores_means, df_scores_std, df_scores_error
-
-
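-# A minimal usage sketch (hypothetical): the same Z_factor sweep as above, with
-# the Mumbo boosting classifier added to the comparison (MumboClassifier must
-# be importable, as in the rest of this module).
-def _demo_score_Z_factor_mumbo():
-    means, stds, errors = score_multiviews_Z_factor_Mumbo([1, 10, 100], "/tmp/graphs/", cv=5)
-    print(means['Mumbo'])
-
-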
-def score_multiviews_n_views_R(n_views_list, R_list, path_graph, cv=10, 
-                               classifier="SVM", classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}, 
-                               n_samples=1000, n_classes=2, Z_factor=1, 
-                               n_clusters_per_class=2, class_sep_factor=2, 
-                               n_informative_divid=1, d=4, D=10, standard_deviation=2):
-    """
-    Returns a dictionary with the elements of n_views_list as keys, each key 
-    holding a DataFrame (accuracy score divided by the accuracy score for 
-    R=1, i.e. no redundancy) of views, early fusion predictions (concatenated 
-    views) and late fusion predictions (majority vote over views) with 
-    R_list as index for the indicated classifier
-    Creates and saves (at the indicated path path_graph) one graph per value 
-    of n_views_list representing accuracy score divided by the accuracy score 
-    for R=1 vs R_list
-    
-    Parameters:
-    -----------
-    n_views_list : list
-                   each element of n_views_list defines a new dataset 
-                   generated with that element as n_views
-    R_list : list
-             each element of R_list defines a new dataset generated with that element as R
-    path_graph : str
-                 path to save graphics
-    cv : int
-    classifier : str
-    classifier_dictionary : dict
-    n_samples, n_classes, Z_factor, n_clusters_per_class, class_sep_factor, 
-    n_informative_divid, d, D, standard_deviation : parameters of generator_multiviews_dataset
-            
-    Returns:
-    --------
-    a dictionary with the elements of n_views_list as keys, each holding a 
-    DataFrame (accuracy score divided by the accuracy score for R=1, i.e. no 
-    redundancy) with R_list as index
-    """
-    dict_n_views_R_ratio = {key:0 for key in n_views_list}
-    # sort n_views_list from the lowest to the highest value
-    n_views_list.sort(reverse=False)
-    # same views have same colors on each graph
-    dict_colors = {'view'+str(i):0 for i in range(n_views_list[-1])}
-    prop_cycle = plt.rcParams['axes.prop_cycle']
-    colors = prop_cycle.by_key()['color']
-    for key, c in zip(dict_colors.keys(), colors):
-        dict_colors[key] = c    
-    dict_colors.update({'early fusion':'purple', 'late fusion':'maroon'})
-    
-    for n_views in n_views_list:    
-        # sort R_list from the lowest to the highest value
-        R_list.sort(reverse=False)
-        # list of the percentage of unused latent space columns for each R
-        unsued_dimensions_percent_list = []
-        # list of the number of informative latent space features for each R
-        n_informative_list = []
-        # dictionary contains mean of accuracy scores per R
-        dict_scores_means = {'view'+str(i):[] for i in range(n_views)}
-        dict_scores_means.update({'early fusion':[], 'late fusion':[]})
-        # dictionary of each view's mean scores across R values divided by 
-        # its mean score for R = 1 (i.e. no redundancy)
-        dict_scores_ratio_R_1 = {'view'+str(i):0 for i in range(n_views)}
-        dict_scores_ratio_R_1.update({'early fusion':0, 'late fusion':0})
-        # dictionary contains data of each view
-        dict_views = {'view'+str(i):0 for i in range(n_views)}
-        
-        for R in R_list:
-            Z, y, multiviews_list, unsued_dimensions_percent, n_informative = generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-            unsued_dimensions_percent_list.append(unsued_dimensions_percent)
-            n_informative_list.append(n_informative)
-            
-            for i in range(n_views):
-                # multiviews_list[i] = (data of view i, indices of the latent space columns used by view i)
-                dict_views['view'+str(i)] = multiviews_list[i][0]
-                
-            early_fusion = np.concatenate([dict_views[key] for key in dict_views.keys()], axis=1)  
-            # = concatenation of all views
-            # dictionary of data
-            dict_data_df = {'view'+str(i):dict_views['view'+str(i)] for i in range(n_views)}
-            dict_data_df.update({'early fusion':early_fusion})
-                    
-            for key in dict_data_df.keys():
-                clf = classifier_dictionary[classifier]
-                score = cross_val_score(clf, dict_data_df[key], y, scoring='accuracy', cv=cv)
-                dict_scores_means[key].append(score.mean())
-            
-            mean_majority, std_majority = majority_score(dict_views, y, cv, classifier, classifier_dictionary)
-            dict_scores_means['late fusion'].append(mean_majority)
-        
-        for key in dict_scores_means.keys():
-            score_R_1 = dict_scores_means[key][-1]  # R = 1 = last value of 
-            # R_list => last score value in dict_scores_means[key]
-            dict_scores_ratio_R_1[key] = np.divide(np.array(dict_scores_means[key]), score_R_1)
-                
-        df_scores_ratio_R_1 = pd.DataFrame(dict_scores_ratio_R_1, index=R_list)
-
-        plt.figure()
-        for key in dict_scores_means.keys():
-            plt.plot(R_list, dict_scores_ratio_R_1[key], '.-',  color=dict_colors[key], label=key)
-        # index and label for graphic
-        label_index = []
-        R_label = []
-        for i in range(0, len(R_list), 4):
-            R_label.append(R_list[i])
-            label_index.append(str(round(R_list[i], 2))+'\n'+str(unsued_dimensions_percent_list[i])+'\n'+str(n_informative_list[i]))
-        
-        plt.xticks(R_label, label_index, fontsize='medium', multialignment='center')  # new x indexes
-        plt.xlabel("R\nPercentage of dimensions of latent space unsued in views\nNumber of informative features")
-        plt.ylabel("Ratio accuracy score / accuracy score for R = 1 for "+classifier)
-        plt.legend(bbox_to_anchor=(1.04,0.5), loc="center left", borderaxespad=0)
-        plt.title("number of views = "+str(n_views)+" - number of samples = "+str(n_samples)+"\nfactor of latent space dimension = "+str(Z_factor)+" - number of classes = "+str(n_classes)+"\nRatio accuracy score / accuracy score for R = 1\n(redundancy null) vs R for classifier "+classifier)
-        plt.savefig(path_graph+"score_R_divid_R_1_"+str(n_views)+"_"+str(n_samples)+"_"+str(Z_factor)+"_"+classifier+".png", bbox_inches='tight')
-        plt.show()
-        plt.close()
-            
-        dict_n_views_R_ratio[n_views] = df_scores_ratio_R_1
-        
-    plt.figure()
-    ax = plt.axes(projection="3d")
-    
-    for n_views in n_views_list:
-        for key in dict_n_views_R_ratio[n_views].keys():
-            if n_views == n_views_list[-1]:  # print legends only once
-                ax.plot(R_list, dict_n_views_R_ratio[n_views][key], n_views, color=dict_colors[key], label=key)
-            else:
-                ax.plot(R_list, dict_n_views_R_ratio[n_views][key], n_views, color=dict_colors[key])
-    
-    ax.set_xlabel("R")
-    ax.set_ylabel("Ratio accuracy score / accuracy score for R = 1 for "+classifier)
-    ax.set_zlabel("Number of views")
-    plt.legend(bbox_to_anchor=(1.04,0.5), loc="center left", borderaxespad=0)
-    plt.title("number of samples = "+str(n_samples)+" - factor of latent space dimension = "+str(Z_factor)+" - number of classes = "+str(n_classes)+"\nRatio accuracy score / accuracy score for R = 1 (redundancy null) vs R, number of views for classifier "+classifier)
-    plt.savefig(path_graph+"score_R_divid_R_1_all_n_views"+"_"+str(n_samples)+"_"+str(Z_factor)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-
-    return dict_n_views_R_ratio
-
-
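-# A minimal usage sketch (hypothetical): joint sweep over the number of views
-# and R; R_list should contain 1, since scores are normalised by the R = 1
-# (no redundancy) score.
-def _demo_score_n_views_R():
-    R_grid = [0.0, 0.25, 0.5, 0.75, 1.0]
-    ratios = score_multiviews_n_views_R([2, 3, 4], R_grid, "/tmp/graphs/", cv=5)
-    print(ratios[3])
-
-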
-def score_multiviews_class_sep(class_sep_factor_list, path_graph, cv=10, 
-                               classifier="SVM", classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}, 
-                               n_views=3, n_samples=1000, n_classes=2, 
-                               Z_factor=1, R=2/3, n_clusters_per_class=2, 
-                               n_informative_divid=1, d=4, D=10, standard_deviation=2):
-    """
-    Returns 2 DataFrames (the first with the accuracy score and the second 
-    with the standard deviation of the accuracy score) of latent space, views, 
-    early fusion predictions (concatenated views) and late fusion predictions 
-    (majority vote over views) with class_sep_factor_list as index for the 
-    indicated classifier
-    Creates and saves (at the indicated path path_graph) a graph representing 
-    accuracy score (with confidence interval) vs class_sep_factor_list
-    
-    Parameters:
-    -----------
-    class_sep_factor_list : list
-                            each element of class_sep_factor_list defines a 
-                            new dataset generated with that element as class_sep_factor
-    path_graph : str
-    cv : int
-    classifier : str
-    classifier_dictionary : dict
-    n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, 
-    n_informative_divid, d, D, standard_deviation : parameters of generator_multiviews_dataset
-        
-    Returns:
-    --------
-    2 DataFrames with class_sep_factor_list as index
-    """
-    # list of the percentage of unused latent space columns for each class_sep_factor
-    unsued_dimensions_percent_list = []
-    # list of the number of informative latent space features for each class_sep_factor
-    n_informative_list = []
-    # dictionary contains mean of accuracy scores per class_sep_factor
-    dict_scores_means = {'latent space':[]}
-    dict_scores_means.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_means.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains standard deviation of accuracy scores per class_sep_factor
-    dict_scores_std = {'latent space':[]}
-    dict_scores_std.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_std.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains data of each view
-    dict_views = {'view'+str(i):0 for i in range(n_views)}
-    
-    for class_sep_factor in class_sep_factor_list:
-        Z, y, multiviews_list, unsued_dimensions_percent, n_informative = generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-        unsued_dimensions_percent_list.append(unsued_dimensions_percent)
-        n_informative_list.append(n_informative)
-
-        for i in range(n_views):
-            # multiviews_list[i] = (data of view i, indices of the latent space columns used by view i)
-            dict_views['view'+str(i)] = multiviews_list[i][0]
-        
-        early_fusion = np.concatenate([dict_views[key] for key in dict_views.keys()], axis=1)  
-        # = concatenation of all views
-        # dictionary of data
-        dict_data = {'latent space':Z}
-        dict_data.update({'view'+str(i):dict_views['view'+str(i)] for i in range(n_views)})
-        dict_data.update({'early fusion':early_fusion})
-                
-        for key in dict_data.keys():
-            clf = classifier_dictionary[classifier]
-            score = cross_val_score(clf, dict_data[key], y, scoring='accuracy', cv=cv)
-            dict_scores_means[key].append(score.mean())
-            dict_scores_std[key].append(score.std())
-                
-        mean_majority, std_majority = majority_score(dict_views, y, cv, classifier, classifier_dictionary)
-        dict_scores_means['late fusion'].append(mean_majority)
-        dict_scores_std['late fusion'].append(std_majority)
-        
-                    
-    df_scores_means = pd.DataFrame(dict_scores_means, index=class_sep_factor_list)
-    df_scores_std = pd.DataFrame(dict_scores_std, index=class_sep_factor_list)
-    
-    plt.figure()
-    for key in dict_scores_means.keys():
-        plt.errorbar(class_sep_factor_list, dict_scores_means[key], 1.96*np.array(dict_scores_std[key])/sqrt(cv), label=key)
-    # index and label for graphic
-    label_index = []
-    for class_sep_factor, percent, n_informative in zip(class_sep_factor_list, unsued_dimensions_percent_list, n_informative_list):
-        label_index.append(str(class_sep_factor)+'\n'+str(percent)+'\n'+str(n_informative))
-
-    plt.xticks(class_sep_factor_list, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.xlabel("Factor (class_sep = factor*n_clusters_per_class)\nPercentage of dimensions of latent space unsued in views\nNumber of informative features")
-    plt.ylabel("Accuracy score for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04,0.5), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - R = "+str(round(R, 4))+"\nfactor of latent space dimension = "+str(Z_factor)+" - number of classes = "+str(n_classes)+"\nAccuracy score vs factor of class_sep for classifier "+classifier)
-    plt.savefig(path_graph+"score_class_sep_"+str(n_views)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-        
-    return df_scores_means, df_scores_std
-
-
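-# A minimal usage sketch (hypothetical): increasing class_sep_factor moves the
-# latent space clusters further apart, so all accuracy scores should rise.
-def _demo_score_class_sep():
-    means, stds = score_multiviews_class_sep([1, 2, 5, 10], "/tmp/graphs/", cv=5)
-    print(means)
-
-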
-def score_multiviews_n_informative_divided(n_informative_divid_list, path_graph, 
-                                           cv=10, classifier="SVM", 
-                                           classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}, 
-                                           n_views=3, n_samples=1000, 
-                                           n_classes=2, Z_factor=1, R=2/3, 
-                                           n_clusters_per_class=2, 
-                                           class_sep_factor=2, d=4, D=10, 
-                                           standard_deviation=2):
-    """
-    Returns 2 DataFrames (the first with the accuracy score and the second 
-    with the standard deviation of the accuracy score) of latent space, views, 
-    early fusion predictions (concatenated views) and late fusion predictions 
-    (majority vote over views) with n_informative_divid_list as index for the 
-    indicated classifier
-    Creates and saves (at the indicated path path_graph) a graph representing 
-    accuracy score (with confidence interval) vs n_informative_divid_list
-    
-    Parameters:
-    -----------
-    n_informative_divid_list : list
-                                 each element of n_informative_divid_list 
-                                 defines a new dataset generated with that 
-                                 element as n_informative_divid
-    path_graph : str
-    cv : int
-    classifier : str
-    classifier_dictionary : dict
-    n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, 
-    class_sep_factor, d, D, standard_deviation : parameters of generator_multiviews_dataset
-        
-    Returns:
-    --------
-    2 DataFrames with n_informative_divid_list as index
-    """
-    # list of the percentage of unused latent space columns for each n_informative_divid
-    unsued_dimensions_percent_list = []
-    # list of the number of informative latent space features for each n_informative_divid
-    n_informative_list = []
-    # dictionary contains mean of accuracy scores per n_informative_divid
-    dict_scores_means = {'latent space':[]}
-    dict_scores_means.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_means.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains standard deviation of accuracy scores per 
-    # n_informative_divid
-    dict_scores_std = {'latent space':[]}
-    dict_scores_std.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_std.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains data of each view
-    dict_views = {'view'+str(i):0 for i in range(n_views)}
-    
-    for n_informative_divid in n_informative_divid_list:
-        Z, y, multiviews_list, unsued_dimensions_percent, n_informative = generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-        unsued_dimensions_percent_list.append(unsued_dimensions_percent)
-        n_informative_list.append(n_informative)
-
-        for i in range(n_views):
-            # multiviews_list[i] = (data of view i, indices of the latent space columns used by view i)
-            dict_views['view'+str(i)] = multiviews_list[i][0]
-        
-        early_fusion = np.concatenate([dict_views[key] for key in dict_views.keys()], axis=1)  
-        # = concatenation of all views
-        # dictionary of data
-        dict_data = {'latent space':Z}
-        dict_data.update({'view'+str(i):dict_views['view'+str(i)] for i in range(n_views)})
-        dict_data.update({'early fusion':early_fusion})
-                
-        for key in dict_data.keys():
-            clf = classifier_dictionary[classifier]
-            score = cross_val_score(clf, dict_data[key], y, scoring='accuracy', cv=cv)
-            dict_scores_means[key].append(score.mean())
-            dict_scores_std[key].append(score.std())
-                
-        mean_majority, std_majority = majority_score(dict_views, y, cv, classifier, classifier_dictionary)
-        dict_scores_means['late fusion'].append(mean_majority)
-        dict_scores_std['late fusion'].append(std_majority)
-
-    df_scores_means = pd.DataFrame(dict_scores_means, index=n_informative_divid_list)
-    df_scores_std = pd.DataFrame(dict_scores_std, index=n_informative_divid_list)
-    
-    plt.figure()
-    for key in dict_scores_means.keys():
-        plt.errorbar(n_informative_divid_list, dict_scores_means[key], 1.96*np.array(dict_scores_std[key])/sqrt(cv), label=key)
-    # index and label for graphic
-    label_index = []
-    for n_informative_divid, percent, n_informative in zip(n_informative_divid_list, unsued_dimensions_percent_list, n_informative_list):
-        label_index.append(str(n_informative_divid)+'\n'+str(percent)+'\n'+str(n_informative))
-
-    plt.xticks(n_informative_divid_list, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.xlabel("Factor (n_informative = dimension of latent space / factor)\nPercentage of dimensions of latent space unsued in views\nNumber of informative features")
-    plt.ylabel("Accuracy score for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04,0.5), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - R = "+str(round(R, 4))+"\nfactor of latent space dimension = "+str(Z_factor)+" - number of classes = "+str(n_classes)+"\nAccuracy score vs n_informative_divid for classifier "+classifier)
-    plt.savefig(path_graph+"score_n_informative_"+str(n_views)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-        
-    return df_scores_means, df_scores_std
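-
-
-# A minimal usage sketch (hypothetical): raising n_informative_divid lowers the
-# number of informative latent features, which should degrade all scores;
-# "/tmp/graphs/" is a placeholder output directory.
-if __name__ == "__main__":
-    demo_means, demo_stds = score_multiviews_n_informative_divided([1, 2, 4], "/tmp/graphs/", cv=5)
-    print(demo_means)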
diff --git a/multiview_generator/_old_multiviews_datasets.py b/multiview_generator/_old_multiviews_datasets.py
deleted file mode 100644
index b734179..0000000
--- a/multiview_generator/_old_multiviews_datasets.py
+++ /dev/null
@@ -1,194 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Nov 26 15:38:38 2019
-
-@author: bernardet
-"""
-
-from sklearn.datasets import make_classification
-from random import gauss
-from math import ceil, floor
-import numpy as np
-import pandas as pd
-
-
-def latent_space_dimension(views_dimensions_list, R):
-    """
-    Returns the minimal latent space dimension (enough to build the dataset) required by generator_multiviews_dataset for the view dimensions in views_dimensions_list and redundancy level R
-    
-    Parameters:
-    -----------
-    views_dimensions_list : list
-    R : float
-        
-    Returns:
-    --------
-    an int
-    """
-    max_view_dimension = max(views_dimensions_list)
-    dimension = ceil(R*sum(views_dimensions_list))
-    
-    if dimension < max_view_dimension:
-        dimension = max_view_dimension
-            
-    reduced_dimension = dimension
-    remove_sum = 0
-    
-    for num_view in range(1, len(views_dimensions_list)):
-        view_prec = views_dimensions_list[num_view - 1]
-        view_current = views_dimensions_list[num_view]
-        remove = floor(R*view_prec)
-        remove_sum += remove
-        if reduced_dimension - remove < view_current:
-            dimension += view_current - (reduced_dimension - remove)
-        reduced_dimension = dimension - remove_sum
-            
-    return dimension
-  
-
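-# A minimal usage sketch (hypothetical): for three views of dimensions 8, 6
-# and 4 with R = 0.5, the returned dimension is at least the largest view
-# dimension, enlarged so every view can still be drawn after redundancy removal.
-def _demo_latent_space_dimension():
-    dim = latent_space_dimension([8, 6, 4], 0.5)
-    print(dim)  # an int >= max(8, 6, 4)
-
-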
-def projection(latent_space, chosen_columns_list):
-    """
-    Returns the projection of latent_space on the columns of chosen_columns_list (in chosen_columns_list order)
-    
-    Parameters:
-    -----------
-    latent_space : array
-    chosen_columns_list : list
-        
-    Returns:
-    --------
-    an array of dimension (number of rows of latent_space, length of chosen_columns_list)
-    """
-    return latent_space[:, chosen_columns_list]
-
-
-def generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation, random_state=42):
-    """
-    Generates and returns a synthetic multiview dataset
-    
-    Parameters:
-    -----------
-    n_samples : int
-                dataset number of samples (number of rows of dataset)
-    n_views : int >= 2
-              dataset number of views
-              one view is a set of some features (columns) of the latent space
-    n_classes : int >= 2
-                dataset number of classes 
-    Z_factor : float >= 1
-               the minimal latent space dimension (enough to build the dataset) is calculated, then multiplied by Z_factor
-    R : 0 <= float <= 1
-        R = 1 : no possibility of redundancy between views
-        R = 0 : maximal possibility of redundancy between views
-    n_clusters_per_class : int
-    class_sep_factor : float
-                       class_sep = n_clusters_per_class*class_sep_factor
-    n_informative_divid : float >= 1
-                          the higher n_informative_divid, the more non-informative features
-                          n_informative_divid = 1 : no non-informative features (number of informative features = dimension of latent space)
-                          number of informative features = round(dimension of latent space / n_informative_divid)
-    d : float >= 1
-        minimal dimension of views
-        dimension of views (int) chosen randomly from N((d+D)/2, standard_deviation^2) with d <= dimension of views <= D
-    D : float >= d
-        maximal dimension of views
-        dimension of views (int) chosen randomly from N((d+D)/2, standard_deviation^2) with d <= dimension of views <= D
-    standard_deviation : float
-                         standard deviation of the gaussian distribution N((d+D)/2, standard_deviation^2)
-                         dimension of views (int) chosen randomly from N((d+D)/2, standard_deviation^2) with d <= dimension of views <= D
-        
-    Returns:
-    --------
-    Z : an array of dimension (n_samples, dim_Z) = the generated latent space samples
-    y : an array of dimension (n_samples) = the integer labels for class membership of each sample
-    a list of n_views tuples (X_v, I_v) with :
-        X_v = Z projected onto the d_v columns listed in I_v (d_v = dimension of the v-th view)
-        I_v = indices of the Z columns used to build X_v
-    unsued_dimensions_percent : percentage of unused columns of latent space in views
-    n_informative : number of informative features (dimension of latent space - n_informative = number of non-informative features)
-    """
-    
-    if n_views < 2:
-        raise ValueError("n_views must be >= 2")
-    if n_classes < 2:
-        raise ValueError("n_classes must be >= 2")
-    if Z_factor < 1:
-        raise ValueError("Z_factor must be >= 1 for the algorithm to work properly")
-    if d < 1:
-        raise ValueError("d must be >= 1")
-    if (d+D)/2 - 3*standard_deviation < 0:
-        raise ValueError("(d+D)/2 - 3*standard_deviation must be >= 0 so that view dimensions drawn from the normal distribution stay positive")
-    
-    # draw n_views view dimensions from N((d+D)/2, standard_deviation^2)
-    d_v = np.random.normal(loc=(d+D)/2, scale=standard_deviation, size=n_views)
-    d_v = list(d_v)
-    remove_list, add_list = [], []
-    for dim_view in d_v:
-        if dim_view < d or dim_view > D:  # 1 <= d <= dim_view <= D
-            remove_list.append(dim_view)
-            add = -1
-            while add < d or add > D:
-                add = gauss((d+D)/2, standard_deviation)
-            add_list.append(add)
-    d_v = [view for view in d_v if view not in remove_list] + add_list
-    d_v = [int(view) for view in d_v]  # view dimensions must be integers
-    # sort view dimensions from the highest to the lowest
-    d_v.sort(reverse=True)
-    # Dimension of latent space Z (multiplied by Z_factor)
-    dim_Z = Z_factor*latent_space_dimension(d_v, R)
-    # Number of informative features
-    n_informative = round(dim_Z/n_informative_divid)
-    # Generation of latent space Z
-    Z, y = make_classification(n_samples=n_samples, n_features=dim_Z, n_informative=n_informative,
-                               n_redundant=0, n_repeated=0, n_classes=n_classes,
-                               n_clusters_per_class=n_clusters_per_class, weights=None, flip_y=0,
-                               class_sep=n_clusters_per_class*class_sep_factor,
-                               random_state=random_state, shuffle=False)
-        
-    I_q = np.array([i for i in range(Z.shape[1])])  # 1D array of Z column indices
-    meta_I_v = []
-    results = []
-    for view in range(n_views):
-        # draw d_v[view] column indices of Z uniformly from I_q
-        I_v = np.random.choice(I_q, size=d_v[view], replace=False)  # sampled from I_q without replacement
-        meta_I_v += list(I_v)
-        # projection of Z along the columns in I_v
-        X_v = projection(Z, I_v)
-        results.append((X_v, I_v))
-        # remove floor(R*d_v[view]) of the column indices in I_v from I_q
-        elements_to_remove = np.random.choice(I_v, size=floor(R*d_v[view]), replace=False)  # sampled from I_v without replacement
-        I_q = np.setdiff1d(I_q, elements_to_remove)  # drop elements_to_remove from I_q
-    unsued_dimensions_list = [column for column in I_q if column not in meta_I_v]
-    unsued_dimensions_percent = round((len(unsued_dimensions_list) / dim_Z)*100, 2)
-    return Z, y, results, unsued_dimensions_percent, n_informative
-
-
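-# A minimal usage sketch (hypothetical values): generate a small 3-view
-# dataset and check the shapes of the latent space and of each view.
-def _demo_generator():
-    Z, y, views, unused_percent, n_info = generator_multiviews_dataset(
-        n_samples=200, n_views=3, n_classes=2, Z_factor=1, R=0.5,
-        n_clusters_per_class=1, class_sep_factor=2, n_informative_divid=1,
-        d=4, D=10, standard_deviation=2)
-    print(Z.shape, y.shape)
-    for X_v, I_v in views:
-        print(X_v.shape, list(I_v))
-
-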
-def results_to_csv(path, latent_space, integer_labels, multiviews_list):
-    """
-    Creates len(multiviews_list) + 2 csv files at the indicated path
-    File names :
-        latent_space.csv for latent_space
-        integer_labels.csv for integer_labels
-        view0.csv for multiviews_list[0], view1.csv for multiviews_list[1], etc.
-    
-    Parameters:
-    -----------
-    path : str
-    latent_space : array
-    integer_labels : 1D array
-    multiviews_list : list of tuples
-        
-    Returns:
-    --------
-    None
-    """
-    df_latent_space = pd.DataFrame(latent_space)
-    df_latent_space.to_csv(path+'latent_space.csv', index=False)
-    
-    df_labels = pd.DataFrame(integer_labels)
-    df_labels.to_csv(path+'integer_labels.csv', index=False)
-    
-    cpt = 0
-    for view_tuple in multiviews_list:
-        df_view = pd.DataFrame(view_tuple[0], columns=view_tuple[1])
-        df_view.to_csv(path+'view'+str(cpt)+'.csv', index=False)
-        cpt += 1
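-
-
-# A minimal usage sketch (hypothetical): generate a dataset and dump it to csv
-# files in a placeholder directory; the path must end with a separator, since
-# file names are appended by string concatenation.
-if __name__ == "__main__":
-    Z, y, views, _, _ = generator_multiviews_dataset(100, 2, 2, 1, 0.5, 1, 2, 1, 4, 10, 2)
-    results_to_csv("/tmp/", Z, y, views)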
diff --git a/multiview_generator/_old_parameters.py b/multiview_generator/_old_parameters.py
deleted file mode 100644
index 25d6d53..0000000
--- a/multiview_generator/_old_parameters.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Nov 26 13:53:05 2019
-
-@author: bernardet
-"""
-from sklearn.svm import SVC
-from sklearn.naive_bayes import GaussianNB
-import numpy as np
-
-# General parameters
-n_samples = 1000
-# number of samples (int)
-n_views = 3
-# number of views >= 2 (int)
-n_classes = 2
-# number of classes >= 2 (int)
-Z_factor = 250
-# multiplication factor of Z dimension (default value = 1)
-R = 2/3
-# redundancy parameter R (float)
-cv = 10
-# number of cross-validation splits (int)
-n_clusters_per_class = 2
-# number of clusters per class >= 1 (int)
-class_sep_factor = 2
-# factor >= 1 as class_sep = n_clusters_per_class*class_sep_factor
-n_informative_divid = 1
-# factor >= 1 as number of informative features = round(dimension of latent space / n_informative_divid)
-classifier = "SVM"
-# name of classifier (str)
-classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}
-# dictionary of classifiers
-n_samples_list = [100, 500, 1000, 1500, 2000]#, 2500, 3000]#, 3500, 4000, 5000, 7000, 10000]
-# list of number of samples to test multiview_generator
-R_list = list(np.arange(0, 1.05, 0.05))
-# list of diverse R
-Z_factor_list = [1, 3, 10, 25, 100, 250, 1000]#[25, 50, 75, 100, 150, 200, 250, 500, 600, 750, 800, 900, 1000]
-# list of diverse Z_factor
-n_views_list = [n_view for n_view in range(2, 10)]
-# list of diverse n_views
-class_sep_factor_list = [2, 5, 10]
-# list of diverse class_sep_factor
-n_informative_divid_list = [1, 2, 3]
-# list of diverse n_informative_divid
-path_data = "/home/bernardet/Documents/StageL3/Data/"
-# path to register the multiview dataset
-path_graph = "/home/bernardet/Documents/StageL3/Graph/"
-# path to register scores graph
-
-
-
-# Parameters of the gaussian distribution N((d+D)/2, standard_deviation^2) :
-# d <= dim[v] <= D for all v
-# (d+D)/2 - 3*standard_deviation >= 0
-d = 4
-# < D, > 0
-D = 10
-# > d
-standard_deviation = 2
-# standard deviation of the gaussian distribution
-
-
-# make_classification parameters :
-
-# TODO: figure out how to use these in make_classification
-part_informative = 0
-# proportion of informative features (float between 0 and 1)
-part_redundant = 1
-# proportion of redundant features (float between 0 and 1)
-# n_redundant >= 1 for redundant features
-part_repeated = 1
-# proportion of repeated features (float between 0 and 1)
-# n_repeated >= 1 for useless features and correlation
-
-
-weights = [0.7, 0.3]
-# proportion of samples assigned to each class (list) with len(weights) = n_classes
-# != [0.5, 0.5], e.g. [0.8, 0.2], for class imbalance
-flip_y = 0.1
-# fraction of samples whose class is randomly exchanged (float)
-# > 0 to add label noise
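-
-
-# Sanity checks (a minimal sketch, added for illustration): verify the
-# constraints on the gaussian distribution documented above.
-assert 0 < d < D, "require 0 < d < D"
-assert (d + D) / 2 - 3 * standard_deviation >= 0, \
-    "view dimensions drawn from N((d+D)/2, standard_deviation^2) must stay positive"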
diff --git a/multiview_generator/_old_result.py b/multiview_generator/_old_result.py
deleted file mode 100644
index dfd27f5..0000000
--- a/multiview_generator/_old_result.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Wed Nov 27 16:14:14 2019
-
-@author: bernardet
-"""
-import parameters
-from multiviews_datasets import generator_multiviews_dataset, results_to_csv
-from tests.test_classifier import score_multiviews_n_samples, graph_comparaison_classifier_scores_n_samples, score_multiviews_R, score_multiviews_Z_factor, score_multiviews_n_views_R, score_multiviews_class_sep, score_one_multiview_dataset, score_multiviews_n_informative_divided
-
-import warnings
-warnings.simplefilter(action='ignore', category=FutureWarning)
-
-n_samples = parameters.n_samples
-n_views = parameters.n_views
-n_classes = 3  # parameters.n_classes
-Z_factor = parameters.Z_factor
-R = parameters.R
-n_clusters_per_class = 1  # parameters.n_clusters_per_class
-class_sep_factor = 2  # 5  # 2  # parameters.class_sep_factor
-n_informative_divid = 2  # parameters.n_informative_divid
-cv = parameters.cv
-classifier = parameters.classifier
-classifier_dictionary = parameters.classifier_dictionary
-d = parameters.d
-D = parameters.D
-standard_deviation = parameters.standard_deviation
-path_data = parameters.path_data
-path_graph = parameters.path_graph
-n_samples_list = parameters.n_samples_list
-R_list = parameters.R_list
-Z_factor_list = parameters.Z_factor_list
-n_views_list = parameters.n_views_list
-class_sep_factor_list = parameters.class_sep_factor_list
-n_informative_divid_list = parameters.n_informative_divid_list
-
-
-# Generate one dataset
-#Z, y, multiviews_list, unsued_columns_percent = generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-#print(Z, y, multiviews_list)
-
-# Register one multiview dataset
-#results_to_csv(path, Z, y, multiviews_list)
-
-# Score of one multiview dataset
-#df_dimensions, df_scores_means, df_scores_std = score_one_multiview_dataset(cv, classifier, classifier_dictionary, n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-#print(df_dimensions, df_scores_means, df_scores_std)
-
-# Scores of n_samples_list datasets
-#mean_samples, std_samples = score_multiviews_n_samples(n_samples_list, path_graph, cv, classifier, classifier_dictionary, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-#print(mean_samples, std_samples)
-
-# Plot scores classifier2 vs score classifier1
-classifier1 = "SVM"
-classifier2 = "NB"
-#graph_comparaison_classifier_scores_n_samples(classifier1, classifier2, n_samples_list, path_graph, cv, classifier_dictionary, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-
-# Scores of R_list datasets
-#mean_R, std_R = score_multiviews_R(R_list, path_graph, cv, classifier, classifier_dictionary, n_samples, n_views, n_classes, Z_factor, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-#print(mean_R, std_R)
-
-# Scores of Z_factor_list datasets
-#mean_Z, std_Z, error_Z = score_multiviews_Z_factor(Z_factor_list, path_graph, cv, classifier, classifier_dictionary, n_samples, n_views, n_classes, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-#print(mean_Z, std_Z, error_Z)
-
-# Scores divided by scores for R=1 (redundancy null) of n_views_list and R_list datasets
-#dict_n_views_R_ratio = score_multiviews_n_views_R(n_views_list, R_list, path_graph, cv, classifier, classifier_dictionary, n_samples, n_classes, Z_factor, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-#print(dict_n_views_R_ratio)
-
-# Scores of class_sep_factor_list datasets 
-#df_mean, df_std = score_multiviews_class_sep(class_sep_factor_list, path_data, path_graph, cv, classifier, classifier_dictionary, n_views, n_samples, n_classes, Z_factor, R, n_clusters_per_class, n_informative_divid, d, D, standard_deviation)
-#print(df_mean, df_std)
-
-# Scores of n_informative_divid_list datasets 
-#mean_n_info, std_n_info = score_multiviews_n_informative_divided(n_informative_divid_list, path_graph, cv, classifier, classifier_dictionary, n_views, n_samples, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, d, D, standard_deviation)
-#print(mean_n_info, std_n_info)
-
-
-Z_factor_list = [1, 3, 10, 25, 100, 250, 1000]
-path_graph = "/home/bernardet/Documents/StageL3/Graph/n_views_3_10_1_clus_2_n_info_div/"
-n_classes = 2
-n_clusters_per_class = 1
-class_sep_factor = 2
-n_informative_divid = 2
-for n_views in range(3, 11):
-    n_samples = 500*n_views
-    mean_Z, std_Z, error_Z = score_multiviews_Z_factor(Z_factor_list, path_graph, cv, classifier, classifier_dictionary, n_samples, n_views, n_classes, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
diff --git a/multiview_generator/_old_update_baptiste.py b/multiview_generator/_old_update_baptiste.py
deleted file mode 100644
index 1e99488..0000000
--- a/multiview_generator/_old_update_baptiste.py
+++ /dev/null
@@ -1,635 +0,0 @@
-import os
-from abc import abstractmethod, ABC
-from sklearn.tree import DecisionTreeClassifier
-from sklearn.metrics import accuracy_score, zero_one_loss
-from sklearn.datasets import make_classification
-from sklearn.model_selection import StratifiedKFold
-import yaml
-import numpy as np
-from math import ceil, floor
-import pandas as pd
-import h5py
-
-
-class MultiviewDatasetGenetator():
-    def __init__(self, n_samples=100, n_views=2, n_classes=2,
-                 # Z_factor=2,
-                 # R=0,
-                 n_clusters_per_class=1,
-                 class_sep=1.0,
-                 # n_informative_divid=2,
-                 lower_dim=4,
-                 higher_dim=10,
-                 # standard_deviation=2,
-                 class_weights=None,
-                 flip_y=0.0,
-                 # n_informative_weights=None,
-                 random_state=42, config_path=None,
-                 example_subsampling_method="block",
-                 example_subsampling_config={},
-                 feature_subampling_method="block",
-                 feature_subsampling_config={},
-                 redundancy=None,
-                 methods="uniform",
-                 view_dims=None,
-                 estimator_name="LOneOneScore",
-                 estimator_config={},
-                 build_method="iterative",
-                 precision=0.1,
-                 priority="random",
-                 confusion_matrix=None,
-                 n_informative=10,
-                 step=1):
-        if config_path is not None:
-            with open(config_path) as config_file:
-                args = yaml.safe_load(config_file)
-                self.__init__(**args)
-        else:
-            self.n_samples = n_samples
-            self.n_views = n_views
-            self.n_classes = n_classes
-            self.n_clusters_per_class = n_clusters_per_class
-            self.class_sep = class_sep
-            self.class_weights = class_weights
-            self.flip_y = flip_y
-            self.example_subsampling_method = example_subsampling_method
-            self.n_informative_weights = ""
-            self.feature_subampling_method = feature_subampling_method
-            self.feature_subsampling_config = feature_subsampling_config
-            self.example_subsampling_config = example_subsampling_config
-            self.redundancy = redundancy
-            self.estimator_name = estimator_name
-            self.build_method = build_method
-            self.precision = precision
-            self.priority = priority
-            self.n_informative = n_informative
-            self.estimator_config = estimator_config
-            self.step = step
-            if isinstance(methods, list):
-                self.methods = methods
-            elif isinstance(methods, str):
-                self.methods = [methods for _ in range(self.n_views)]
-            else:
-                raise ValueError("methods should be list or string not {}".format(type(methods)))
-            if isinstance(random_state, np.random.RandomState):
-                self.random_state = random_state
-            elif isinstance(random_state, int):
-                self.random_state = np.random.RandomState(random_state)
-            else:
-                raise ValueError("random_sate must be np.random.RandomState or int")
-            if view_dims is None:
-                # TODO
-                self.view_dims = self.random_state.randint(max(n_informative, lower_dim), high=max(n_informative+1,higher_dim), size=self.n_views)
-            else:
-                self.view_dims = np.asarray(view_dims)
-            if confusion_matrix is None:
-                self.input_confusion_matrix = ""
-            elif isinstance(confusion_matrix, str):
-                self.input_confusion_matrix = np.genfromtxt(confusion_matrix, delimiter=",", dtype=float)
-            else:
-                self.input_confusion_matrix = np.asarray(confusion_matrix)
-            # if error_matrix is not None:
-            #     self.error_matrix = error_matrix
-            # else:
-            #     self.error_matrix = self.random_state.uniform(size=(self.n_classes, self.n_views))
-            # self.n_informative_divid = n_informative_divid
-            # self.d = lower_dim
-            # self.D = higher_dim
-            # self.standard_deviation = standard_deviation
-            # self.Z_factor = Z_factor
-            # self.R = R
-
-
-    def to_csv(self, saving_path="."):
-        """
-        Create length of multiviews_list + 2 csv files to the indicated path
-        Files name :
-            latent_space.csv for latent_space
-            integer_labels.csv for integer_labels
-            view0.csv for multiviews_list[0]
-
-        Parameters:
-        -----------
-        path : str
-        latent_space : array
-        integer_labels : 1D array
-        multiviews_list : list of tuples
-
-        Returns:
-        --------
-        None
-        """
-        df_latent_space = pd.DataFrame(self.Z)
-        df_latent_space.to_csv(os.path.join(saving_path, 'latent_space.csv')
-                               , index=False)
-
-        df_labels = pd.DataFrame(self.y)
-        df_labels.to_csv(os.path.join(saving_path, 'integer_labels.csv'),
-                         index=False)
-
-        for view_index, view_tuple in enumerate(self.results):
-            df_view = pd.DataFrame(view_tuple[0], columns=view_tuple[1])
-            df_view.to_csv(os.path.join(saving_path,
-                                        'view'+str(view_index)+"_"+str(self.n_informative_weights[view_index])+'.csv'),
-                           index=False)
-
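-    # Note: to_csv reads self.Z, self.y and self.results, which are only filled
-    # by the legacy (commented-out) generate() path; the active get_dataset()
-    # path stores its output in self.latent_space and self.view_matrices instead.
-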
-    def to_hdf5(self, saving_path=".", name="generated_dset"):
-
-        dataset_file = h5py.File(os.path.join(saving_path, name+".hdf5"), 'w')
-
-        labels_dataset = dataset_file.create_dataset("Labels",
-                                                     shape=self.y.shape,
-                                                     data=self.y)
-
-        labels_names = ["Label_1", "Label_0"]
-
-        labels_dataset.attrs["names"] = [
-            label_name.encode() if not isinstance(label_name, bytes)
-            else label_name for label_name in labels_names]
-
-        for view_index, (data, feature_indices) in enumerate(self.results):
-            df_dataset = dataset_file.create_dataset("View" + str(view_index),
-                                                     shape=data.shape,
-                                                     data=data)
-
-            df_dataset.attrs["sparse"] = False
-            df_dataset.attrs["name"] = "GeneratedView"+str(view_index)+"_"+str(self.n_informative_weights[view_index])
-
-        meta_data_grp = dataset_file.create_group("Metadata")
-
-        meta_data_grp.attrs["nbView"] = len(self.results)
-        meta_data_grp.attrs["nbClass"] = np.unique(self.y)
-        meta_data_grp.attrs["datasetLength"] = \
-        self.results[0][0].shape[0]
-
-        meta_data_grp.create_dataset("example_ids", data=np.array(
-            ["gen_example_" + str(ex_indx) for ex_indx in
-             range(self.results[0][0].shape[0])]).astype(
-            np.dtype("S100")), dtype=np.dtype("S100"))
-
-        dataset_file.close()
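-
-    # A minimal sketch of reading the written file back with h5py (illustrative
-    # only; the path below is hypothetical):
-    #
-    #     import h5py
-    #     with h5py.File("/tmp/generated_dset.hdf5", "r") as dset:
-    #         labels = dset["Labels"][...]
-    #         view0 = dset["View0"][...]
-    #         n_views = dset["Metadata"].attrs["nbView"]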
-
-    def get_dataset(self):
-        self.latent_space, self.y = make_classification(n_samples=self.n_samples,
-                                                n_features=self.n_informative,
-                                                n_informative=self.n_informative,
-                                                n_redundant=0,
-                                                n_repeated=0,
-                                                n_classes=self.n_classes,
-                                                n_clusters_per_class=self.n_clusters_per_class,
-                                                weights=self.class_weights,
-                                                flip_y=self.flip_y,
-                                                class_sep =self.class_sep,
-                                                shuffle=False,
-                                                random_state=self.random_state)
-        self.view_matrices = [self.gen_random_matrix(self.methods[view_index],
-                                                     view_index)
-                              for view_index in range(self.n_views)]
-        self.output_confusion_matrix = np.zeros((self.n_classes, self.n_views))
-        for view_index in range(self.n_views):
-            for class_index in range(self.n_classes):
-                # Restarting with all the available features
-                self.available_feature_indices = np.arange(self.n_informative)
-                class_example_indices = np.where(self.y==class_index)[0]
-                self.get_class_subview(class_example_indices, view_index, class_index)
-
-    def gen_random_matrix(self, method, view_index, low=0, high=1):
-        if method == "uniform":
-            return self.random_state.uniform(low=low, high=high,
-                                             size=(self.n_samples,
-                                                   self.view_dims[view_index]))
-        else:
-            raise ValueError("Random matrix method {} is not defined".format(method))
-
-    def get_class_subview(self, class_examples_indices, view_index, class_index):
-        if self.build_method == "iterative":
-            n_examples = 1
-            n_features = 1
-            dist = 100
-            min_dist = 100
-            min_dist_score = 0
-            print("Restarting a view", self.available_feature_indices.shape[0])
-            while dist > self.precision:
-                if (n_examples >= self.latent_space.shape[0] and n_features >= self.latent_space.shape[1]) or self.available_feature_indices.shape[0]<1:
-                    raise ValueError("For view {}, class {}, unable to attain a score of {} with a "
-                                     "precision of {}, the best score "
-                                     "was {}".format(view_index, class_index, self.input_confusion_matrix[class_index, view_index],
-                                                     self.precision,
-                                                     min_dist_score))
-                chosen_example_indices = self.sub_sample_examples(class_examples_indices,
-                                                                  n_examples,
-                                                                  **self.example_subsampling_config)
-                chosen_features_indices = self.sub_sample_features(n_features,
-                                                                   **self.feature_subsampling_config)
-                couples = self.make_couples(chosen_example_indices, chosen_features_indices)
-                for [row_idx, col_idx] in couples:
-                    self.view_matrices[view_index][row_idx, col_idx] = self.latent_space[row_idx, col_idx]
-                estimator_value = self.get_estimator(chosen_features_indices,
-                                              chosen_example_indices,
-                                              self.view_matrices[view_index][class_examples_indices, :])
-                print(estimator_value)
-                # print("\t Target : {}, Val : {}".format(self.input_confusion_matrix[class_index, view_index], estimator_value))
-                # print("\t Examples : {}, Features : {}".format(n_examples, n_features))
-                dist = abs(self.input_confusion_matrix[class_index, view_index] - estimator_value)
-                if dist < min_dist:
-                    min_dist = dist
-                    min_dist_score = estimator_value
-                if self.priority == "both":
-                    n_examples += self.step
-                    n_features += self.step
-                elif self.priority=="examples":
-                    if n_examples < self.latent_space.shape[0]:
-                        n_examples += self.step
-                    else:
-                        n_examples = 1
-                        n_features += self.step
-                if self.priority == "features":
-                    if n_features < self.latent_space.shape[1]:
-                        n_features += self.step
-                    else:
-                        n_features = 1
-                        n_examples += self.step
-                if self.priority == "random":
-                    n_examples += self.random_state.randint(0, self.step+1)
-                    n_features += self.random_state.randint(0, self.step+1)
-        else:
-            raise ValueError("Build method {} is not defined".format(self.build_method))
-        self.output_confusion_matrix[class_index, view_index] = estimator_value
-        self.remove_feature_indices(chosen_features_indices)
-
-    #TODO
-    def make_couples(self, row_indices, col_indices):
-        couples = [[row, col] for col in col_indices for row in row_indices]
-        return couples
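-
-    # Worked example (cartesian product of the two index lists):
-    #     make_couples([3, 5], [0, 1]) -> [[3, 0], [5, 0], [3, 1], [5, 1]]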
-
-    def remove_feature_indices(self, indices):
-        if self.redundancy is None:
-            self.available_feature_indices = np.array([idx for idx in self.available_feature_indices if idx not in indices])
-        elif self.redundancy == "replace":
-            pass
-
-    def get_estimator(self, chosen_feature_indices, chosen_example_indices, class_subview):
-        if self.estimator_name == "LOneOneScore":
-            return LOneOneScore(self.random_state).score(self.latent_space, class_subview, self.y, patch_size=len(chosen_example_indices)*len(chosen_feature_indices))
-        elif self.estimator_name == "DTScore":
-            return DTScore(self.random_state).score(self.latent_space, class_subview, self.y,**self.estimator_config)
-        else:
-            raise ValueError("Estimator {} is not defined".format(self.estimator_name))
-
-    def sub_sample(self, indices, quantity, method, beginning=None):
-        if quantity > len(indices):
-            # TODO: warn that quantity was clipped to len(indices)
-            quantity = len(indices)
-        if method == "block":
-            if beginning is None and quantity != len(indices):
-                beginning = self.random_state.randint(0, len(indices) - quantity)
-            if beginning is not None and len(indices) - beginning > quantity:
-                return indices[beginning:quantity + beginning]
-            else:
-                # TODO: warn about the size of the output
-                return indices[-quantity:]
-        elif method == "choice":
-            return self.random_state.choice(indices, quantity, replace=False)
-        else:
-            raise ValueError("Subsampling method {} is not defined".format(method))
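-
-    # Worked examples, assuming indices = np.arange(10) and quantity = 4:
-    #     "block"  returns 4 contiguous indices, e.g. indices[2:6] -> [2, 3, 4, 5]
-    #     "choice" returns 4 distinct indices drawn at random, e.g. [7, 0, 4, 2]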
-
-    def sub_sample_features(self, n_features, **kwargs):
-        return self.sub_sample(self.available_feature_indices, n_features, self.feature_subampling_method, **kwargs)
-
-    def sub_sample_examples(self, class_examples_indices, n_examples, **kwargs):
-        return self.sub_sample(class_examples_indices, n_examples, self.example_subsampling_method, **kwargs)
-
-    def to_hdf5_mc(self, saving_path=".", name="generated_dset"):
-
-        dataset_file = h5py.File(os.path.join(saving_path, name+".hdf5"), 'w')
-
-        labels_dataset = dataset_file.create_dataset("Labels",
-                                                     shape=self.y.shape,
-                                                     data=self.y)
-
-        labels_names = ["Label_1", "Label_0"]
-
-        labels_dataset.attrs["names"] = [
-            label_name.encode() if not isinstance(label_name, bytes)
-            else label_name for label_name in labels_names]
-
-        for view_index, data in enumerate(self.view_matrices):
-            df_dataset = dataset_file.create_dataset("View" + str(view_index),
-                                                     shape=data.shape,
-                                                     data=data)
-
-            df_dataset.attrs["sparse"] = False
-            df_dataset.attrs["name"] = "GeneratedView"+str(view_index)
-
-        meta_data_grp = dataset_file.create_group("Metadata")
-
-        meta_data_grp.attrs["nbView"] = len(self.view_matrices)
-        meta_data_grp.attrs["nbClass"] = np.unique(self.y)
-        meta_data_grp.attrs["datasetLength"] = \
-        self.view_matrices[0].shape[0]
-
-        meta_data_grp.create_dataset("example_ids", data=np.array(
-            ["gen_example_" + str(ex_indx) for ex_indx in
-             range(self.view_matrices[0].shape[0])]).astype(
-            np.dtype("S100")), dtype=np.dtype("S100"))
-
-        dataset_file.close()
-        np.savetxt(os.path.join(saving_path, "output_confusion_matrix.csv"),self.output_confusion_matrix)
-
-
-class InformationScorer(ABC):
-
-    def __init__(self, random_state):
-        self.random_state = random_state
-
-    @abstractmethod
-    def score(self, latent_space, class_subview, y):
-        pass
-
-
-class LOneOneScore(InformationScorer):
-
-    def score(self, latent_space, class_subview, y, patch_size=100):
-        return 1 - patch_size/(class_subview.shape[0]*latent_space.shape[1])
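-
-    # Worked example: with a 50-example class sub-view, a 10-column latent space
-    # and a revealed patch of 5 examples x 4 features (patch_size = 20), the
-    # score is 1 - 20 / (50 * 10) = 0.96.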
-
-class DTScore(InformationScorer):
-
-    def __init__(self, random_state):
-        super().__init__(random_state)
-
-    def score(self, latent_space, class_subview, y, scoring="zero_one_loss",
-              n_splits=7, low=None, high=None, sample_ratio=1, **kwargs):
-        dt = DecisionTreeClassifier(**kwargs)
-        if low is None:
-            low = np.min(class_subview)
-        if high is None:
-            high = np.max(class_subview)
-        dataset = np.concatenate((class_subview,
-                                 self.random_state.uniform(low=low, high=high,
-                                             size=(int(class_subview.shape[0]*sample_ratio),
-                                                   class_subview.shape[1]))),
-                                 axis=0)
-        y = np.zeros(int(class_subview.shape[0]*(1+sample_ratio)))
-        to_detect_indices = np.arange(class_subview.shape[0])
-        # the real examples (the first rows of dataset) are labeled 1, the noise rows 0
-        y[:class_subview.shape[0]] = 1
-        scores = np.zeros(n_splits)
-        skf = StratifiedKFold(n_splits=n_splits)
-        for fold_index, (train_indices, test_indices) in enumerate(skf.split(dataset, y)):
-            intersection = np.intersect1d(test_indices, to_detect_indices, assume_unique=True)
-            dt.fit(dataset[train_indices], y[train_indices])
-            pred = dt.predict(dataset[intersection])
-            if scoring=="zero_one_loss":
-                scores[fold_index] = zero_one_loss(y_true=y[intersection],
-                                                   y_pred=pred)
-            else:
-                raise ValueError("{} is not a valid scoring parameter".format(scoring))
-        return np.mean(scores)
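-
-    # The idea behind DTScore: mix the class sub-view with uniform noise drawn
-    # over the same feature range (sample_ratio noise rows per real row), then
-    # measure, under stratified K-fold CV, how often a decision tree
-    # misclassifies the real examples as noise; the mean zero-one loss on the
-    # real examples is the returned difficulty score.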
-
-
-
-
-
-
-if __name__=="__main__":
-    n_samples = 500  # Number of samples in tha dataset
-    n_views = 4  # Number of views in the dataset
-    n_classes = 3  # Number of classes in the dataset
-    Z_factor = 10  # Z dim = latent_space_dim * z_factor
-    R = 0  # Percentage of non-redundant features in the views
-    n_clusters_per_class = 1  # Number of clusters for each class
-    class_sep = 0.1 # Separation between the different classes
-    n_informative = 100  # Number of informative features in the latent space
-    flip_y = 0.00  # Ratio of label noise
-    random_state = 42
-    lower_dim = 50
-    higher_dim = 100
-    class_weights = None # The proportions of examples in each class
-    n_informative_weights = np.array([0.3,0.3,0.7,0.3,0.3])  # The proportion of the number of informative features for each view
-    input_confusion_matrix = np.array([np.array([0.4, 0.2, 0.3, 0.1]),
-                                       np.array([0.3, 0.3, 0.3, 0.1]),
-                                       np.array([0.05, 0.1, 0.3, 0.1])])
-    precision = 0.05
-    example_subsampling_method = "block"
-    feature_subampling_method = "block"
-    estimator_name = "DTScore"
-    estimator_config = {"max_depth": n_informative}
-
-    path = "/home/baptiste/Documents/Datasets/Generated/confusion/"
-    name = "confusion_test"
-    if not os.path.exists(path):
-        os.mkdir(path)
-    # class_seps = [0.1,0.3,0.5,0.6,0.7,0.9,1.0,2.0,10,100]
-    class_seps = [0.1]
-    scores_sep = np.zeros((len(class_seps), n_classes))
-    for sep_index, class_sep in enumerate(class_seps):
-
-        multiview_generator = MultiviewDatasetGenetator(n_samples=n_samples,
-                                                        n_views=n_views,
-                                                        n_classes=n_classes,
-                                                        n_clusters_per_class=n_clusters_per_class,
-                                                        class_sep=class_sep,
-                                                        n_informative=n_informative,
-                                                        lower_dim=lower_dim,
-                                                        higher_dim=higher_dim,
-                                                        flip_y=flip_y,
-                                                        class_weights=class_weights,
-                                                        random_state=random_state,
-                                                        example_subsampling_method=example_subsampling_method,
-                                                        feature_subampling_method=feature_subampling_method,
-                                                        confusion_matrix=input_confusion_matrix,
-                                                        precision=precision,
-                                                        estimator_name=estimator_name,
-                                                        )
-
-        def inter(arr1, arr2):
-            return np.array([i for i in arr1 if i in arr2])
-
-
-        ratio = 0.8
-        n_splits = 40
-
-        multiview_generator.get_dataset()
-        print(" Input : ")
-        print(multiview_generator.input_confusion_matrix)
-        print("\n Estimated Output : ")
-        print(multiview_generator.output_confusion_matrix)
-        scores = np.zeros((n_classes, n_views))
-        class_indices = [np.array([i for i in np.arange(n_samples) if multiview_generator.y[i]==k]) for k in range(n_classes)]
-        mets = np.zeros((n_classes, n_views, n_splits))
-        for view_index, view in enumerate(multiview_generator.view_matrices):
-            dt = DecisionTreeClassifier(max_depth=view.shape[1], random_state=random_state)
-            # a single RNG shared across split iterations, otherwise every split is identical
-            rs = np.random.RandomState(random_state)
-            for split_index in range(n_splits):
-                train_set = rs.choice(np.arange(view.shape[0]), size=int(view.shape[0]*ratio), replace=False)
-                test_set = np.array([i for i in np.arange(view.shape[0], dtype=int) if i not in train_set]).astype(int)
-                dt.fit(view[train_set, :], multiview_generator.y[train_set])
-                preds = np.zeros(n_samples)
-                preds[test_set] = dt.predict(view[test_set, :])
-                for k in range(n_classes):
-                    mets[k, view_index, split_index] = zero_one_loss(multiview_generator.y[inter(class_indices[k], test_set)], preds[inter(class_indices[k], test_set)])
-        print("\nDecision_tree output : ")
-        print(np.mean(mets, axis=2))
-        print("\n Decision tree on latent : ")
-        lat = multiview_generator.latent_space
-        sc = np.zeros((n_classes, n_splits))
-        dt = DecisionTreeClassifier(max_depth=lat.shape[1],
-                                    random_state=random_state)
-        # a single RNG shared across split iterations, otherwise every split is identical
-        rs = np.random.RandomState(random_state)
-        for split_index in range(n_splits):
-            train_set = rs.choice(np.arange(lat.shape[0]),
-                                  size=int(lat.shape[0] * ratio),
-                                  replace=False)
-            test_set = np.array([i for i in np.arange(lat.shape[0], dtype=int) if
-                                 i not in train_set]).astype(int)
-            dt.fit(lat[train_set, :], multiview_generator.y[train_set])
-            preds = np.zeros(n_samples)
-            preds[test_set] = dt.predict(lat[test_set, :])
-            for k in range(n_classes):
-                sc[k, split_index] = zero_one_loss(
-                    multiview_generator.y[inter(class_indices[k], test_set)],
-                    preds[inter(class_indices[k], test_set)])
-        scores_sep[sep_index] = np.mean(sc, axis=1)
-    print(scores_sep)
-    # multiview_generator.to_hdf5_mc(saving_path=path, name=name)
-
-
-
-    # def generate(self):
-    #     if self.n_views < 2:
-    #         raise ValueError("n_views >= 2")
-    #     if self.n_classes < 2:
-    #         raise ValueError("n_classes >= 2")
-    #     if self.Z_factor < 1:
-    #         raise ValueError(
-    #             "Z_factor >= 1 pour le bon fonctionnement de l'algorithme")
-    #     if (self.R < 0) or (self.R > 1):
-    #         raise ValueError("0 <= R <= 1")
-    #     if self.n_clusters_per_class < 1:
-    #         raise ValueError("n_clusters_per_class >= 1")
-    #     if self.class_sep < 0:
-    #         raise ValueError("class_sep_factor >= 0")
-    #     if self.n_informative_divid < 1:
-    #         raise ValueError("n_informative_divid >= 1")
-    #     if self.d < 1:
-    #         raise ValueError("d >= 1")
-    #     if (self.d + self.D) / 2 - 3 * self.standard_deviation < 1:
-    #         raise ValueError(
-    #             "Il faut que (d+D)/2 - 3*standard_deviation >= 1 pour avoir des valeurs positives non nulles lors de l'emploi de la loi normale")
-    #     if self.error_matrix.shape != (self.n_classes, self.n_views):
-    #         raise "Error matrix must be of shape ({}, {}), it is of shape {}".format(self.n_classes, self.n_views, self.error_matrix.shape)
-    #
-    #     # n_views dimension of view v values randomly from N((d+D)/2, standard_deviation^2)
-    #     d_v = self.random_state.normal(loc=(self.d + self.D) / 2,
-    #                            scale=self.standard_deviation,
-    #                            size=self.n_views)
-    #     d_v = list(d_v)
-    #     remove_list, add_list = [], []
-    #     for dim_view in d_v:
-    #         if dim_view < self.d or dim_view > self.D:  # 1 <= d <= dim_view <= D
-    #             remove_list.append(dim_view)
-    #             add = -1
-    #             while add < self.d or add > self.D:
-    #                 add = self.random_state.normal((self.d + self.D) / 2, self.standard_deviation)
-    #             add_list.append(add)
-    #     d_v = [view for view in d_v if view not in remove_list] + add_list
-    #     d_v = [int(view) for view in d_v]  # dimension of views = integer
-    #     # d_v = list of views dimension from the highest to the lowest
-    #     d_v.sort(reverse=True)
-    #     # Dimension of latent space Z (multiplied by Z_factor)
-    #     self.dim_Z = self.Z_factor * self.latent_space_dimension(d_v)
-    #     # Number of informative features
-    #     self.n_informative = round(self.dim_Z / self.n_informative_divid)
-    #     # Generation of latent space Z
-    #     print("Dim Z : ", self.dim_Z, "N Info : ", self.n_informative, "View_dim", d_v)
-    #     self.Z, self.y = make_classification(n_samples=self.n_samples, n_features=self.dim_Z,
-    #                                          n_informative=self.n_informative, n_redundant=0,
-    #                                          n_repeated=0, n_classes=self.n_classes,
-    #                                          n_clusters_per_class=self.n_clusters_per_class,
-    #                                          weights=self.class_weights,
-    #                                          flip_y=self.flip_y,
-    #                                          class_sep=self.n_clusters_per_class * self.class_sep,
-    #                                          random_state=self.random_state, shuffle=False)
-    #     self.informative_indices = np.arange(self.dim_Z)[:self.n_informative]
-    #     I_q = np.arange(self.Z.shape[1])
-    #     meta_I_v = []
-    #     self.results = []
-    #     for view_index in range(n_views):
-    #         if self.n_informative_weights is not None and len(self.n_informative_weights)==n_views:
-    #             if self.n_informative*self.n_informative_weights[view_index] > d_v[view_index]:
-    #                 n_informative_view = int(self.n_informative*self.n_informative_weights[view_index])
-    #                 d_v[view_index] = n_informative_view
-    #                 I_v = self.random_state.choice(self.informative_indices,
-    #                                                size=n_informative_view,
-    #                                                replace=False)
-    #             else:
-    #                 n_informative_view = int(self.n_informative*self.n_informative_weights[view_index])
-    #                 informative_indices = self.random_state.choice(self.informative_indices,
-    #                                                size=n_informative_view,
-    #                                                replace=False)
-    #                 I_v = np.concatenate((informative_indices,
-    #                                      self.random_state.choice(I_q,
-    #                                                               size=d_v[view_index]-n_informative_view,
-    #                                                               replace=False)))
-    #         else:
-    #             # draw d_v[view_index] column indices of Z uniformly from I_q
-    #             I_v = self.random_state.choice(I_q, size=d_v[view_index],
-    #                                            replace=False)  # sampled from I_q without replacement, size d_v[view_index]
-    #         meta_I_v += list(I_v)
-    #         # projection of Z along the columns in I_v
-    #         X_v = self.projection( I_v)
-    #         self.results.append((X_v, I_v))
-    #         # remove floor(R * d_v[view_index]) column indices of I_v from I_q
-    #         elements_to_remove = self.random_state.choice(I_v,
-    #                                               size=floor(self.R * d_v[view_index]),
-    #                                               replace=False)  # sampled from I_v without replacement
-    #         I_q = np.setdiff1d(I_q,
-    #                            elements_to_remove)  # I_q minus the elements of elements_to_remove
-    #     print("View_dim", d_v)
-    #     self.unsued_dimensions_list = [column for column in I_q if
-    #                               column not in meta_I_v]
-    #     self.unsued_dimensions_percent = round(
-    #         (len(self.unsued_dimensions_list) / self.dim_Z) * 100, 2)
-    #
-    # def projection(self, chosen_columns_list):
-    #     """
-    #     Returns the projection of latent_space on the columns of chosen_columns_list (in chosen_columns_list order)
-    #
-    #     Parameters:
-    #     -----------
-    #     chosen_columns_list : list
-    #
-    #     Returns:
-    #     --------
-    #     an array of dimension (number of rows of latent_space, length of chosen_columns_list)
-    #     """
-    #     return self.Z[:, chosen_columns_list]
-    #
-    # def latent_space_dimension(self, views_dimensions_list):
-    #     """
-    #     Returns the minimal latent-space dimension (enough to build the dataset) needed by generator_multiviews_dataset given views_dimensions_list
-    #
-    #     Parameters:
-    #     -----------
-    #     views_dimensions_list : list
-    #     R : float
-    #
-    #     Returns:
-    #     --------
-    #     an int
-    #     """
-    #     max_view_dimension = max(views_dimensions_list)
-    #     dimension = ceil(self.R * sum(views_dimensions_list))
-    #
-    #     if dimension < max_view_dimension:
-    #         dimension = max_view_dimension
-    #
-    #     reduced_dimension = dimension
-    #     remove_sum = 0
-    #
-    #     for num_view in range(1, len(views_dimensions_list)):
-    #         view_prec = views_dimensions_list[num_view - 1]
-    #         view_current = views_dimensions_list[num_view]
-    #         remove = floor(self.R * view_prec)
-    #         remove_sum += remove
-    #         if reduced_dimension - remove < view_current:
-    #             dimension += view_current - (reduced_dimension - remove)
-    #         reduced_dimension = dimension - remove_sum
-    #
-    #     return dimension
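-    #
-    # Worked example for latent_space_dimension: with views_dimensions_list =
-    # [10, 8, 6] and R = 0.5, the starting dimension is max(ceil(0.5 * 24), 10)
-    # = 12; the pass over consecutive views raises it to 13 (for the view of
-    # dim 8) and then to 15 (for the view of dim 6), so 15 is returned.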
diff --git a/multiview_generator/_old_use_generator_baptiste.py b/multiview_generator/_old_use_generator_baptiste.py
deleted file mode 100644
index 437d14e..0000000
--- a/multiview_generator/_old_use_generator_baptiste.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import os
-import numpy as np
-
-import parameters
-from multiviews_datasets import generator_multiviews_dataset, results_to_csv
-from tests.test_classifier import score_multiviews_n_samples, graph_comparaison_classifier_scores_n_samples, score_multiviews_R, score_multiviews_Z_factor, score_multiviews_n_views_R, score_multiviews_class_sep, score_one_multiview_dataset, score_multiviews_n_informative_divided
-
-import warnings
-warnings.simplefilter(action='ignore', category=FutureWarning)
-
-
-n_samples = 100
-n_views = 3
-n_classes = 2
-Z_factor = 1
-R = 0
-n_clusters_per_class = 1
-class_sep_factor = 100
-n_informative_divid = 1
-standard_deviation = 2
-d = 4
-D = 10
-
-path = "/home/baptiste/Documents/Datasets/Generated/try_outlier/"
-if not os.path.exists(path):
-    os.mkdir(path)
-
-Z, y, results, unsued_dimensions_percent, n_informative = generator_multiviews_dataset(n_samples, n_views, n_classes,
-                                                                                       Z_factor, R,
-                                                                                       n_clusters_per_class,
-                                                                                       class_sep_factor,
-                                                                                       n_informative_divid, d, D,
-                                                                                       standard_deviation)
-print(y[:10])
-print(unsued_dimensions_percent)
-print(n_informative)
-print(Z.shape)
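-# flip the first 10 labels to inject outliers into the generated dataset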
-y[:10] = np.invert(y[:10].astype(bool)).astype(int)
-print(y[:10])
-results_to_csv(path, Z, y, results)
-
diff --git a/multiview_generator/tests/_old_test_classifier.py b/multiview_generator/tests/_old_test_classifier.py
deleted file mode 100644
index 8571ace..0000000
--- a/multiview_generator/tests/_old_test_classifier.py
+++ /dev/null
@@ -1,823 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Thu Nov 28 14:14:46 2019
-
-@author: bernardet
-"""
-
-from multiviews_datasets import generator_multiviews_dataset, results_to_csv
-from sklearn.svm import SVC
-from sklearn.naive_bayes import GaussianNB
-from sklearn.model_selection import cross_val_score, StratifiedKFold
-from sklearn.metrics import accuracy_score
-from collections import Counter
-from mpl_toolkits.mplot3d import Axes3D
-from math import sqrt
-import numpy as np
-import matplotlib.pyplot as plt
-import pandas as pd
-
-
-def majority_list(predictions_list):
-    """
-    Returns a 1D array whose i-th entry is the majority class among the i-th entries of the arrays in predictions_list
-    
-    Parameters:
-    -----------
-    predictions_list : list of 1D array
-        
-    Returns:
-    --------
-    a 1D array
-    """
-    n_samples = len(predictions_list[0])
-    # majority_prediction[i] = the prediction that occurs most often for sample i across the views
-    majority_prediction = np.array([-1]*n_samples)
-    # concatenate_predictions_list[i] = the predictions of every view for the i-th sample
-    reshape_predictions_list = [predictions_list[i].reshape(len(predictions_list[i]), 1) for i in range(len(predictions_list))]
-    concatenate_predictions_list = np.hstack(reshape_predictions_list)
-    for sample in range(n_samples):
-        # dictionary mapping each prediction (key) to its number of occurrences in concatenate_predictions_list[sample]
-        count = Counter(concatenate_predictions_list[sample])
-        maj_value = max(count.values())  # maximal number of a prediction
-        for key in count.keys():  # find the first prediction with the maximal occurrence count
-            if count[key] == maj_value:
-                majority_prediction[sample] = key
-                break
-        
-    return majority_prediction
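-
-# Worked example with three views and two samples:
-#     majority_list([np.array([0, 1]), np.array([1, 1]), np.array([0, 0])])
-#     -> array([0, 1])   (sample 0: votes 0, 1, 0 ; sample 1: votes 1, 1, 0)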
-
-
-def majority_score(views_dictionary, integer_labels, cv=10, classifier="SVM", classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}):
-    """
-    Returns the mean and the standard deviation of the accuracy score when the prediction for each sample is the majority vote over the views' predictions
-    
-    Parameters:
-    -----------
-    views_dictionary : dict
-    integer_labels : array
-    cv : int
-    classifier : str
-    classifier_dictionary : dict    
-        
-    Returns:
-    --------
-    Two floats
-    """   
-    skf = StratifiedKFold(n_splits=cv, random_state=1, shuffle=True)  # provides cv train/test indices to split data in cv train/test sets.
-    prediction_list = [[] for i in range(cv)]  # for majority_list function
-    test_list = [[] for i in range(cv)]  # for score
-    
-    for key in views_dictionary.keys():
-        i = 0
-        for train_index, test_index in skf.split(views_dictionary[key], integer_labels):
-            # splits data and integer label of one view in test and train sets
-            X = views_dictionary[key]
-            train, test = X[train_index], X[test_index]         
-            y_train, y_test =  integer_labels[train_index], integer_labels[test_index]
-            # trains the classifier and tests it with test set
-            clf = classifier_dictionary[classifier]
-            clf.fit(train, y_train.ravel())
-            y_pred = clf.predict(test)
-            
-            prediction_list[i].append(y_pred)
-            if len(test_list[i]) == 0:  # same y_test for all views
-                test_list[i] = y_test
-            i += 1
-            
-    score = []
-    for i in range(len(prediction_list)):
-        y_pred_majority = majority_list(prediction_list[i])  # majority of views predictions
-        score.append(accuracy_score(test_list[i].ravel(), y_pred_majority))  # score of majority of views predictions vs expected predictions
-    score = np.array(score)
-    return score.mean(), score.std()
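-
-# A minimal usage sketch (illustrative; the two random views below carry no
-# signal, so the returned mean accuracy should hover around chance level):
-#
-#     rng = np.random.RandomState(0)
-#     views = {'view0': rng.rand(60, 5), 'view1': rng.rand(60, 8)}
-#     labels = np.array([0, 1] * 30)
-#     mean_acc, std_acc = majority_score(views, labels, cv=5)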
-
-
-def score_one_multiview_dataset(cv=10, classifier="SVM", classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}, n_samples=1000, n_views=3, n_classes=2, Z_factor=1, R=2/3, n_clusters_per_class=2, class_sep_factor=2, n_informative_divid=1, d=4, D=10, standard_deviation=2):
-    """
-    Returns 3 Series: the first with the dimensions of the latent space and the views plus the percentage of latent-space dimensions unused in the views, the second with the accuracy scores and the third with the standard deviations of the accuracy scores,
-    computed for the latent space, the views, early fusion (concatenation of the views) and late fusion (majority vote over the views' predictions)
-    
-    Parameters:
-    -----------
-    cv : int
-    classifier : str
-    classifier_dictionary : dict
-    n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation : parameters of generator_multiviews_dataset
-            
-    Returns:
-    --------
-    3 Series
-    """
-    # dictionary containing the percentage of unused latent-space dimensions and the dimensions of the latent space and the views
-    dimensions = {'unsued dimension of latent space':0, "number of informative features":0, 'latent space':0}
-    dimensions.update({'view'+str(i):0 for i in range(n_views)})
-    # dictionary containing the mean of the accuracy scores
-    dict_scores_means = {'latent space':0}
-    dict_scores_means.update({'view'+str(i):0 for i in range(n_views)})
-    dict_scores_means.update({'early fusion':0, 'late fusion':0})
-    # dictionary contains standard deviation of accuracy scores
-    dict_scores_std = {'latent space':[]}
-    dict_scores_std.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_std.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains data of each view
-    dict_views = {'view'+str(i):0 for i in range(n_views)}
-    
-    Z, y, multiviews_list, unsued_dimensions_percent, n_informative = generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-    dimensions["unsued dimension of latent space"] = unsued_dimensions_percent
-    dimensions["number of informative features"] = n_informative
-    dimensions["latent space"] = Z.shape
-
-    
-    for i in range(n_views):
-        # multiviews_list[i] = (data of view i, indices of the latent-space columns used by view i)
-        dict_views['view'+str(i)] = multiviews_list[i][0]
-        dimensions['view'+str(i)] = multiviews_list[i][0].shape
-        
-    early_fusion = np.concatenate([dict_views[key] for key in dict_views.keys()], axis=1)  # = concatenation of all views
-    # dictionary of data
-    dict_data_df = {'latent space':Z}
-    dict_data_df.update({'view'+str(i):dict_views['view'+str(i)] for i in range(n_views)})
-    dict_data_df.update({'early fusion':early_fusion})
-            
-    for key in dict_data_df.keys():
-        clf = classifier_dictionary[classifier]
-        score = cross_val_score(clf, dict_data_df[key], y, scoring='accuracy', cv=cv)
-        dict_scores_means[key] = score.mean()
-        dict_scores_std[key] = score.std()
-    
-    mean_majority, std_majority = majority_score(dict_views, y, cv, classifier, classifier_dictionary)
-    dict_scores_means['late fusion'] = mean_majority
-    dict_scores_std['late fusion'] = std_majority
-    
-    df_dimensions = pd.Series(dimensions)
-    df_scores_means = pd.Series(dict_scores_means)
-    df_scores_std = pd.Series(dict_scores_std)
-            
-    return df_dimensions, df_scores_means, df_scores_std
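-
-# A minimal usage sketch (illustrative; the keyword values are arbitrary):
-#
-#     df_dim, df_means, df_std = score_one_multiview_dataset(cv=5, n_samples=500)
-#     print(df_dim)    # shapes of latent space and views, plus unused-dimension stats
-#     print(df_means)  # mean accuracy per representation, incl. early/late fusion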
- 
-
-def score_multiviews_n_samples(n_samples_list, path_graph, cv=10, classifier="SVM", classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}, n_views=3, n_classes=2, Z_factor=1, R=2/3, n_clusters_per_class=2, class_sep_factor=2, n_informative_divid=1, d=4, D=10, standard_deviation=2):
-    """
-    Returns 2 DataFrames (the first with the accuracy scores, the second with their standard deviations) for the latent space, the views,
-    early fusion (concatenation of the views) and late fusion (majority vote over the views' predictions), indexed by n_samples_list, for the indicated classifier
-    Creates and saves (at the indicated path path_graph) a graph representing the accuracy score (with confidence interval) vs n_samples_list
-    
-    Parameters:
-    -----------
-    n_samples_list : list
-                     each element of n_samples_list defines a new dataset with that many samples
-    path_graph : str
-                 path to save graphics
-    cv : int
-    classifier : str
-    classifier_dictionary : dict
-    n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation : parameters of generator_multiviews_dataset
-        
-    Returns:
-    --------
-    2 DataFrames with n_samples_list as index
-    """
-    # n_samples_list = list of sample counts sorted from lowest to highest
-    n_samples_list.sort(reverse=False)
-    # list of percentages of latent-space columns unused in the views
-    unsued_dimensions_percent_list = []
-    # list of number of informative features of latent space
-    n_informative_list = []
-    # dictionary contains mean of accuracy scores per n_samples
-    dict_scores_means = {'latent space':[]}
-    dict_scores_means.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_means.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains standard deviation of accuracy scores per n_samples
-    dict_scores_std = {'latent space':[]}
-    dict_scores_std.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_std.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains data of each view
-    dict_views = {'view'+str(i):0 for i in range(n_views)}
-    
-    for n_samples in n_samples_list:
-        Z, y, multiviews_list, unsued_dimensions_percent, n_informative = generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-        unsued_dimensions_percent_list.append(unsued_dimensions_percent)
-        n_informative_list.append(n_informative)
-        
-
-        for i in range(n_views):
-            # multiviews_list[i] = (data of view i, indices of the latent-space columns used by view i)
-            dict_views['view'+str(i)] = multiviews_list[i][0]
-                    
-        early_fusion = np.concatenate([dict_views[key] for key in dict_views.keys()], axis=1)  # = concatenation of all views
-        # dictionary of data
-        dict_data = {'latent space':Z}
-        dict_data.update({'view'+str(i):dict_views['view'+str(i)] for i in range(n_views)})
-        dict_data.update({'early fusion':early_fusion})
-        
-        for key in dict_data.keys():
-            clf = classifier_dictionary[classifier]
-            score = cross_val_score(clf, dict_data[key], y, scoring='accuracy', cv=cv)
-            dict_scores_means[key].append(score.mean())
-            dict_scores_std[key].append(score.std())
-                
-        mean_majority, std_majority = majority_score(dict_views, y, cv, classifier, classifier_dictionary)
-        dict_scores_means['late fusion'].append(mean_majority)
-        dict_scores_std['late fusion'].append(std_majority)
-            
-    df_scores_means = pd.DataFrame(dict_scores_means, index=n_samples_list)
-    df_scores_std = pd.DataFrame(dict_scores_std, index=n_samples_list)
-
-    plt.figure()
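-    # Error bars: 1.96 * std / sqrt(cv) is the half-width of an approximate 95%
-    # confidence interval for the mean score, treating the cv fold scores as
-    # independent (normal approximation).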
-    for key in dict_scores_means.keys():
-        plt.errorbar(n_samples_list, dict_scores_means[key], 1.96*np.array(dict_scores_std[key])/sqrt(cv), label=key)
-    # index and label for graphic
-    label_index = []
-    for n_samples, percent, n_informative in zip(n_samples_list, unsued_dimensions_percent_list, n_informative_list):
-        label_index.append(str(n_samples)+'\n'+str(percent)+'\n'+str(n_informative))
-
-    plt.xticks(n_samples_list, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.xlabel("Number of samples\nPercentage of dimensions of latent space unsued in views\nNumber of informative features")
-    plt.ylabel("Accuracy score for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04,0.5), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - R = "+str(round(R, 4))+"\nfactor of latent space dimension = "+str(Z_factor)+" - number of classes = "+str(n_classes)+"\nAccuracy score vs number of samples for classifier "+classifier)
-    plt.savefig(path_graph+"score_samples_"+str(n_views)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-
-    return df_scores_means, df_scores_std
-
-
-def graph_comparaison_classifier_scores_n_samples(classifier1, classifier2, n_samples_list, path_graph, cv=10, classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}, n_views=3, n_classes=2, Z_factor=1, R=2/3, n_clusters_per_class=2, class_sep_factor=2, n_informative_divid=1, d=4, D=10, standard_deviation=2):
-    """
-    Creates and saves (at the indicated path path_graph) multiple graphs representing the scores of classifier2 vs the scores of classifier1 (one graph per column of the result of score_multiviews_n_samples)
-    
-    Parameters:
-    -----------
-    classifier1 : str
-    classifier2 : str
-    n_samples_list : list
-                     each element of n_samples_list defines a new dataset with that many samples
-    path_graph : str
-                 path to save graphics
-    cv : int
-    classifier_dictionary : dict
-    n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation : parameters of generator_multiviews_dataset
-        
-    Returns:
-    --------
-    None
-    """    
-    df_scores_clf1_means, df_scores_clf1_std = score_multiviews_n_samples(n_samples_list, path_graph, cv, classifier1, classifier_dictionary, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-    df_scores_clf2_means, df_scores_clf2_std = score_multiviews_n_samples(n_samples_list, path_graph, cv, classifier2, classifier_dictionary, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-    
-    n_samples_list = df_scores_clf1_means.index
-    keys = df_scores_clf1_means.keys()
-
-    for key in keys:
-        plt.figure()
-        plt.scatter(df_scores_clf1_means[key].values, df_scores_clf2_means[key].values, c=df_scores_clf1_means[key].values)
-        plt.plot([0.0, 1.1], [0.0, 1.1], "--", c=".7")  # diagonal
-        plt.xlabel("Accuracy score for "+classifier1)
-        plt.ylabel("Accuracy score for "+classifier2)
-        plt.xlim(0, 1)
-        plt.ylim(0, 1)
-        plt.title("number of views = "+str(n_views)+" - R = "+str(round(R, 4))+" - number of classes = "+str(n_classes)+"\nAccuracy score of "+key+" for "+classifier2+" vs "+classifier1)
-        plt.savefig(path_graph+classifier1+"_"+classifier2+"_"+str(n_views)+"_"+key+".png")
-        plt.show()
-        plt.close()
-    
-    
-def score_multiviews_R(R_list, path_graph, cv=10, classifier="SVM", classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}, n_samples=1000, n_views=3, n_classes=2, Z_factor=1, n_clusters_per_class=2, class_sep_factor=2, n_informative_divid=1, d=4, D=10, standard_deviation=2):
-    """
-    Returns 2 DataFrames (the first with the accuracy scores, the second with their standard deviations) for the latent space, the views,
-    early fusion (concatenation of the views) and late fusion (majority vote over the views' predictions), indexed by R_list, for the indicated classifier
-    Creates and saves (at the indicated path path_graph) a graph representing the accuracy score (with confidence interval) vs R_list
-    
-    Parameters:
-    -----------
-    R_list : list
-             each element of R_list defines a new dataset with that element as R
-    path_graph : str
-                 path to save graphics
-    cv : int
-    classifier : str
-    classifier_dictionary : dict
-    n_samples, n_views, n_classes, Z_factor, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation : parameters of generator_multiviews_dataset
-            
-    Returns:
-    --------
-    2 DataFrames with R_list as index
-    """
-    # R_list = list of values of R sorted from lowest to highest
-    R_list.sort(reverse=False)
-    # list of percentages of latent-space columns unused in the views
-    unsued_dimensions_percent_list = []
-    # list of number of informative features of latent space
-    n_informative_list = []
-    # dictionary contains mean of accuracy scores per R
-    dict_scores_means = {'latent space':[]}
-    dict_scores_means.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_means.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains standard deviation of accuracy scores per R
-    dict_scores_std = {'latent space':[]}
-    dict_scores_std.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_std.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains data of each view
-    dict_views = {'view'+str(i):0 for i in range(n_views)}
-    
-    for R in R_list:
-        Z, y, multiviews_list, unsued_dimensions_percent, n_informative = generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-        unsued_dimensions_percent_list.append(unsued_dimensions_percent)
-        n_informative_list.append(n_informative)
-        
-        for i in range(n_views):
-            # multiviews_list[i] = (data of view i, indices of the latent-space columns used by view i)
-            dict_views['view'+str(i)] = multiviews_list[i][0]
-            
-        early_fusion = np.concatenate([dict_views[key] for key in dict_views.keys()], axis=1)  # = concatenation of all views
-        # dictionary of data
-        dict_data_df = {'latent space':Z}
-        dict_data_df.update({'view'+str(i):dict_views['view'+str(i)] for i in range(n_views)})
-        dict_data_df.update({'early fusion':early_fusion})
-                
-        for key in dict_data_df.keys():
-            clf = classifier_dictionary[classifier]
-            score = cross_val_score(clf, dict_data_df[key], y, scoring='accuracy', cv=cv)
-            dict_scores_means[key].append(score.mean())
-            dict_scores_std[key].append(score.std())
-        
-        mean_majority, std_majority = majority_score(dict_views, y, cv, classifier, classifier_dictionary)
-        dict_scores_means['late fusion'].append(mean_majority)
-        dict_scores_std['late fusion'].append(std_majority)
-    
-    df_scores_means = pd.DataFrame(dict_scores_means, index=R_list)
-    df_scores_std = pd.DataFrame(dict_scores_std, index=R_list)
-    
-    plt.figure()
-    for key in dict_scores_means.keys():
-        plt.errorbar(R_list, dict_scores_means[key], 1.96*np.array(dict_scores_std[key])/sqrt(cv), label=key)
-    # index and label for graphic
-    label_index = []
-    R_label = []
-    for i in range(0, len(R_list), 4):
-        R_label.append(R_list[i])
-        label_index.append(str(round(R_list[i], 2))+'\n'+str(unsued_dimensions_percent_list[i])+'\n'+str(n_informative_list[i]))
-    
-    plt.xticks(R_label, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.xlabel("R\nPercentage of dimensions of latent space unsued in views\nNumber of informative features")
-    plt.ylabel("Accuracy score for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04,0.5), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - number of samples = "+str(n_samples)+"\nfactor of latent space dimension = "+str(Z_factor)+" - number of classes = "+str(n_classes)+"\nAccuracy score vs R for classifier "+classifier)
-    plt.savefig(path_graph+"score_R_"+str(n_views)+"_"+str(n_samples)+"_"+str(Z_factor)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-        
-    return df_scores_means, df_scores_std
-
-def score_multiviews_Z_factor(Z_factor_list, path_graph, cv=10, classifier="SVM", classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}, n_samples=1000, n_views=3, n_classes=2, R=2/3, n_clusters_per_class=2, class_sep_factor=2, n_informative_divid=1, d=4, D=10, standard_deviation=2):
-    """
-    Returns 3 DataFrames (the first with the accuracy scores, the second with their standard deviations and the third with the error rates) for the latent space, the views,
-    early fusion (concatenation of the views) and late fusion (majority vote over the views' predictions), indexed by the sum of the view dimensions divided by the latent-space dimension (one value per element of Z_factor_list), for the indicated classifier
-    Creates and saves (at the indicated path path_graph) a graph representing the accuracy score and a graph representing the error rate (1 - accuracy score) vs that ratio
-    
-    Parameters:
-    -----------
-    Z_factor_list : list
-                    each element of Z_factor_list defines a new dataset with that element as Z_factor
-    path_graph : str
-                 path to save graphics
-    cv : int
-    classifier : str
-    classifier_dictionary : dict
-    n_samples, n_views, n_classes, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation : parameters of generator_multiviews_dataset
-            
-    Returns:
-    --------
-    3 DataFrames with Z_factor_list as index
-    """
-    # Z_factor_list = list of values of Z_factor sorted from highest to lowest
-    Z_factor_list.sort(reverse=True)
-    # list of sum of views dimension for each Z_factor_list item
-    d_v = []
-    # list of Z dimension for each Z_factor_list item
-    Z_dim_list = []
-    # list of percentages of latent-space columns unused in the views
-    unsued_dimensions_percent_list = []
-    # list of number of informative features of latent space
-    n_informative_list = []
-    # dictionary contains mean of accuracy scores per Z_factor
-    dict_scores_means = {'latent space':[]}
-    dict_scores_means.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_means.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains error rate per Z_factor
-    dict_scores_error = {'latent space':[]}
-    dict_scores_error.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_error.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains standard deviation of accuracy scores per Z_factor
-    dict_scores_std = {'latent space':[]}
-    dict_scores_std.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_std.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains data of each view
-    dict_views = {'view'+str(i):0 for i in range(n_views)}
-        
-    for Z_factor in Z_factor_list:
-        Z, y, multiviews_list, unsued_dimensions_percent, n_informative = generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-        unsued_dimensions_percent_list.append(unsued_dimensions_percent)
-        n_informative_list.append(n_informative)
-        
-        for i in range(n_views):
-            # multiviews_list[i] = (columns / data of view i, numeros of columns of view i)
-            dict_views['view'+str(i)] = multiviews_list[i][0]
-            
-        early_fusion = np.concatenate([dict_views[key] for key in dict_views.keys()], axis=1)  # = concatenation of all views        
-        # dimension = number of columns
-        d_v.append(early_fusion.shape[1])
-        Z_dim_list.append(Z.shape[1])
-        # dictionary of data
-        dict_data_df = {'latent space':Z}
-        dict_data_df.update({'view'+str(i):dict_views['view'+str(i)] for i in range(n_views)})
-        dict_data_df.update({'early fusion':early_fusion})
-                
-        for key in dict_data_df.keys():
-            clf = classifier_dictionary[classifier]
-            score = cross_val_score(clf, dict_data_df[key], y, scoring='accuracy', cv=cv)
-            dict_scores_means[key].append(score.mean())
-            dict_scores_error[key].append(1 - score.mean())
-            dict_scores_std[key].append(score.std())
-        
-        mean_majority, std_majority = majority_score(dict_views, y, cv, classifier, classifier_dictionary)
-        dict_scores_means['late fusion'].append(mean_majority)
-        dict_scores_error['late fusion'].append(1 - mean_majority)
-        dict_scores_std['late fusion'].append(std_majority)
-        
-    d_v_divid_Z = np.divide(np.array(d_v), np.array(Z_dim_list))
-    
-    df_scores_means = pd.DataFrame(dict_scores_means, index=d_v_divid_Z)
-    df_scores_error = pd.DataFrame(dict_scores_error, index=d_v_divid_Z)
-    df_scores_std = pd.DataFrame(dict_scores_std, index=d_v_divid_Z)
-    
-    # index and label for graphics
-    label_index = [chr(i) for i in range(ord('a'),ord('z')+1)]
-    label_index = label_index[0:len(d_v)]
-    label_value = ""
-    for label, v_Z, dim_v, dim_Z, Z_factor, percent, n_informative in zip(label_index, d_v_divid_Z, d_v, Z_dim_list, Z_factor_list, unsued_dimensions_percent_list, n_informative_list):
-        label_value = label_value + label+" : V/Z = "+str(round(v_Z, 4))+", V = "+str(dim_v)+", Z = "+str(dim_Z)+", Z_factor = "+str(Z_factor)+", % ="+str(percent)+", n_informative = "+str(n_informative)+'\n'
-
-    x_label = "V/Z = sum of views dimension divided by latent space dimension with :\nV = sum of views dimension\nZ = latent space dimension multiplied by Z_factor\n% = percentage of dimensions of latent space unsued in views\nn_informative = number of informative features"
-    
-    plt.figure(figsize=(10, 10))  # accuracy score vs d_v_divid_Z
-    for key in dict_scores_means.keys():
-        plt.semilogx(d_v_divid_Z, dict_scores_means[key], '.-', label=key)
-    plt.xticks(d_v_divid_Z, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.text(plt.xlim()[1]+0.05, plt.ylim()[1]-(plt.ylim()[1]-plt.ylim()[0])/2, label_value)
-    plt.xlabel(x_label)
-    plt.ylabel("Accuracy score for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04, 1), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - number of samples = "+str(n_samples)+"\nR = "+str(round(R, 4))+" - number of classes = "+str(n_classes)+"\nAccuracy score vs ratio sum of views dimension / latent space dimension for classifier "+classifier)    
-    plt.savefig(path_graph+"score_Z_factor_"+str(n_views)+"_"+str(n_samples)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-    
-    plt.figure(figsize=(10, 10))  # error rate vs d_v_divid_Z
-    for key in dict_scores_means.keys():
-        plt.semilogx(d_v_divid_Z, dict_scores_error[key], '.-', label=key)
-    plt.xticks(d_v_divid_Z, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.text(plt.xlim()[1]+0.05, plt.ylim()[1]-(plt.ylim()[1]-plt.ylim()[0])/2, label_value)
-    plt.xlabel(x_label)
-    plt.ylabel("Error rate for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04, 1), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - number of samples = "+str(n_samples)+"\nR = "+str(round(R, 4))+" - number of classes = "+str(n_classes)+"\nError rate vs ratio sum of views dimension / latent space dimension for classifier "+classifier)    
-    plt.savefig(path_graph+"error_Z_factor_"+str(n_views)+"_"+str(n_samples)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-    """
-    plt.figure(figsize=(10, 10))
-    
-    for key in dict_scores_means.keys():
-        plt.errorbar(d_v_divid_Z, dict_scores_means[key], 1.96*np.array(dict_scores_std[key])/sqrt(cv), label=key)
-    plt.xticks(d_v_divid_Z, label_index, fontsize='medium', multialignment='center')
-    plt.text(plt.xlim()[1]+0.05, plt.ylim()[1]-(plt.ylim()[1]-plt.ylim()[0])/2, label_value)
-    plt.xlabel(x_label)
-    plt.ylabel("Accuracy score for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04, 1), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - R = "+str(round(R, 4))+"\nAccuracy score vs ratio sum of views dimension / latent space dimension for classifier "+classifier)
-    plt.savefig(path_graph+"score_Z_factor_errorbar_"+str(n_views)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-    """
-    plt.figure(figsize=(10, 10))  # accuracy score of early fusion divided by accuracy score of each view vs d_v_divid_Z
-    for view in dict_views.keys():
-        plt.semilogx(d_v_divid_Z, dict_scores_means['early fusion']/df_scores_means[view], '.-', label='early fusion score divided by '+view+' score')
-    plt.xticks(d_v_divid_Z, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.text(plt.xlim()[1]+0.05, plt.ylim()[1]-(plt.ylim()[1]-plt.ylim()[0])/2, label_value)
-    plt.xlabel(x_label)
-    plt.ylabel("Ratio accuracy score for early fusion / accuracy score for each view for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04, 1), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - R = "+str(round(R, 4))+"\nRatio accuracy score for early fusion / accuracy score for each view \nvs ratio sum of views dimension / latent space dimension for classifier "+classifier)
-    plt.savefig(path_graph+"score_Z_factor_majority_view_divid_"+str(n_views)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-    
-    plt.figure(figsize=(10, 10))  # accuracy score of late fusion divided by accuracy score of each view vs d_v_divid_Z
-    for view in dict_views.keys():
-        plt.semilogx(d_v_divid_Z, dict_scores_means['late fusion']/df_scores_means[view], '.-', label='late fusion score divided by '+view+' score')
-    plt.xticks(d_v_divid_Z, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.text(plt.xlim()[1]+0.05, plt.ylim()[1]-(plt.ylim()[1]-plt.ylim()[0])/2, label_value)
-    plt.xlabel(x_label)
-    plt.ylabel("Ratio accuracy score for late fusion / accuracy score for each view for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04, 1), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - R = "+str(round(R, 4))+"\nRation accuracy score for late fusion / accuracy score for each view \nvs ratio sum of views dimension / latent space dimension for classifier "+classifier)
-    plt.savefig(path_graph+"score_Z_factor_all_view_divid_"+str(n_views)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-        
-    return df_scores_means, df_scores_std, df_scores_error
-
-
-def score_multiviews_n_views_R(n_views_list, R_list, path_graph, cv=10, classifier="SVM", classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}, n_samples=1000, n_classes=2, Z_factor=1, n_clusters_per_class=2, class_sep_factor=2, n_informative_divid=1, d=4, D=10, standard_deviation=2):
-    """
-    Returns a dictionary with n_views_list as key containing a list of DataFrames (represented accuracy score divided by accuracy score for R=1 <> redundancy null) of views, 
-    early fusion predictions (concatenate views predictions and late fusion predictions (majority views predictions) with R_list as index for the indicated classifier per key
-    Creates and saves (at the indicated path path_graph) a graph per value of n_views_list represented accuracy score divided by accuracy score for R=1 vs R_list
-    
-    Parameters:
-    -----------
-    n_views_list : list
-                   each element from n_views_list defines a new dataset with element as n_views 
-    R_list : list
-             each element from R_list defines a new dataset with element as R                   
-    path_graph : str
-                 path to save graphics
-    cv : int
-    classifier : str
-    classifier_dictionary : dict
-    n_samples, n_classes, Z_factor, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation : parameters of generator_multiviews_dataset
-            
-    Returns:
-    --------
-    a dictionary with n_views_list as key containing a list of DataFrames (represented accuracy score divided by accuracy score for R=1 <> redundancy null) with R_list as index per value of n_views_list
-    """
-    dict_n_views_R_ratio = {key:0 for key in n_views_list}
-    # n_views_list  = list of diverse values of n_views from the lowest to the highest
-    n_views_list.sort(reverse=False)
-    # same views have same colors on each graphs
-    dict_colors = {'view'+str(i):0 for i in range(n_views_list[-1])}
-    prop_cycle = plt.rcParams['axes.prop_cycle']
-    colors = prop_cycle.by_key()['color']
-    for key, c in zip(dict_colors.keys(), colors):
-        dict_colors[key] = c    
-    dict_colors.update({'early fusion':'purple', 'late fusion':'maroon'})
-    
-    for n_views in n_views_list:    
-        # R_list  = list of diverse values of R from the lowest to the highest
-        R_list.sort(reverse=False)
-        # list of percentage of unsued columns of latent space in views
-        unsued_dimensions_percent_list = []
-        # list of number of informative features of latent space
-        n_informative_list = []
-        # dictionary contains mean of accuracy scores per R
-        dict_scores_means = {'view'+str(i):[] for i in range(n_views)}
-        dict_scores_means.update({'early fusion':[], 'late fusion':[]})
-        # dictionary of list of scores' mean of view for diverse R divided by score's mean of view for R = 1 (<> redundancy null)
-        dict_scores_ratio_R_1 = {'view'+str(i):0 for i in range(n_views)}
-        dict_scores_ratio_R_1.update({'early fusion':0, 'late fusion':0})
-        # dictionary contains data of each view
-        dict_views = {'view'+str(i):0 for i in range(n_views)}
-        
-        for R in R_list:
-            Z, y, multiviews_list, unsued_dimensions_percent, n_informative = generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-            unsued_dimensions_percent_list.append(unsued_dimensions_percent)
-            n_informative_list.append(n_informative)
-            
-            for i in range(n_views):
-                # multiviews_list[i] = (columns / data of view i, numeros of columns of view i)
-                dict_views['view'+str(i)] = multiviews_list[i][0]
-                
-            early_fusion = np.concatenate([dict_views[key] for key in dict_views.keys()], axis=1)  # = concatenation of all views
-            # dictionary of data
-            dict_data_df = {'view'+str(i):dict_views['view'+str(i)] for i in range(n_views)}
-            dict_data_df.update({'early fusion':early_fusion})
-                    
-            for key in dict_data_df.keys():
-                clf = classifier_dictionary[classifier]
-                score = cross_val_score(clf, dict_data_df[key], y, scoring='accuracy', cv=cv)
-                dict_scores_means[key].append(score.mean())
-            
-            mean_majority, std_majority = majority_score(dict_views, y, cv, classifier, classifier_dictionary)
-            dict_scores_means['late fusion'].append(mean_majority)
-        
-        for key in dict_scores_means.keys():
-            score_R_1 = dict_scores_means[key][-1]  # R = 1 = last value of R_list => last score value in dict_scores_means[key]
-            dict_scores_ratio_R_1[key] = np.divide(np.array(dict_scores_means[key]), score_R_1)
-                
-        df_scores_ratio_R_1 = pd.DataFrame(dict_scores_ratio_R_1, index=R_list)
-
-        plt.figure()
-        for key in dict_scores_means.keys():
-            plt.plot(R_list, dict_scores_ratio_R_1[key], '.-',  color=dict_colors[key], label=key)
-        # index and label for graphic
-        label_index = []
-        R_label = []
-        for i in range(0, len(R_list), 4):
-            R_label.append(R_list[i])
-            label_index.append(str(round(R_list[i], 2))+'\n'+str(unsued_dimensions_percent_list[i])+'\n'+str(n_informative_list[i]))
-        
-        plt.xticks(R_label, label_index, fontsize='medium', multialignment='center')  # new x indexes
-        plt.xlabel("R\nPercentage of dimensions of latent space unsued in views\nNumber of informative features")
-        plt.ylabel("Ratio accuracy score / accuracy score for R = 1 for "+classifier)
-        plt.legend(bbox_to_anchor=(1.04,0.5), loc="center left", borderaxespad=0)
-        plt.title("number of views = "+str(n_views)+" - number of samples = "+str(n_samples)+"\nfactor of latent space dimension = "+str(Z_factor)+" - number of classes = "+str(n_classes)+"\nRatio accuracy score / accuracy score for R = 1\n(redundancy null) vs R for classifier "+classifier)
-        plt.savefig(path_graph+"score_R_divid_R_1_"+str(n_views)+"_"+str(n_samples)+"_"+str(Z_factor)+"_"+classifier+".png", bbox_inches='tight')
-        plt.show()
-        plt.close()
-            
-        dict_n_views_R_ratio[n_views] = df_scores_ratio_R_1
-        
-    plt.figure()
-    ax = plt.axes(projection="3d")
-    
-    for n_views in n_views_list:
-        for key in dict_n_views_R_ratio[n_views].keys():
-            if n_views == n_views_list[-1]:  # print legends only once
-                ax.plot(R_list, dict_n_views_R_ratio[n_views][key], n_views, color=dict_colors[key], label=key)
-            else:
-                ax.plot(R_list, dict_n_views_R_ratio[n_views][key], n_views, color=dict_colors[key])
-    
-    ax.set_xlabel("R")
-    ax.set_ylabel("Ratio accuracy score / accuracy score for R = 1 for "+classifier)
-    ax.set_zlabel("Number of views")
-    plt.legend(bbox_to_anchor=(1.04,0.5), loc="center left", borderaxespad=0)
-    plt.title("number of samples = "+str(n_samples)+" - factor of latent space dimension = "+str(Z_factor)+" - number of classes = "+str(n_classes)+"\nRatio accuracy score / accuracy score for R = 1 (redundancy null) vs R, number of views for classifier "+classifier)
-    plt.savefig(path_graph+"score_R_divid_R_1_all_n_views"+"_"+str(n_samples)+"_"+str(Z_factor)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-
-    return dict_n_views_R_ratio
-
-
-def score_multiviews_class_sep(class_sep_factor_list, path_graph, cv=10, classifier="SVM", classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}, n_views=3, n_samples=1000, n_classes=2, Z_factor=1, R=2/3, n_clusters_per_class=2, n_informative_divid=1, d=4, D=10, standard_deviation=2):
-    """
-    Returns 2 DataFrames (first with accuracy score and the second with the standard deivation of accuracy score) of latent space, views, 
-    early fusion predictions (concatenate views predictions) and late fusion predictions (majority views predictions) with class_sep_factor_list as index for the indicated classifier
-    Creates and saves (at the indicated path path_graph) a graph represented accuracy score (with confidence interval) vs class_sep_factor_list
-    
-    Parameters:
-    -----------
-    class_sep_factor_list : list
-                            each element from n_samples_list defines a new dataset
-    path_graph : str
-    cv : int
-    classifier : str
-    classifier_dictionary : dict
-    n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, n_informative_divid, d, D, standard_deviation : parameters of generator_multiviews_dataset
-        
-    Returns:
-    --------
-    2 DataFrames with n_samples_list as index
-    """
-    # list of percentage of unsued columns of latent space in views
-    unsued_dimensions_percent_list = []
-    # list of number of informative features of latent space
-    n_informative_list = []
-    # dictionary contains mean of accuracy scores per class_sep_factor
-    dict_scores_means = {'latent space':[]}
-    dict_scores_means.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_means.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains standard deviation of accuracy scores per class_sep_factor
-    dict_scores_std = {'latent space':[]}
-    dict_scores_std.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_std.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains data of each view
-    dict_views = {'view'+str(i):0 for i in range(n_views)}
-    
-    for class_sep_factor in class_sep_factor_list:
-        Z, y, multiviews_list, unsued_dimensions_percent, n_informative = generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-        unsued_dimensions_percent_list.append(unsued_dimensions_percent)
-        n_informative_list.append(n_informative)
-
-        for i in range(n_views):
-            # multiviews_list[i] = (columns / data of view i, numeros of columns of view i)
-            dict_views['view'+str(i)] = multiviews_list[i][0]
-        
-        early_fusion = np.concatenate([dict_views[key] for key in dict_views.keys()], axis=1)  # = concatenation of all views
-        # dictionary of data
-        dict_data = {'latent space':Z}
-        dict_data.update({'view'+str(i):dict_views['view'+str(i)] for i in range(n_views)})
-        dict_data.update({'early fusion':early_fusion})
-                
-        for key in dict_data.keys():
-            print('key', key)
-            clf = classifier_dictionary[classifier]
-            score = cross_val_score(clf, dict_data[key], y, scoring='accuracy', cv=cv)
-            dict_scores_means[key].append(score.mean())
-            dict_scores_std[key].append(score.std())
-                
-        mean_majority, std_majority = majority_score(dict_views, y, cv, classifier, classifier_dictionary)
-        dict_scores_means['late fusion'].append(mean_majority)
-        dict_scores_std['late fusion'].append(std_majority)
-        
-        print(dict_scores_means)
-                    
-    df_scores_means = pd.DataFrame(dict_scores_means, index=class_sep_factor_list)
-    df_scores_std = pd.DataFrame(dict_scores_std, index=class_sep_factor_list)
-    
-    plt.figure()
-    for key in dict_scores_means.keys():
-        plt.errorbar(class_sep_factor_list, dict_scores_means[key], 1.96*np.array(dict_scores_std[key])/sqrt(cv), label=key)
-    # index and label for graphic
-    label_index = []
-    for n_samples, percent, n_informative in zip(class_sep_factor_list, unsued_dimensions_percent_list, n_informative_list):
-        label_index.append(str(n_samples)+'\n'+str(percent)+'\n'+str(n_informative))
-
-    plt.xticks(class_sep_factor_list, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.xlabel("Factor (class_sep = factor*n_clusters_per_class)\nPercentage of dimensions of latent space unsued in views\nNumber of informative features")
-    plt.ylabel("Accuracy score for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04,0.5), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - R = "+str(round(R, 4))+"\nfactor of latent space dimension = "+str(Z_factor)+" - number of classes = "+str(n_classes)+"\nAccuracy score vs factor of class_sep for classifier "+classifier)
-    plt.savefig(path_graph+"score_class_sep_"+str(n_views)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-        
-    return df_scores_means, df_scores_std
-
-
-def score_multiviews_n_informative_divided(n_informative_divid_list, path_graph, cv=10, classifier="SVM", classifier_dictionary={'SVM':SVC(kernel='linear'), 'NB':GaussianNB()}, n_views=3, n_samples=1000, n_classes=2, Z_factor=1, R=2/3, n_clusters_per_class=2, class_sep_factor=2, d=4, D=10, standard_deviation=2):
-    """
-    Returns 2 DataFrames (first with accuracy score and the second with the standard deivation of accuracy score) of latent space, views, 
-    early fusion predictions (concatenate views predictions) and late fusion predictions (majority views predictions) with n_informative_divid_list as index for the indicated classifier
-    Creates and saves (at the indicated path path_graph) a graph represented accuracy score (with confidence interval) vs n_informative_divid_list
-    
-    Parameters:
-    -----------
-    n_informative_divid_list : list
-                                 each element from n_informative_divid_list defines a new dataset with element as n_informative_divid
-    path_graph : str
-    cv : int
-    classifier : str
-    classifier_dictionary : dict
-    n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, d, D, standard_deviation : parameters of generator_multiviews_dataset
-        
-    Returns:
-    --------
-    2 DataFrames with n_samples_list as index
-    """
-    # list of percentage of unsued columns of latent space in views
-    unsued_dimensions_percent_list = []
-    # list of number of informative features of latent space
-    n_informative_list = []
-    # dictionary contains mean of accuracy scores per n_informative_divid
-    dict_scores_means = {'latent space':[]}
-    dict_scores_means.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_means.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains standard deviation of accuracy scores per n_informative_divid
-    dict_scores_std = {'latent space':[]}
-    dict_scores_std.update({'view'+str(i):[] for i in range(n_views)})
-    dict_scores_std.update({'early fusion':[], 'late fusion':[]})
-    # dictionary contains data of each view
-    dict_views = {'view'+str(i):0 for i in range(n_views)}
-    
-    for n_informative_divid in n_informative_divid_list:
-        Z, y, multiviews_list, unsued_dimensions_percent, n_informative = generator_multiviews_dataset(n_samples, n_views, n_classes, Z_factor, R, n_clusters_per_class, class_sep_factor, n_informative_divid, d, D, standard_deviation)
-        unsued_dimensions_percent_list.append(unsued_dimensions_percent)
-        n_informative_list.append(n_informative)
-
-        for i in range(n_views):
-            # multiviews_list[i] = (columns / data of view i, numeros of columns of view i)
-            dict_views['view'+str(i)] = multiviews_list[i][0]
-        
-        early_fusion = np.concatenate([dict_views[key] for key in dict_views.keys()], axis=1)  # = concatenation of all views
-        # dictionary of data
-        dict_data = {'latent space':Z}
-        dict_data.update({'view'+str(i):dict_views['view'+str(i)] for i in range(n_views)})
-        dict_data.update({'early fusion':early_fusion})
-                
-        for key in dict_data.keys():
-            clf = classifier_dictionary[classifier]
-            score = cross_val_score(clf, dict_data[key], y, scoring='accuracy', cv=cv)
-            dict_scores_means[key].append(score.mean())
-            dict_scores_std[key].append(score.std())
-                
-        mean_majority, std_majority = majority_score(dict_views, y, cv, classifier, classifier_dictionary)
-        dict_scores_means['late fusion'].append(mean_majority)
-        dict_scores_std['late fusion'].append(std_majority)
-
-    df_scores_means = pd.DataFrame(dict_scores_means, index=n_informative_divid_list)
-    df_scores_std = pd.DataFrame(dict_scores_std, index=n_informative_divid_list)
-    
-    plt.figure()
-    for key in dict_scores_means.keys():
-        plt.errorbar(n_informative_divid_list, dict_scores_means[key], 1.96*np.array(dict_scores_std[key])/sqrt(cv), label=key)
-    # index and label for graphic
-    label_index = []
-    for n_informative_divid, percent, n_informative in zip(n_informative_divid_list, unsued_dimensions_percent_list, n_informative_list):
-        label_index.append(str(n_informative_divid)+'\n'+str(percent)+'\n'+str(n_informative))
-
-    plt.xticks(n_informative_divid_list, label_index, fontsize='medium', multialignment='center')  # new x indexes
-    plt.xlabel("Factor (n_informative = dimension of latent space / factor)\nPercentage of dimensions of latent space unsued in views\nNumber of informative features")
-    plt.ylabel("Accuracy score for "+classifier)
-    plt.legend(bbox_to_anchor=(1.04,0.5), loc="center left", borderaxespad=0)
-    plt.title("number of views = "+str(n_views)+" - R = "+str(round(R, 4))+"\nfactor of latent space dimension = "+str(Z_factor)+" - number of classes = "+str(n_classes)+"\nAccuracy score vs n_informative_divid for classifier "+classifier)
-    plt.savefig(path_graph+"score_n_informative_"+str(n_views)+"_"+classifier+".png", bbox_inches='tight')
-    plt.show()
-    plt.close()
-        
-    return df_scores_means, df_scores_std
diff --git a/multiview_generator/tests/_old_unit_test_update.py b/multiview_generator/tests/_old_unit_test_update.py
deleted file mode 100644
index 44c9489..0000000
--- a/multiview_generator/tests/_old_unit_test_update.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import unittest
-import numpy as np
-
-from .._old_update_baptiste import MultiviewDatasetGenetator
-
-class TestSubSmaple(unittest.TestCase):
-    @classmethod
-    def setUpClass(cls):
-        cls.random_state = np.random.RandomState(42)
-        cls.indices = np.arange(100)
-        cls.quantity = 10
-        cls.method = "block"
-        cls.beggining = 0
-        cls.generator = MultiviewDatasetGenetator(random_state=cls.random_state)
-
-    def test_block_simple(self):
-        chosen_indices = self.generator.sub_sample(self.indices, self.quantity, self.method, self.beggining)
-        np.testing.assert_array_equal(np.array([0,1,2,3,4,5,6,7,8,9]), chosen_indices)
-
-    def test_block_too_big(self):
-        chosen_indices = self.generator.sub_sample(self.indices, 121,
-                                                   self.method, self.beggining)
-        np.testing.assert_array_equal(np.arange(100),
-                                      chosen_indices)
-
-    def test_block_no_beg(self):
-        chosen_indices = self.generator.sub_sample(self.indices, 10,
-                                                   self.method, None)
-        np.testing.assert_array_equal(np.array([82, 83, 84, 85, 86, 87, 88, 89, 90, 91,]),
-                                      chosen_indices)
-
-    def test_block_no_beg_too_long(self):
-        chosen_indices = self.generator.sub_sample(self.indices, 120,
-                                                   self.method, None)
-        np.testing.assert_array_equal(np.arange(100),
-                                      chosen_indices)
-    def test_choice_simple(self):
-        chosen_indices = self.generator.sub_sample(self.indices, 10,
-                                                   "choice")
-        np.testing.assert_array_equal(np.array([77, 10,  4, 83, 62, 67, 30, 45, 95, 11]),
-                                      chosen_indices)
-
-    def test_choice_too_big(self):
-        chosen_indices = self.generator.sub_sample(self.indices, 105,
-                                                   "choice")
-        self.assertEqual(100, chosen_indices.shape[0])
-        self.assertEqual(100, np.unique(chosen_indices).shape[0])
-
-
-
-if __name__ == '__main__':
-    unittest.main()
-- 
GitLab