From 9907ffa6a0c1c02cc41b8c913b6ae0c75f4c8ffe Mon Sep 17 00:00:00 2001
From: Dominique Benielli <dominique.benielli@lis-lab.fr>
Date: Tue, 14 Jan 2020 19:45:10 +0100
Subject: [PATCH] Fix input validation in UBoosting and MultiModalArray; remove stale .pyc and test files

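Rework the shape checks in UBoosting so that 1-D inputs and
feature-count mismatches raise explicit "Reshape your data" /
"Number of features" errors.  In MultiModalArray.__new__, build
views_ind for dict inputs, move the multi-view handling of 1-D
object arrays into a new _for_data helper, keep a user-supplied
view_ind, raise "Reshape your data" for degenerate or inconsistent
inputs, and fix the view_mode check ("slicing" -> "slices") so
shapes_int is computed for sliced views.  Remove the committed
__pycache__ *.pyc files and the obsolete multimodal/tests/test.py,
rename test_init_Metriclearn_array to test_init_Multimodal_array,
and add a unittest.main() entry point to test_data_sample.py.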
---
 multimodal/boosting/boost.py                  |  20 +-
 .../__pycache__/__init__.cpython-36.pyc       | Bin 304 -> 0 bytes
 .../datasets/__pycache__/base.cpython-36.pyc  | Bin 1249 -> 0 bytes
 .../__pycache__/data_sample.cpython-36.pyc    | Bin 16022 -> 0 bytes
 multimodal/datasets/data_sample.py            | 188 ++++-----------
 .../tests/__pycache__/__init__.cpython-36.pyc | Bin 168 -> 0 bytes
 .../data/__pycache__/__init__.cpython-36.pyc  | Bin 169 -> 0 bytes
 .../get_dataset_path.cpython-36.pyc           | Bin 817 -> 0 bytes
 .../__pycache__/__init__.cpython-36.pyc       | Bin 182 -> 0 bytes
 .../get_dataset_path.cpython-36.pyc           | Bin 830 -> 0 bytes
 multimodal/tests/test.py                      | 224 ------------------
 multimodal/tests/test_data_sample.py          |   4 +-
 multimodal/tests/test_mumbo.py                |   4 +-
 13 files changed, 65 insertions(+), 375 deletions(-)
 delete mode 100644 multimodal/datasets/__pycache__/__init__.cpython-36.pyc
 delete mode 100644 multimodal/datasets/__pycache__/base.cpython-36.pyc
 delete mode 100644 multimodal/datasets/__pycache__/data_sample.cpython-36.pyc
 delete mode 100644 multimodal/tests/__pycache__/__init__.cpython-36.pyc
 delete mode 100644 multimodal/tests/data/__pycache__/__init__.cpython-36.pyc
 delete mode 100644 multimodal/tests/data/__pycache__/get_dataset_path.cpython-36.pyc
 delete mode 100644 multimodal/tests/datasets/__pycache__/__init__.cpython-36.pyc
 delete mode 100644 multimodal/tests/datasets/__pycache__/get_dataset_path.cpython-36.pyc
 delete mode 100644 multimodal/tests/test.py
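
Note: below is a minimal sketch, not part of the applied diff, of the
dict-input behaviour MultiModalArray is expected to expose after this
change, with views_ind now derived from the per-view widths.  The
import path, attribute names and expected values come from the class
docstring and test_data_sample.py; the variable names and the random
data are illustrative only.

    import numpy as np
    from multimodal.datasets.data_sample import MultiModalArray

    # Two views of 120 samples x 120 features, as in the docstring example.
    views = {0: np.random.rand(120, 120), 1: np.random.rand(120, 120)}
    mv = MultiModalArray(views)

    print(mv.shape)       # (120, 240)
    print(mv.shapes_int)  # [120, 120]
    print(mv.n_views)     # 2
    print(mv.views_ind)   # expected to equal [0, 120, 240] after this patch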

diff --git a/multimodal/boosting/boost.py b/multimodal/boosting/boost.py
index 7b039e6..5ef38dd 100644
--- a/multimodal/boosting/boost.py
+++ b/multimodal/boosting/boost.py
@@ -26,14 +26,22 @@ class UBoosting(metaclass=ABCMeta):
         else:
             check_array(X, accept_sparse=['csr', 'csc'])
         if X.ndim < 2:
-            mes = "Reshape your data"
-            raise ValueError(mes)
-        if X.ndim > 1:
+            X = X[np.newaxis, :]
             if X.shape[1] != self.n_features_:
-                mes = "Reshape your data"
                 raise ValueError("Number of features of the model must "
-                                 "match the input. Model n_features is %s and "
-                                  "input n_features is %s " % (self.n_features_, X.shape[1]))
+                                 "match the input. Model n_features is %s and "
+                                 "input n_features is %s " % (self.n_features_, X.shape[1]))
+            else:
+                mes = "Reshape your data"
+                raise ValueError(mes)
+        if X.ndim > 1:
+            if X.shape[1] != self.n_features_:
+                if X.shape[0] == self.n_features_ and X.shape[1] > 1:
+                    raise ValueError("Reshape your data")
+                else:
+                    raise ValueError("Number of features of the model must "
+                                     "match the input. Model n_features is %s and "
+                                     "input n_features is %s " % (self.n_features_, X.shape[1]))
 
 
             #
diff --git a/multimodal/datasets/__pycache__/__init__.cpython-36.pyc b/multimodal/datasets/__pycache__/__init__.cpython-36.pyc
deleted file mode 100644
index 78203c1c83371c7711d386af2cea6da83faec7cb..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 304
zcmXr!<>mU{ArhCzz`*dB0SPcMFfceUFfbI$FfcHrFr+Z%Fob|;rX0pxrYNRd<|t-H
zh7{&t22GZiAZ?nAw-~iFnQn2pB$gxwC*~I9q~7B9EzK#(^vzF6%yBF#O04wLWQmf<
z1&QT?#Pm`UOA?DyON#Z95{pxB$zziNDTyxz8&|~4z`#(%!oa|=lA(wV#0C++BJ?xz
zb5r$G@^druG7C#n^$Uvfvr<cn^&S0!bbT_*Qj7JAlQXk3OLP(D=cFbU<>@2%`XFmS
kF42#V&&<m#iI3MSsJz8tlbfGXnv-hB2nq*KfN(GZ0KQsTeE<Le

diff --git a/multimodal/datasets/__pycache__/base.cpython-36.pyc b/multimodal/datasets/__pycache__/base.cpython-36.pyc
deleted file mode 100644
index 5d08b5d131a62ac27ac9450f9ba3386c212792bd..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 1249
zcmXr!<>iv^6p4Gl#K7>F0SPcMFfceUFfbHnF)%QsFr+Z%Fyt~uF*1VKOgT(Z%qa{h
zOgSuBtWm5f3@OYxY`N@F>|j}z9FAPhC@wG?qLMX*t%V_qJB2-kqlF=gCxtVGtA!zo
zH-$Ter-dPkFNHUSuZ1CsKZPlnL6iR_$Pt>1xA+Q*GV@B}(@OJ_OEUBG{4^PFak(Ux
zBnBtu7UZOAGTvfO4#-T-&Ph#X1nFR4U;sIpi-Cc`8RTp=1_p)_hAhS!hGxcop<2co
z#uP?Lh8o5cCP{`greKCZh9HIrh7^Wi22EzaTZ|F67%MfIZZYK*++xYePfXEdy2Y86
znUk8An420O#hzDC1>&t_C<0mVD?~pdKQ~oBB|kSaFSD>TRllGpKP$DQSl`hvNY^K`
zEVWp_I5{&rvqU$yG^ZppH$NpYCnq(rC{G{3*H1|-Nh}6wN=htF)hnpH#RIZ9KChrE
zJ|e!78{`QVkk1%-7>jrq7#Nb7K|BTq1`y2(^QQ>hpEZm%Oeu`L%%Fe|X3%8v(_|_F
zDJx=QU|`T>Dq?3~V7SFvoLH7xrO8~x$-uyXkOq4TLVzs3#RF0Xa|%DmDWJe*U@YPT
zIRwmxIs_CRNC6HC=4PgTp<1RA#w?~9h8m`3CPs#Eh7{xgXY#wnRGy^Ce2XQ&AT{q6
zTLCy8Z?UA5<`!r&M=_^l#)I8=i!lx2CXgjXV25#n9hQ=rTp|Q=6+D1I!2nMPpa25-
z59}@#Z~)aXq%bx!^$XN8g5AZ~%vj5Wa8(T`<$+vPl%&aABmfE;K@h<K3OQ(42{SM-
zM6ng6mc*xIVz>$%b0AlVfm{U&a0W&mMiIs$5FZ|KAlHK226h)Dfr7$IoFRp&nW>f;
z>@ubrW>ARLFlRA0GeDz~*{_OQuNa)}^$N0cia-&f$%5ogP*~hzP0q<LPSs?&#ZsJ_
zlNQCCm{JtQo&wQU1advPt3~3Ii&7IyQsW`v9}i05#WDyF@PSgEBq#_3ZgIuOr<InJ
z7Ny3=7lESx7HeK<Zb2m^QE`CTdbx?Wm~s<u$s^LZ9xRQ68SzlJfP&x_XK`kQUT%I_
zYH<-LSc<^m4NB(VBvu59PjKKBfgFJ3azs+#u*uC&Da}c>1Lgl>kOm$`4n{6U9wr_p
L4rUG(FlGY)=~xNa

diff --git a/multimodal/datasets/__pycache__/data_sample.cpython-36.pyc b/multimodal/datasets/__pycache__/data_sample.cpython-36.pyc
deleted file mode 100644
index aad5511d634098208d0e9aa1cb41329d923a21d6..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 16022
zcmXr!<>h*`Pa<wz0t3Ti1|-1Fz`)?Zz`#)4$H2gl!Vtxf!kEI8!<fqy#l#3=Gv_cz
zv7|7jFz2vlu|=_gS?pOHQJn4!DJ&_hEet8FsZ1$sz06Tu?hGmHDI6^fDICpAQQYnf
zDV!->Eet7Kshn9nshnB7sobet&1_M8?hGm1DLgF<DLknxS^TM7%`8y@?hGlsDSRyq
zDSWAd&CF3kDa^qPn*51tnHU&CGBS%5a`RJ4b5a$O^Ycm)GxLfSN-|OvToOwXgA;QL
zK;k)x#l;GVc_|9MsU=03$vLTsMS1awMMa5~3dtZjt`KY*QuC4%3yMo~5=&Bx6%rMS
zA%>|JD<tRV7Ubup=9Lt4<t6!m4bDq)tx(7-%}q)zD%Rmbw+3R1LS|k@YEfp1LRwLN
zE=X}fr9xf`#2FyR!JVI|V3eYel30=mljbVVEXe@-syHLDAXPylFCOBWVjYFN__EB@
z^7u>zErq=JwA93s(xTMj_)JYME&~N9Fa*&?Ai|i-777$VG=ku&fC2>&jUa}+ILMnI
zjPNR$#l;0mBajGFK#4?9Oz42aIxWAns3b!nB{R7sGe0k}C^NNKAuYcMnh<oj3W`$G
zGAncxic8bdGAqEzAT2SuB)>?ZJR`Lz6=X)GLShjpUFH^)mZYX|y#!?~O~za7j!w?L
zsU?Yi$shq3W`<=p4F(2=RE8+V6ox3KRAx|?Yhh?+jABV)PGM<bh+<7)O<`+ch+<1&
z4`$HhxW(sNnp2YLo1c=H14{JCOi%;a7#J8>85kIxK^eG(k%6IvA&aqwp_#E3#A2#p
z$YL&G$YN<`D(b0W$YM=l%w{U;DPaStNMXumDGDoL$YQTyNMTM9X=RdRr~zkq5G$Lt
znXxFWgd>HenX!qnh9QMjk|BjPo1r+bh9N~%lA(q%o296%gd;@^B%i`j3$lw7tcp>B
zp_X|9L*6@>jT0D)B%m(g%4RBFQ^JuV4$@VzuY?=q5*LQpm>8y7mReS@>sgx_YZz)+
zY@j}<VUdLUqWDD#M~VbUFGz(XLl#dn*i{o4i@Bg8yv-oDu@-GB;Yg7JsVv+U&X5-Z
zb_-KB(*(w%loEz4kZYkKGl8+#u7o2+0Hn4C*~F+ChAgOQ6BvtfN;p!aL2B|iN;p!4
zK&%vDgs*FvYZz*nYnakN#g<H!ooilkX;G>|K~a7|Y7r<&<tCPZC<RbXE{@O4O93aQ
zqSTVoqC8MRk(64bke>#T)_cjwz`*bl6v=*7%3+B)rK!aVnRyUNu-@F#;u3|VRE6UF
zqLS1Uy&{nMDot!EL5VXjzXYT`Q6V|Ms3<kLM4?KiC^0WR73?5TCMhV&FUw3xg_KBo
zx7dnvGLut_tK>mtW?Fs`$n7w<fvwLhRsfk<#Z_EdPylg4l?*o9VeYKr%*-oEO;0T<
z)~iy)rVz!MRq`;0;&5gaE2!wyt5U^g6wFS9jVc6;gBYesz_$p$-eS+pO96$@%m4rX
z|1aWTU|^_n#pV<gUs2f+#CqO=QioB?Yt#uLO}<-9c?Gvv!FB2_uFT@%(xjA<%7WBe
ztPqMF8t=DQ@=`K$Z!zbj=G|gW%&WY`1r7+;qN4nwTdd$p_7+=yQdVkm$t}**ywcp%
zqQsKaTg<tcdAFEz6Du^?ZgIjh>n$!s#e0hjBpjccpOPAXiycxo7vEye%uBh&T$Y%#
zlA%bJfq~&yw0=f@ZmNDter{%7W?^ZnenC-wR%%JHzN24|u1{uJYO#KCa%OgBiEb{a
zY|jOi?ci36K7y|gN}0tVO&|ueL7-Pqc}p@rEwiY&B)%*$Co?6nBo*91fD|jBngG<E
zV_@cDl48_i<Y1IwWMX7vWMLFy;$h?gvxOM>7)78g0Y)aUxDX=;qYNVxBL@=)BMT!J
zW0eMKouh}-N_cGqDzHE`61X<<U|?XVVaQ?t*EtF`3@J=0Os$}Dm$8OXk|B*rk|7vW
z-=K)rfNC$s6lQS-NrqM?h?*3pU<OT=Do59flA^@q5>S;3E(qa;S9)eyYMw%7UP@{O
zhy(`(r2Pdd+CTwUoLZs~p;rWI>G~D1FfcG^G8IWOFfbH}gW0!Oic@paZgFJhrGPbq
z;}1f}F)%RP;*C#**bSBi`KuV@O9mz`Mlq%;8T4R6R-6o~@IiGoNCUj0o&>9?8Jihv
z8Nt;NBfO&St6_vz)O{svHH^)SNEJ0ByrO0VwZpMg)QsRN7*bI)f-7oJHIGzL*D%C$
z)-Yyqfz`pP>JKnGComRC)j-_Boy}Cdt^`q4^MKrfrK)CxSJjLtRrM=)RSgm6g;&))
zH4qWLW`-K3Y}TS3@Tz(TxT*&EA5vAPp@z)_#$pF}Rm}*hoIy>J35>-tHH=wM(<U$$
z<zcI;k^NoET*Fwy5YJ!3T*DAAP{UZmoCZo{6j$5ev{eL(=7FfTi$G~?w9bZ<g3uNU
zqRs~AesEO{Ca6`Z5#MMSsr9vB5htjq<pL4hAc6-(@PY_F5Wx>31VDr!hya(%LLinf
zh!6o0q98&HM1TtHA}LU*%~qrU7mxt4K%zy`AeIb>0M)@rWw<bws?r}+Rl0ymXC~CD
zl8uC_l8upzk&Tg!u}TTO9>A=d{WRHcam2^xCFZ8a$KT?L2elzeb3kmK`1r!o#2ly$
zPjN|NNoI0xYDq?ZN|8LsJ4zrz8AKpF24;b3s3K4;17d*dB@QMY4Gwm&m?q;b&fL_J
zMDPd*_KqkhSYaK}6qXdG6y_GjDCQK#U<OUrTOtTQ1s5b16{mXUrRDp<D>YEWf?^X~
z-`g=VFr+ioGN&-svVi(=>7ZUHYYk%-Q!QHxQ!RT6b1g>+a~2Dz8Y$WYu5ehgS&BB*
zFr_eMa~364@|7@TvDGj&GuASNGZe04WB|bu_7ql7Un`QKa9spLo)1_xN71?(#w?B$
z7D<K_kQuW|7_vB17~x`UQVcM5Iy1<;JP}kC>0n!G7;9N;7_zu(m|&`#nPQl0nQGZ;
zIck`)SW{TCnTmeZaAa|(us1Un{Yv3T;cQ_j;i+M&VFCpesQPVYasm018EgV0*gXsr
z7>hxbD9n8f6Bvt>N_bPaK>n!Vs9}ibt6@T_e%NXl;swF29PTOw=ls0llA==Z5JP?%
z=3o}Mr{@PQ>cO>5l_InOs!*JfUz(Ew>J;W=7MCdG7b)ZwfE&r+3h5R*s85%fm!1sj
zDuU8HD9wP{LcyS10Lnq>3^fd~0x?XrjI~UlAdz60$W+J@3~A1SvyvwBEhaq!P*c|s
zL>PeuAp|J({o=9#Sq&OGvzrPs71AE8GQ^gO^y1?)^D;}~<MnKE@{<#DitY3ex~pU}
z^9o8!AOmC&k0qsoyPT=H1tpc5Lf|sx7CUrk?G{UMW>xAfrs9HI9GS(%;FMYfDqL={
zCl-Ul;1*{{WkD*q?Oy~gT)@Q<C}-W`DlSPZDk%mHzujW4$}A{C6holW<`x&Ys{pE1
zN+2Z<C%ELf#R6(s-r@lF1v2weip)VxMOJV#^cF{6YI!_J9<1XQdr3xWc4}oYq(cG9
z^{J3hgXdsSu>c8<Dp0_gfg*^B1>6p2V`O1sVG?2FU}R$CV`O6#VB}!rV^m@iVPauo
zV-#W(VU%G~VB`Q7LTrpxvUmeqlhIF;rAP%-j;MkNH4tG3BEU{T5TN)h(qv#@r~tVK
z6p<WEU_MGI0ul%HHHty$2VRPR$`4RE!Vtxh!U8TwMA04P2pX78oWRDwzy%t!L`Z`r
z@Qk8zae*}iBo-y+rk11@6@$6DZ~&181-*imLULlBLQ*PN1;`dK)<q2VRe}awz@05n
zBr1RtiUKGdD1f6}!4_>07B=##sQ~VKfOYDEw4&Mowgxnan3|ZJ0q&~kp)061u!2~p
z5gi>HtAj4BV2EE@PfxEFRgW%c2&n)x6j_v5sgRSIok|~%C`3m`>*?uX^Me9z8ITTi
z_g6!LALR5}h}%&{z(K)Wte~ZUGytv%QKk--0u4*4E2Lzmr52^;l_-?tE2#VB=cTHn
zTB87oIdF0V$D9JBoPmzC>v4eqIQ|qAz%d8P_9+U`*w6(zPC+9jH7&6;r^HqPWPyUF
zLS`D6g<%uOoM;2k=shgq=<6FHb7M6XGC?y9&=Nm2MM1*|Vyh-r!@!9?u_U!fp|~J5
zIWw&?GcR2sBflKvTWF^f+&c!ResHKjy1@}vkX!+B5D4o+ECB@>$N`B8hPp10<Os?T
zkRe3{=qO4tWSl%TuLLwUj^-@zC|F_%co<wECo?y*q!?rZsM`&yZBmOBN{drd(47Z%
z8G4$B45x#n6cQ5h5)wcTgLLeZD$#6BNQj8G(n0tkIxp4=q!Aof(Rtd2v9SpWdI&E&
zmXs7_CY6??Ldpto2%wc0ATf{;;2^P50M%k3kAQ@V)j@*_naQaN3L3eIm7w|+6z`e{
z^I;XR6)4O=`4D+17oiMXrhwd7g0LVnuLP<A9B|Nd0xn(Pg-0>M2v8lOV5I<RC4!un
zom!~_oreHNd2)VkDkR1ekqS93h<+>A3h?9>G%eWK*+G<oOEK`^lOD(r&|s5ZQettc
zLS`;#WLF_4KQSd9G(Q541#H?%Qj1H95t`FeOTblMacW6?L1IY;%pi1eoaVuu0b)SM
zykLfaCO;rEehBTEIjIV^3hLmT9A5!)t6o8N4kYJ;+zT>B!4}~W4Rn`iq-Ex$YC>Za
zqkRPKC2V7WPOUJeFiJ8sGZqz;Fl8~<Fr+coFwSG@V_;+`VW|Pjl`v$nHZv7DfoPBl
zh-fntBSYaPBsFZ!Ohus44K@rl>?us3hGL#m2}=!V3It@@B$#Os`)Zi_7#SJz6rd_V
zW^DpBo<U|Is{qfTvJly*tWpNgvqD-=pd<(y^+)eU`)P6%8G@P%Mj!&zJ}3gU9zf%;
znR&$}iFwJXx44pvi{f(=ONufpK<s2FyT}C8ZDWQso|qGh(r>Y4mnRmb7o#<Wzy@9e
zwK_mu69y(eMgc|vMiCHZ;$q@q6l3IJtdav4;o$i&NXG=+Zh*Hb{WMw88acPv<Kt8E
zljGyT#v@q?Zt{RzJN66=4D&$tf?CBKW-LraAQ6=I4yerxYMK>;T4^Zl9ZB%S9b*(r
zDs<|unGrO32bv9!VsB?iV+7BIb3}1~*`V2PjwnvZjCT}Q3V(_~3qurlieQRR3qur7
zig1cZ3qurdifD>h3qurNig=1d3qurtI|B<tlt3_prqnHdL;!%jo~X-C-?kJT+EKXM
zQ25$Uc-l^=?Izl^nQ%KAS6c~tI|;Roq^m%W_7TYK;B<wzZ3M0rp*1DA(MDpsh-z&j
zYP5%_(iWmpJBY|O5Gm~+w6+ht*`o_-6o8@<8k5Agd5CE5P}tTPZRgOmodas^f*M8z
zMVWae8ldKpUVJ=wh%i1L((|wZ^AyxU?LKuocvB19Rs^M)Vg*|Ta6tjCfx#6J*c^y%
zkl6^Odf>zWHbcYE$UsNI$ix7vI#8^Gm4R%H2k|sOMTKE3R@Gor!Odx~o@lVqhDHXl
zU@n>yph*@iWdv;yBZe_SW6_{-X2>umGqz#O6wn|hb1i!cOD#tUQw?JYb2C#>38+2S
z%vj4-VFen_Y-Vs_h}DZ>s^zTZssRmUvea<Zur@Q-veqzXvDa{AailPT+K!A2H7qGi
z<_xtgg;wF9flAJ1hFW&8sSH_+%}n_dYM4@(vpMo7famETGhte=0mfRk5{4}98nzTB
zanQgcQwn1ZQwpOELq$OiqYFc9MlBb3AeE<vtC_KuE1V%u1T>7w$WX!y8%WJ#ieSKS
z5!7`xOrSYDaRv|#vcU#4h09ih@CnT2yTTbJFy<K{Ts?uY=pEF(EFd=66?~w<-daYS
zt^oOya{^=045(`Guq=vdHc(qQk^$uAyZ}hJOkgZ}0S*l&Ne0l|=mf^1D<uqBJkSAP
zkSIHJ0GLUV0cijjNd;m6I4`f#p@bm|;!@Tc)-0|RP>3@XNz|}r@ux7=ux4?9+x$%6
z<w+njKq4TMVXoC-VJH#E=9<7%6vfC;%gxA8#aAMj!U1x5I71|8aTRkdcO7>G*cUYn
zSwb}sQ$eW#dB7QF^NJddEa4Q+X2uCjMJrOcQn*_fN<_ee&P*xH$b-%$qBX23Jk3lK
zn2OGnh-Hb_ux5c=E!oVJB?VeoRm+nX09M1>%rt?yhy%>#gR)sbK5J%7Va#R$%Ynjx
zHA|wI5v~_BXfB;4Q^SxVP{NQUo5J7AT+36#QNt52SHo1p6E9!GT*DKuP{UTk6R%jq
zUc(d5S;G^|peaaXj{`h_4H?UZ4fsMA8=-eQh#lpw5)4WO7YquO`K3kR;o~AL1_lO<
zP8~^u-bLD=&W;X<&;=3TE(4eV_Z##W7#N;0GB6Z_Mnl;cs<hEcM!ooW&~Px;f$m!@
zxrxQuMHV1qLF3YzOpxJe(5#9k6L_#6OlT^BN4$&lK?*@5uD4h~rDPFk^6VCCW=U#p
z@hzeFg8TyTQdY2ipmO~dTVg>$YF-N1&|55^G8Hl|&I%q!F9MH(gZgDf#-Pz__995=
z-QskuNKP#PEs2DThJyymi%dawgGQ!rv1cR}Czg~H-D1j3yu}0ZL23$Qtu4eS?jXZh
zKo!L;j-u3La3c}a0RsoaE!MQ0{KOK(0u1oTc@b#(PLuZ*b8=2G#1Ko6b)Z4@A}bKf
z8bp8;71@ATwje8*vr{W?ait`J*TIyg7T;n_y~UiLl!es21IJJ(0|Ub{Pz*JII&aW%
zbr#ULIwKb&2cr-psE^6R$ib+<$i>LV$ic`39cu@POMzvjU?NN~IRQp4Mgh<`JR=(u
z6C)QR52FyH1fvk66bEQjUV>4Eg@;jsk%v)+QG`hZIyw)YC=|z$KH$@8pk5}Z4+^f2
zR)Lq%fEUr&l`zyWmM~^9H8T}!f!WNUrJcoEC9GNCl_Et3HH@`Ph0AIfv)DmXZQ!Y|
z41~&}s1oKZ4$$0A8o2Dr^8w3&O)M%aVMLKLfSWp@gcU_b3}y<<7KmvzpfwasevtIa
zSp-U@MS-Ab;`U7}&Q47MHxrPOF=8A6lpAlcWfYerCTD9hL*hFU6wBa92j?^}0gg;B
z1_p*7pvVLz1_mY$M$nJ|8xtQh52FAh2y-zCFo~he$AWFv!^|Ei3zy&_9tH|=#uSEZ
zjv}5EhF}Iz7^N_#Ff@Z!i!g$DOi&&Zn8ys|F@t$5P#z1I#|q`Kf=tq6^Ft)|A|H?s
zK~qfNtf0wui#<KH1eC3Ct6>6<o)krcbb><!On`#rmQXw-2gavm<|XE2R;9+r^Dr?m
z6oaP07#KMiSeW=2ArN_{8X7VfT2Ts2P<jO6Vgv9fSP5u>F;f;(Gea#SWONI(xHpYa
zoS~Mfh7r`psbNfEmSlh~hGp>sPe6i`1-Qi5WVyv!Tw0J?ROALq9bED8MX4#J$)IU)
zj(E`UT}gbrCS;r_inky$IXfpcCBC>Mu_QH$6I6nLnV^|%c%v1P-$B7pl*quqAPVv~
zXfX@}6CaZRQ<W5EhJ)FNkpjT}1Qqq5rE6Ir|1m?_Y$Z%3%ry+nOrQzu6s8mwa6Qb(
z$N-L4R>-n4h#x`ALBVTGG+B$1Kt5zEEdZ_2D@p)G8`#f0@$sO6R*;|K<250%dW#k6
zu_Dlr6Oxxw85kI3L0$r-PzFW;#ww&GXRruF7zZy#K*~XDk-)_WsFDKB%7YhFF=R1=
z8cPr_G&921E{ijyFr>iO-D$$IG7C7*-eN8;%>`Ez;N+>v0!fPCtjnC7p96M&Q3=G|
zh-3*$)A7mqIf@7mNHJE4qsItnNH#e?2Ob?Dw}WC3>~<GuvddyBVFV3vX0af<9#qJc
zfEug}ps0~#05^-7YoLCqVX9$<tt5o|h!r$@T>|!2Q93A6K(!x+w~8`Bk;hV8l3#%C
zD-Mu*LG_y|$Xg(XGBC<93NcnmVDlJAAv{(<F#^&6_8Dl)O$h_2NXOwbP?5r1gX}Yw
z8kQPn2?p@`1V%}QY^I_-FdMvjKZ`Ak6_f!XqVO<au3-U%fek~RObL4ma}A3aXstQ}
zOtlz;1OuqC3Qo!_kOCZ>Q>#=Vjj8-Ja8pVlB|jCi@H;oLBsoJtLp4Q5K{Z8FlcOjb
z6pEZ60yR9r4G&0t4w_ciWJRPJh$twGi*i63!D$9efHLwe4$%AoDCBfNK@4gSL4sI=
zQH+TROF+YvV-ILhrGY#7qsxGLKJd)J0uFv~5Y|AKq-ruF1(GH!wuA)s9kO?CF&8D4
zgOe0i?}GD5QDV6%$hV;M!N4fRD2F4b6eX6!y@`@jO29b<w2BteurJD}VafteOVluA
zL0Q>MMFpS&8?*|ymbru_3$z9XwBUmo<ZaNh(q0xuP`8g6v>GQ1T%b2If<#ytve-c#
zS0;vdcJM9;@ZJz`s$oSA5XM_9i4~c}RZ@Pb<=|;L&_ujCXbVDSNoHbBVpVF2CMUS*
zij+LSEj><9j$_NrD^4vcDe?xT3Q#2vZi7M#oLh`#;Gm6S0}r}kuA@jyfh^*+0tF<b
z^5bI^W8?z2%><Y*b0|y$N|gr+Rd5ak^&T=AY8fjON<d-4*bM4q3xW52FoD)8)iBmD
z*)S9efI9NQ44~x|%(qy<yB5GL+M-;<q(>B}$(~;XDlyT^6ZZI$d{BGd1LSB>{lUP<
z!YBYLP%uhLs7jDcf@pIbMXsO_12uMw)IlszqA!XEu|TzA5vU$1$^vmgjw;Fnu?j#0
zDAg2|fr6eFTBRnZ=H$f3BV4J;2&t4nITKuKg8~b@=!3(Fg^??WM}&iuhl7WOgN1{g
zgOh`mgAbw#)ZB()P>_Py#h~^(%G?QLP6V{^i4%4HB$X|TJ%t^#5ehV$0-pWg2xida
zyv2nWA4{Cb1fBSBMBWbwn~j1F_KePc!lrR?&VG_TDhL`3M8pT$I3Q?eh6ZGI7&43p
z8(IVp5TeaHgSxEH312<5id5YW8k4#To}i^+&}nS&6adzBKp^jc%u}!e&xM1B{vh*>
z3RVjFB^jwj#R`c<>7}{gJ)U4`0)ypX(-ah}6sn<%B|ytTY8B82LcvD)!CNw*1|<lC
zmcAB)8kV5?9^8-wmmi=8WDP?+a}B7&!C9mM8Bzp^fXh`z&`c({-3VHT1)jUS#R{HY
z2WK#7GZ|bW7J-^cu#5_?N5NVO;l&Dz03#bC2V<2mBEsO4rl33x%AgFO6@nlPE&xGw
z5vVN;D%>D;G8R>W3N=PZRSvcZOn?gq&`3r#$U4wq2m?zMKaxeD0ECANNFHQ1*b>mF
z0;s!G!U&pH2h~X>Oj&GcEX_>CN+s-B9H3#uA}#Quea<|N60Q_RczcD(ugVCrd`AJY
zP!P02BN07Pz{?>^b73o_^fb94{ZmkD98!IQM^-@H)uI?sNywg_nwMIXnT#~s4e~W)
z5*yNBhlD!V>tF)z|3;AiL4IRkQexy_0u|MqSc_`#)Hw%ak6=k9*t}b8#i=DFsYONQ
zppqGEE{K4|E7%TDQV0au0jeoD<XD*axOmu#K%$z$;5y(Ib7E3*5h#RjvF3q%4ff(K
z4lr9UHxbe;=P6FkEU46jO!(hogU}HFf)XrvwO^4V$Qe!`0yG&88Gix?8fY5~*iJ+(
t0bzlhc#Fd(7rbBF4zw_^7&MX3!U!5*;bP=s5{lweld5NM6RBs&2LM(YYy<!R

diff --git a/multimodal/datasets/data_sample.py b/multimodal/datasets/data_sample.py
index ed3ab6d..9f6d730 100644
--- a/multimodal/datasets/data_sample.py
+++ b/multimodal/datasets/data_sample.py
@@ -33,7 +33,6 @@ class MultiModalData(metaclass=ABCMeta):
     def _first_validate_views_ind(views_ind, n_features):
         """Ensure proper format for views_ind and return number of views."""
         views_ind = np.array(views_ind)
-
         if np.issubdtype(views_ind.dtype, np.integer) and views_ind.ndim == 1:
             if len(views_ind) > 2 and np.any(views_ind[:-1] >= views_ind[1:]):
                 raise ValueError("Values in views_ind must be sorted.")
@@ -79,7 +78,6 @@ class MultiModalData(metaclass=ABCMeta):
     def _validate_views_ind(self, views_ind, n_features):
         """Ensure proper format for views_ind and return number of views."""
         views_ind = np.array(views_ind)
-
         if np.issubdtype(views_ind.dtype, np.integer) and views_ind.ndim == 1:
             if len(views_ind) > 2 and np.any(views_ind[:-1] >= views_ind[1:]):
                 raise ValueError("Values in views_ind must be sorted.")
@@ -228,129 +226,6 @@ class MultiModalSparseArray(sp.csr_matrix, sp.csc_matrix, MultiModalSparseInfo,
                sp.csc_matrix.__init__(self, *arg, **kwargs)
 
 
-# class MultiModalSparseArray(sp.csr_matrix, sp.csc_matrix, MultiModalData):
-#     """
-#         MultiModalArray inherit from numpy ndarray
-#
-#
-#         Parameters
-#         ----------
-#
-#         data : can be
-#              - dictionary of multiview array with shape = (n_samples, n_features)  for multi-view
-#                   for each view.
-#                {0: array([[]],
-#                 1: array([[]],
-#                 ...}
-#              - numpy array like with shape = (n_samples, n_features)  for multi-view
-#                   for each view.
-#                 [[[...]],
-#                  [[...]],
-#                  ...]
-#              - {array like} with (n_samples, nviews *  n_features) with 'views_ind' diferent to 'None'
-#                 for Multi-view input samples.
-#
-#
-#
-#
-#         views_ind : array-like (default= None ) if None
-#                     [0, n_features//2, n_features]) is constructed (2 views)
-#                     Paramater specifying how to extract the data views from X:
-#
-#             - views_ind is a 1-D array of sorted integers, the entries
-#               indicate the limits of the slices used to extract the views,
-#               where view ``n`` is given by
-#               ``X[:, views_ind[n]:views_ind[n+1]]``.
-#
-#         Attributes
-#         ----------
-#
-#         view_ind : list of views' indice  (may be None)
-#
-#         n_views : int number of views
-#
-#         shapes_int: list of int numbers of feature for each views
-#
-#         keys : name of key, where data come from a dictionary
-#
-#
-#         :Example:
-#
-#         >>> from multimodal.datasets.base import load_dict
-#         >>> from multimodal.tests.datasets.get_dataset_path import get_dataset_path
-#         >>> from multimodal.datasets.data_sample import DataSample
-#         >>> file = 'input_x_dic.pkl'
-#         >>> data = load_dict(get_dataset_path(file))
-#         >>> print(data.__class__)
-#         <class 'dict'>
-#         >>> multiviews = MultiModalArray(data)
-#         >>> multiviews.shape
-#         (120, 240)
-#         >>> multiviews.keys
-#         dict_keys([0, 1])
-#         >>> multiviews.shapes_int
-#         [120, 120]
-#         >>> multiviews.n_views
-#         2
-#
-#
-#         """
-#
-#     def __init__(self, data, view_ind=None, shape=None, dtype=None, copy=False):
-#         """Constructor of Metriclearn_array"""
-#         shapes_int = []
-#         index = 0
-#         new_data = np.ndarray([])
-#         n_views = 1
-#         thekeys = None
-#         # view_ind_self =  None
-#         view_mode = 'slices'
-#         if isinstance(data, tuple) and len(data)  == 3:
-#             data_data = data[0]
-#             indices = data[1]
-#             indptr = data[2]
-#             data_shape = shape
-#         else:
-#             if shape is None:
-#                 data_shape = data.shape
-#             if dtype is None:
-#                dtype = data.dtype
-#             data_data = data.data
-#             data_indices = data.indices
-#             data_indptr = data.indptr
-#         if (sp.issparse(data)) and data.ndim > 1:
-#             if  view_ind is not None:
-#                 try:
-#                     view_ind = np.asarray(view_ind)
-#                 except :
-#                     raise TypeError("n_views should be list or nparray")
-#             elif view_ind is None:
-#                 if data.shape[1] > 1:
-#                     view_ind = np.array([0, data.shape[1]//2, data.shape[1]])
-#                 else:
-#                     view_ind = np.array([0, data.shape[1]])
-#
-#             new_data = data
-#             # view_ind_self = view_ind
-#         view_ind, n_views, view_mode = self._first_validate_views_ind(view_ind,
-#                                                                       data_shape[1])
-#         if view_ind.ndim == 1 and view_mode.startswith("slicing"):
-#             shapes_int = [in2 - in1 for in1, in2 in zip(view_ind, view_ind[1:])]
-#         if isinstance(data, sp.csr_matrix) :
-#             sp.csr_matrix.__init__(self, (data_data, data_indices, data_indptr), shape=data_shape)
-#             #sp.csr_matrix.__init__(self, data)
-#         elif isinstance(data, sp.csc_matrix):
-#             sp.csc_matrix.__init__(self, (data_data, data_indices, data_indptr), shape=data_shape)
-#             #sp.csc_matrix.__init__(self, data)
-#         else:
-#             raise TypeError("This sparse format is not supported")
-#         if self.shape[0] < 1 or self.shape[1] < 1:
-#             raise ValueError("input data shouldbe not empty")
-#         self.view_mode_ = view_mode
-#         self.views_ind = view_ind
-#         self.shapes_int = shapes_int
-#         self.n_views = n_views
-
 
 class MultiModalArray(np.ndarray, MultiModalData):
     """
@@ -420,7 +295,7 @@ class MultiModalArray(np.ndarray, MultiModalData):
 
     """
     def __new__(cls, data, view_ind=None):
-        """Constructor of Metriclearn_array"""
+        """Constructor of MultiModalArray"""
         shapes_int = []
         index = 0
         new_data = np.ndarray([])
@@ -430,24 +305,30 @@ class MultiModalArray(np.ndarray, MultiModalData):
         view_mode = 'slices'
         if isinstance(data, dict):
             n_views = len(data)
+            view_ind = [0]
             for key, dat_values in data.items():
                 new_data = cls._populate_new_data(index, dat_values, new_data)
                 shapes_int.append(dat_values.shape[1])
+                view_ind.append(dat_values.shape[1] + view_ind[index])
                 index += 1
             thekeys = data.keys()
-        if isinstance(data, np.ndarray) and view_ind is None and data.ndim == 1:
+
+        elif isinstance(data, np.ndarray) and view_ind is None and data.ndim == 1:
+            try:
+                dat0 = np.array(data[0])
+            except Exception:
+                raise TypeError("input format is not supported")
+
+            if dat0.ndim < 2:
+                data = data[np.newaxis, ...]
+                if data.shape[1] > 1:
+                    view_ind = np.array([0, data.shape[1]//2, data.shape[1]])
+                else:
+                    view_ind = np.array([0, data.shape[1]])
+                new_data = data
+            else:
+                new_data, shapes_int, view_ind = cls._for_data(cls, data)
             n_views = data.shape[0]
-            view_ind = np.empty(n_views+1)
-            view_ind[0] = 0
-            for dat_values in data:
-                try:
-                   dat_values = np.array(dat_values)
-                except:
-                    raise TypeError("input format is not supported")
-                shapes_int.append(dat_values.shape[1])
-                view_ind[index+1] = dat_values.shape[1] + view_ind[index]
-                new_data = cls._populate_new_data(index, dat_values, new_data)
-                index += 1
         elif (isinstance(data, np.ndarray) ) and data.ndim > 1:
             try:
                 data = np.asarray(data)
@@ -468,12 +349,16 @@ class MultiModalArray(np.ndarray, MultiModalData):
         else:
             try:
                 new_data = np.asarray(data)
-                if new_data.ndim == 1:
-                    new_data = new_data.reshape(1, new_data.shape[0])
-                view_ind = np.array([0, new_data.shape[1]])
+                # if new_data.ndim == 1:
+                #     new_data = new_data.reshape(1, new_data.shape[0])
+                if view_ind is None:
+                    view_ind = np.array([0, new_data.shape[1]])
             except  Exception as e:
                 raise ValueError('Reshape your data')
 
+            if new_data.ndim < 2 or new_data.shape == (1, 1) or view_ind[-1] > new_data.shape[1]:
+                raise ValueError('Reshape your data')
+
             # view_ind_self = view_ind
         # if new_data.shape[1] < 1:
         #     msg = ("%d feature\(s\) \\(shape=\%s\) while a minimum of \\d* "
@@ -482,7 +367,7 @@ class MultiModalArray(np.ndarray, MultiModalData):
         #     raise ValueError(msg)
         view_ind, n_views, view_mode = cls._first_validate_views_ind(view_ind,
                                                                       new_data.shape[1])
-        if view_ind.ndim == 1 and view_mode.startswith("slicing"):
+        if view_ind.ndim == 1 and view_mode.startswith("slices"):
             shapes_int = [in2 - in1 for in1, in2 in zip(view_ind, view_ind[1:])]
         # obj =   ma.MaskedArray.__new(new_data)   # new_data.view()  a.MaskedArray(new_data, mask=new_data.mask).view(cls)
         # bj = super(Metriclearn_array, cls).__new__(cls, new_data.data, new_data.mask)
@@ -501,6 +386,25 @@ class MultiModalArray(np.ndarray, MultiModalData):
         obj.keys = thekeys
         return obj
 
+    @staticmethod
+    def _for_data(cls, data):
+        n_views = data.shape[0]
+        index = 0
+        view_ind = np.empty(n_views + 1, dtype=np.int)
+        view_ind[0] = 0
+        shapes_int = []
+        new_data = np.ndarray([])
+        for dat_values in data:
+            try:
+                dat_values = np.array(dat_values)
+            except Exception:
+                raise TypeError("input format is not supported")
+            new_data = cls._populate_new_data(index, dat_values, new_data)
+            view_ind[index + 1] = dat_values.shape[1] + view_ind[index]
+            shapes_int.append(dat_values.shape[1])
+            index += 1
+        return new_data, shapes_int, view_ind
+
     @staticmethod
     def _populate_new_data(index, dat_values, new_data):
         if index == 0:
diff --git a/multimodal/tests/__pycache__/__init__.cpython-36.pyc b/multimodal/tests/__pycache__/__init__.cpython-36.pyc
deleted file mode 100644
index 1e99420542d93339ad39b73b8ba0b9c48bc020ac..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 168
zcmXr!<>k7_^fiWofq~&Mh=5^61_lNP1_p*=76t}}6oz01O-8?!3`HPe1o10WKO;Xk
zRX-&^H#0A@uryV_peR2pwWL_z(Jx5XC$lWISid+qGdr_HH@7sWBr`WZB{3%_HL)m9
oAHmlzNi8lZ){l?R%*!l^kJl@xyv1RYo1apelWGUDt{CJb0I5hVp#T5?

diff --git a/multimodal/tests/data/__pycache__/__init__.cpython-36.pyc b/multimodal/tests/data/__pycache__/__init__.cpython-36.pyc
deleted file mode 100644
index e5ff6f0d7034f50a62323bf1f47e69b0ec338479..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 169
zcmXr!<>g9k{2t4|z`*brM8Gg30|SEt0|P@b3j+f~3PUi1CZpd<h9ZzKg7_7tpOK%N
zs-Kddo0*qcSemL|P?VpQT2id<=oh5xlUbHpte;z&Q<9mSpO;ozoSC18&MiqTE-BVe
jNi0d!kB`sH%PfhH*DI*J#bJ}1pHiBWY6r5f8001Z_opsN

diff --git a/multimodal/tests/data/__pycache__/get_dataset_path.cpython-36.pyc b/multimodal/tests/data/__pycache__/get_dataset_path.cpython-36.pyc
deleted file mode 100644
index 99a01a1c57ae74bcb18ca8377f9c799fc5fb125a..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 817
zcmXr!<>jiW{~jB}#K7>F0SPcMFfceUFfbIWFfcHrFr+Z%Fyt~uG3GKwF)@PpOgYR^
z%qh$%EG-OCEGbOE44SMjLE1H$Zt)cqW#*N{r<LX<mt^MW-Qq~eEXyp;%+K>nW&|l=
zU|;~5&dk8T;0&@siGhKkgdvMDiwR6Jm#{Q5HZ#^Tf_bbpj5Q3+AoCd$;~5zkf>KLL
zi}DmoGEx;1lZx|mN=s4|3KB~)6!Oy)5*3P4(^894^O92)QW8rNi&IM!((;QGN>YnU
zin+KH6ciM66+H6_N=p<95{nXZQ%h2dtRUi6U;%~Vk|Kq)%$(G`#N1RX1-HzcR0R+d
zWH`u1kd#7leqKppW?p7qI+}H03qY2@?Nrbx&&W*9P{=JUE>TEIRmjX!NJuEoPS4Cq
zO)SdO2fIrj>e6EUgal29V{{e#OG{8(SqyV#5!n01Rtg~)0Z^WiT9gV77_c+TGfOfc
zj@Qxx34r{erKONrtWaE%UzC~xcCA%WNo7H*70BNZt2NnfG36KEVgUu|E%ubmB9OLQ
z?1@RmU=fb^c#v7~@wZsA@-y=^nQn1__#j15yfBZ%gOsjhDB@;dVE7fMpOK%Ns-Kdd
zo0*qcSemL|P?VpQT2id<=oh5xlUbHpte;z&Q<9mSpO;n&N+jr9L?G#>r<TM+Edkr1
zS5SFN09}w1l%!Zd>5N~5v51#}fkBh?7FT?HT4_mXQEGgA5h(2yff8R4NEKKG*r`Ya
b4+8_kEe@O9{FKt1R6CHeVh|y~#K8;z(oyhc

diff --git a/multimodal/tests/datasets/__pycache__/__init__.cpython-36.pyc b/multimodal/tests/datasets/__pycache__/__init__.cpython-36.pyc
deleted file mode 100644
index a2c8d7aba3ba76053fed1475db7036e08f9fc6bd..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 182
zcmXr!<>g|SyD^r5fq~&Mh=5^61_lNP1_p*=76t}}6oz01O-8?!3`HPe1o10bKO;Xk
zRX-&^H#0A@uryV_peR2pwWL_z(Jx5XC$lWISU<NkrzEp1GqpT7wWKIBIVUx-C@(WF
vT_25Gl3H9+te=utl2{DVA0MBYmst`YuUAlci^C>2KczG$)edBNG01@cO+GUg

diff --git a/multimodal/tests/datasets/__pycache__/get_dataset_path.cpython-36.pyc b/multimodal/tests/datasets/__pycache__/get_dataset_path.cpython-36.pyc
deleted file mode 100644
index 72e442b9eaa3fe3d85f23eac5c5fcf430c5cedf9..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 830
zcmXr!<>fjbS{56`#K7>F0SPcMFfceUFfbIWFfcHrFr+Z%Fyt~uG3GKwF)@PpOgYR^
z%qh$%EG-OCEGbOE44SMjLE1H$Zt)cqW#*N{r<LX<mt^MW-Qq~eEXyp;%+K>nW&|l=
zU|;~5&dk8T;0&@siGhKkgdvMDiwR6Jm#{Q5HZ#^Tf_bbpj5Q3+AoCd$;~5zkf>KLL
zi}DmoGEx;1lZx|mN=s4|3KB~)6!Oy)5*3P4(^894^O92)QW8rNi&IM!((;QGN>YnU
zin+KH6ciM66+H6_N=p<95{nXZQ%h2dtRUi6U;%~Vk|Kq)%$(G`#N1RX1-HzcR0R+d
zWH`u1kd#7leqKppW?p7qI+}H03qY2@?Nrbx&&W*9P{=JUE>TEIRmjX!NJuEoPS4Cq
zO)SdO2fIrj>e6EUgal29V{{e#OG{8(SqyV#5!n01Rtg~)0Z^WiT9gV77_c+TGfOfc
zj@Qxx34r{erKONrtWaE%UzC~xcCA%WNo7H*70BNZt2NnfG36KEVgUu|E%ubmB9OLQ
z?1@RmU=fb^c#v7~@wZsA@-y=^nQn1__+Ul6FptE8l&)kb;$dK5_?4}nk)NBYpOT-O
znU`5unyOz=l%JJaQmpUj7o_WxS(aL?pIe$!l3A9STArI)Qk0nt4x7xpbbT}~T4<%G
zmc&DC1Y4+AP<cxLU62!$zF0tsj$eeah>wARL6h|sSA2Y0X-R2OYJ7YVC?OVsQeqKE
i6<7t>-ADv40|Ub?4x8Nkl+v73JCL$s5Fx<C!3+S{GWGEQ

diff --git a/multimodal/tests/test.py b/multimodal/tests/test.py
deleted file mode 100644
index 9a68d84..0000000
--- a/multimodal/tests/test.py
+++ /dev/null
@@ -1,224 +0,0 @@
-
-from abc import ABCMeta
-import numpy as np
-import numpy.ma as ma
-import scipy.sparse as sp
-
-from multimodal.boosting.mumbo import MumboClassifier
-
-class MultiModalData(metaclass=ABCMeta):
-
-    @staticmethod
-    def _first_validate_views_ind(views_ind, n_features):
-        """Ensure proper format for views_ind and return number of views."""
-        views_ind = np.array(views_ind)
-        if np.issubdtype(views_ind.dtype, np.integer) and views_ind.ndim == 1:
-            if np.any(views_ind[:-1] >= views_ind[1:]):
-                raise ValueError("Values in views_ind must be sorted.")
-            if views_ind[0] < 0 or views_ind[-1] > n_features:
-                raise ValueError("Values in views_ind are not in a correct "
-                                 + "range for the provided data.")
-            view_mode_ = "slices"
-            n_views = views_ind.shape[0]-1
-        else:
-            if views_ind.ndim == 1:
-                if not views_ind.dtype == np.object:
-                    raise ValueError("The format of views_ind is not "
-                                     + "supported.")
-                for ind, val in enumerate(views_ind):
-                    views_ind[ind] = np.array(val)
-                    if not np.issubdtype(views_ind[ind].dtype, np.integer):
-                        raise ValueError("Values in views_ind must be "
-                                         + "integers.")
-                    if views_ind[ind].min() < 0 \
-                            or views_ind[ind].max() >= n_features:
-                        raise ValueError("Values in views_ind are not in a "
-                                         + "correct range for the provided "
-                                         + "data.")
-            elif views_ind.ndim == 2:
-                if not np.issubdtype(views_ind.dtype, np.integer):
-                    raise ValueError("Values in views_ind must be integers.")
-                if views_ind.min() < 0 or views_ind.max() >= n_features:
-                    raise ValueError("Values in views_ind are not in a "
-                                     + "correct range for the provided data.")
-            else:
-                raise ValueError("The format of views_ind is not supported.")
-            view_mode_ = "indices"
-            n_views = views_ind.shape[0]
-        return (views_ind, n_views, view_mode_)
-
-    def _extract_view(self, ind_view):
-        """Extract the view for the given index ind_view from the dataset X."""
-        if self.view_mode_ == "indices":
-            return self[:, self.views_ind[ind_view]]
-        else:
-            return self[:, self.views_ind[ind_view]:self.views_ind[ind_view+1]]
-
-    def _validate_views_ind(self, views_ind, n_features):
-        """Ensure proper format for views_ind and return number of views."""
-        views_ind = np.array(views_ind)
-        if np.issubdtype(views_ind.dtype, np.integer) and views_ind.ndim == 1:
-            if np.any(views_ind[:-1] >= views_ind[1:]):
-                raise ValueError("Values in views_ind must be sorted.")
-            if views_ind[0] < 0 or views_ind[-1] > n_features:
-                raise ValueError("Values in views_ind are not in a correct "
-                                 + "range for the provided data.")
-            self.view_mode_ = "slices"
-            n_views = views_ind.shape[0]-1
-        else:
-            if views_ind.ndim == 1:
-                if not views_ind.dtype == np.object:
-                    raise ValueError("The format of views_ind is not "
-                                     + "supported.")
-                for ind, val in enumerate(views_ind):
-                    views_ind[ind] = np.array(val)
-                    if not np.issubdtype(views_ind[ind].dtype, np.integer):
-                        raise ValueError("Values in views_ind must be "
-                                         + "integers.")
-                    if views_ind[ind].min() < 0 \
-                            or views_ind[ind].max() >= n_features:
-                        raise ValueError("Values in views_ind are not in a "
-                                         + "correct range for the provided "
-                                         + "data.")
-            elif views_ind.ndim == 2:
-                if not np.issubdtype(views_ind.dtype, np.integer):
-                    raise ValueError("Values in views_ind must be integers.")
-                if views_ind.min() < 0 or views_ind.max() >= n_features:
-                    raise ValueError("Values in views_ind are not in a "
-                                     + "correct range for the provided data.")
-            else:
-                raise ValueError("The format of views_ind is not supported.")
-            self.view_mode_ = "indices"
-            n_views = views_ind.shape[0]
-        self.views_ind = views_ind
-        self.n_views = n_views
-        return (views_ind, n_views)
-
-class MultiModalSparseInfo():
-
-    def __init__(self, data, view_ind=None):
-        """Constructor of Metriclearn_array"""
-        shapes_int = []
-        index = 0
-        new_data = np.ndarray([])
-        n_views = data.size
-        thekeys = None
-        # view_ind_self =  None
-        view_mode = 'slices'
-
-        if (sp.issparse(data)) and data.ndim > 1:
-            if  view_ind is not None:
-                try:
-                    view_ind = np.asarray(view_ind)
-                except :
-                    raise TypeError("n_views should be list or nparray")
-            elif view_ind is None:
-                if data.shape[1] > 1:
-                    view_ind = np.array([0, data.shape[1]//2, data.shape[1]])
-                else:
-                    view_ind = np.array([0, data.shape[1]])
-
-            new_data = data
-            # view_ind_self = view_ind
-        view_ind, n_views, view_mode = self._first_validate_views_ind(view_ind,
-                                                                      data.shape[1])
-        if view_ind.ndim == 1 and view_mode.startswith("slicing"):
-            shapes_int = [in2 - in1 for in1, in2 in zip(view_ind, view_ind[1:])]
-
-        if data.shape[0] < 1 or data.shape[1] < 1:
-            raise ValueError("input data shouldbe not empty")
-        self.view_mode_ = view_mode
-        self.views_ind = view_ind
-        self.shapes_int = shapes_int
-        self.n_views = n_views
-
-
-class MultiModalSparseArray(sp.csr_matrix, sp.csc_matrix, MultiModalSparseInfo, MultiModalData):
-    """
-    MultiModalArray inherit from numpy ndarray
-
-
-    Parameters
-    ----------
-
-    data : can be
-             - dictionary of multiview array with shape = (n_samples, n_features)  for multi-view
-                  for each view.
-               {0: array([[]],
-                1: array([[]],
-                ...}
-             - numpy array like with shape = (n_samples, n_features)  for multi-view
-                  for each view.
-                [[[...]],
-                 [[...]],
-                 ...]
-             - {array like} with (n_samples, nviews *  n_features) with 'views_ind' diferent to 'None'
-                for Multi-view input samples.
-
-
-
-
-        views_ind : array-like (default= None ) if None
-                    [0, n_features//2, n_features]) is constructed (2 views)
-                    Paramater specifying how to extract the data views from X:
-
-            - views_ind is a 1-D array of sorted integers, the entries
-              indicate the limits of the slices used to extract the views,
-              where view ``n`` is given by
-              ``X[:, views_ind[n]:views_ind[n+1]]``.
-
-        Attributes
-        ----------
-
-        view_ind : list of views' indice  (may be None)
-
-        n_views : int number of views
-
-        shapes_int: list of int numbers of feature for each views
-
-        keys : name of key, where data come from a dictionary
-
-
-    :Example:
-
-    >>> from multimodal.datasets.base import load_dict
-    >>> from multimodal.tests.datasets.get_dataset_path import get_dataset_path
-    >>> from multimodal.datasets.data_sample import DataSample
-    >>> file = 'input_x_dic.pkl'
-    >>> data = load_dict(get_dataset_path(file))
-
-    """
-
-    def __init__(self, *arg, **kwargs ):
-        """Constructor of Metriclearn_array"""
-        if sp.issparse(arg[0]):
-            MultiModalSparseInfo.__init__(self, *arg)
-            if isinstance(arg[0], sp.csr_matrix) :
-                sp.csr_matrix.__init__(self, arg[0])
-            elif isinstance(arg[0], sp.csc_matrix):
-                sp.csc_matrix.__init__(self, arg[0])
-            else:
-                raise TypeError("This sparse format is not supported")
-        else:
-            if isinstance(self,sp.csr_matrix):
-               sp.csr_matrix.__init__(self, *arg, **kwargs)
-            elif isinstance(self, sp.csc_matrix):
-               sp.csc_matrix.__init__(self, *arg, **kwargs)
-
-
-
-
-if __name__ == '__main__':
-    rng = np.random.RandomState(0)
-    X = rng.rand(40, 10)
-    X[X < .8] = 0
-    X_csr = sp.csr_matrix(X)
-    y = (4 * rng.rand(40)).astype(np.int)
-    X_ = MultiModalSparseArray(X_csr)
-    print(X_.shape)
-    print(X_[:,0:1])
-
-    X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
-    y = [1, 1, 1, 2, 2, 2]
-    clf =  MumboClassifier()
-    clf.fit(X, y)
\ No newline at end of file
diff --git a/multimodal/tests/test_data_sample.py b/multimodal/tests/test_data_sample.py
index 04c6b49..b21d4af 100644
--- a/multimodal/tests/test_data_sample.py
+++ b/multimodal/tests/test_data_sample.py
@@ -33,7 +33,7 @@ class UnitaryTest(unittest.TestCase):
         np.testing.assert_almost_equal(a.get_view(0), self.kernel_dict[0], 8)
         np.testing.assert_almost_equal(a.get_view(1), self.kernel_dict[1], 8)
 
-    def test_init_Metriclearn_array(self):
+    def test_init_Multimodal_array(self):
         a = MultiModalArray(self.kernel_dict)
         self.assertEqual(a.shape, (120, 240))
         self.assertEqual(a.shapes_int, [120, 120])
@@ -48,3 +48,5 @@ class UnitaryTest(unittest.TestCase):
         np.testing.assert_equal(b.views_ind, np.array([0, 120, 240]))
 
 
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
diff --git a/multimodal/tests/test_mumbo.py b/multimodal/tests/test_mumbo.py
index 978244f..f22f5ba 100644
--- a/multimodal/tests/test_mumbo.py
+++ b/multimodal/tests/test_mumbo.py
@@ -705,8 +705,8 @@ class TestMuCumboClassifier(unittest.TestCase):
 
 
     def test_classifier(self):
-        X_zero_features = np.empty(0).reshape(3, 0)
-        y = np.array([1, 0, 1])
+        # X_zero_features = np.empty(0).reshape(3, 0)
+        # y = np.array([1, 0, 1])
         # e = MumboClassifier()
         # e.fit(X_zero_features, y)
         # print(e.predict(X_zero_features))
-- 
GitLab