diff --git a/.gitignore b/.gitignore
index cc53efad33c4618a2d89ea837e94cab2822d8fb0..41ddd6927fde1dca00ab937e2195b442a9c78b57 100644
--- a/.gitignore
+++ b/.gitignore
@@ -87,3 +87,16 @@ scikit_gilearn.egg-info/
 scikit_splearn.egg-info/*
 .idea/
 *.tar.gz
+.pytest_cache/
+examples/simple_example-2.json.gv
+examples/simple_example-2.json.gv.pdf
+examples/simple_example-2.yaml.gv
+examples/simple_example-2.yaml.gv.pdf
+examples/simple_example.json
+examples/simple_example.json.gv
+examples/simple_example.json.gv.pdf
+examples/simple_example.yaml
+examples/simple_example.yaml.gv
+examples/simple_example.yaml.gv.pdf
+examples/simple_example_hankel.json
+examples/simple_example_hankel.yaml
diff --git a/docker/Dockerfile b/docker/Dockerfile
index d2d52e587289dfe7a805d7351960fa8717410b84..ff71df4696bd9e93f9705b1de1862f8054f99776 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -7,11 +7,11 @@ RUN apt-get update && apt-get install -y \
     python3-scipy \
     graphviz-dev
 RUN pip3 install --upgrade pip
-RUN pip3 install pyyaml nose coverage sphinx sphinxcontrib-bibtex
+RUN pip3 install pyyaml nose coverage pytest pytest-cov pytest-html sphinx sphinxcontrib-bibtex
 
 # Copy the scikit-splearn sdist in the docker directory and uncomment the following line
 # if you want to include grackelpy sources in the docker image :
-# ADD scikit-splearn-1.1.0.tar.gz /
+ADD scikit-splearn-1.1.0.tar.gz /
 
 
 # cleanup
diff --git a/setup.cfg b/setup.cfg
index e44d1f85e5179d7d99b21a3acb00e7fc579609aa..413aca51d9e970ffc9e70e75359088c957f23380 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,18 +5,17 @@ with-coverage=1
 cover-package=splearn
 cover-erase=1
 cover-html=1
-cover-html-dir=../htmlcov
+cover-html-dir=../build/htmlcov
 
 
 # Options for py.test command
-[pytest]
+[tool:pytest]
 # Specifies a minimal pytest version required for running tests.
-minversion = 2.6
+minversion = 3.0
 # Specifies the options
-addopts = --resultlog=pytests_results.txt -k "not _old" --cov-report term-missing --cov=sksplearn
+addopts = --cov-config .coveragerc --html=build/pytest_report.html -k "not _old" --cov-report html:build/htmlcov --cov=splearn
 # Set the directory basename patterns to avoid when recursing for test discovery.
-norecursedirs = .git sandboxes .settings .cache htmlcov doc references
-
+norecursedirs = .git sandboxes .settings .cache htmlcov doc references build
 
 [coverage:run]
 source=./splearn
diff --git a/setup.py b/setup.py
index 37c9e14db8b58bda38dcba6bc8274dce9dcbd863..190a341882156a4084b5a1859ea905cd4d83e117 100755
--- a/setup.py
+++ b/setup.py
@@ -148,7 +148,7 @@ def setup_package():
                             read('HISTORY.rst') + '\n\n' +
                             read('AUTHORS.rst')),
           packages=["splearn", "splearn.datasets", "splearn.tests", "splearn.tests.datasets"],
-          package_data={'splearn.tests.datasets': ['*.*']},
+          package_data={'splearn.tests.datasets': ['*']},
           url="https://gitlab.lif.univ-mrs.fr/dominique.benielli/scikit-splearn.git",
           license='new BSD',
           author='François Denis and Rémi Eyraud and Denis Arrivault and Dominique Benielli',
diff --git a/splearn/hankel.py b/splearn/hankel.py
index 1758ac98cbd61c82c66b34f70a91946385e9aa1d..faff8619253acd72e824c4efcdfb18e9e3d5d06e 100644
--- a/splearn/hankel.py
+++ b/splearn/hankel.py
@@ -51,20 +51,14 @@ class Hankel(object):
     >>> pT = load_data_sample(adr=train_file)
     >>> sp = Spectral()
     >>> sp.fit(X=pT.data)
-    >>> lhankel = Hankel( sample=pT.sample, pref=pT.pref,
-    >>>                   suff=pT.suff, fact=pT.fact,
+    >>> lhankel = Hankel( sample_instance=pT.sample,
     >>>                   nbL=pT.nbL, nbEx=pT.nbEx,
     >>>                   lrows=6, lcolumns=6, version="classic",
     >>>                   partial=True, sparse=True, mode_quiet=True).lhankel
     
     - Input:
     
-    :param dict sample_instance: sample dictionary
-    :param dict pref: prefix dictionary
-    :param dict suff: suffix dictionary
-    :param dict fact: factor dictionary
-    :param int nbL: the number of letters
-    :param int nbS: the number of states
+    :param Splearn_array sample_instance: instance of Splearn_array
     :param lrows: number or list of rows,
            a list of strings if partial=True;
            otherwise, based on self.pref if version="classic" or
diff --git a/splearn/serializer.py b/splearn/serializer.py
index eae59a7994f997e534a1eadfda322d06d877eec0..f027c9c58fa329bad58a6ed7cee55500422efccd 100644
--- a/splearn/serializer.py
+++ b/splearn/serializer.py
@@ -119,6 +119,10 @@ class Serializer(object):
     
     @staticmethod
     def __restore_yaml(data_str):
+        if data_str is None or isinstance(data_str, (bool, int, float, str)):
+            return data_str
+        if isinstance(data_str, list):
+            return [Serializer.__restore_yaml(k) for k in data_str]
         if "dict" in data_str:
             return dict(data_str["dict"])
         if "tuple" in data_str:
diff --git a/splearn/tests/test_serializer.py b/splearn/tests/test_serializer.py
index bf9cdb30b1085fae1a24a99066edc366ff56c9a4..3a510140b2627983e9881ad138d74957a90394a1 100644
--- a/splearn/tests/test_serializer.py
+++ b/splearn/tests/test_serializer.py
@@ -35,12 +35,14 @@
 # ######### COPYRIGHT #########
 import unittest
 import numpy as np
-import filecmp
 import os
 
 from splearn.automaton import Automaton
 from splearn.hankel import Hankel
+from splearn.serializer import Serializer
+from splearn.spectral import Spectral
 from splearn.tests.datasets.get_dataset_path import get_dataset_path
+from splearn.datasets.base import load_data_sample
 
 class UnitaryTest(unittest.TestCase):
 
@@ -62,13 +64,14 @@ class UnitaryTest(unittest.TestCase):
     def testWriteAutomata(self):
         for f in self.formats:
             Automaton.write(self.A, get_dataset_path(self.input_file + '_2.' + f), format=f)
-            self.assertTrue(filecmp.cmp(get_dataset_path(self.input_file + '_2.' + f), 
-                                        get_dataset_path(self.input_file + '.' + f)))
+            B = Automaton.read(get_dataset_path(self.input_file + '_2.' + f), format=f)
+            for w in self.words:
+                np.testing.assert_almost_equal(self.A.val(w), B.val(w))
         for f in self.formats:
-            os.remove(get_dataset_path(self.input_file + '_2.' + f))           
+            os.remove(get_dataset_path(self.input_file + '_2.' + f))
     
     def testReadHankel(self):
-        for f in self.formats:            
+        for f in self.formats:
             H = self.A.to_hankel([(), (0,), (1,)], [(), (0,), (1,)])
             Hankel.write(H, get_dataset_path(self.input_file + "_hankel" + "." + f), format=f)
             Hb = Hankel.read(get_dataset_path(self.input_file + "_hankel" + "." + f), format = f)
@@ -76,8 +79,40 @@ class UnitaryTest(unittest.TestCase):
             for w in self.words:
                 np.testing.assert_almost_equal(self.A.val(w), B.val(w))
         for f in self.formats:
-            os.remove(get_dataset_path(self.input_file + "_hankel" + "." + f))           
+            os.remove(get_dataset_path(self.input_file + "_hankel" + "." + f))
+    
+#     def testReadWriteRealHankel(self):
+#         adr = get_dataset_path("3.pautomac.train")
+#         data = load_data_sample(adr=adr)
+#         X = data.data
+#         sp = Spectral()
+#         sp = sp.fit(X)
+#         H = Hankel( sample_instance=X.sample,
+#                     lrows=6, lcolumns=6, version="classic",
+#                     partial=True, sparse=True, mode_quiet=True)
+#         for f in self.formats:
+#             Hankel.write(H, get_dataset_path("3.pautomac.train" + "_hankel" + "." + f), format=f)
+#             Hb = Hankel.read(get_dataset_path("3.pautomac.train" + "_hankel" + "." + f), format = f)
+#             self.assertEqual(H, Hb)
+#         for f in self.formats:
+#             os.remove(get_dataset_path("3.pautomac.train" + "_hankel" + "." + f))    
+    
+    def testOthersSerializationTypes(self):
+        data = [{'a' : 10, 40 : 'gu'}, {'toto', 5, 2.5, 'b'}, ('gh', 25, 'ko', 1.0)]
+        data_json_str = Serializer.data_to_json(data)
+        data_yaml_str = Serializer.data_to_yaml(data)
+        data_json = Serializer.json_to_data(data_json_str)
+        data_yaml = Serializer.yaml_to_data(data_yaml_str)
+        self.assertEqual(data, data_json)
+        self.assertEqual(data, data_yaml)
         
+        data = [1, 2, 3.0]
+        data_json_str = Serializer.data_to_json(data)
+        data_yaml_str = Serializer.data_to_yaml(data)
+        data_json = Serializer.json_to_data(data_json_str)
+        data_yaml = Serializer.yaml_to_data(data_yaml_str)
+        self.assertEqual(data, data_json)
+        self.assertEqual(data, data_yaml)
 
 if __name__ == "__main__":
     #import sys;sys.argv = ['', 'Test.testName']