From 53a9edeeaaf4f763bb7b0fd5f7b5f637dbfc807d Mon Sep 17 00:00:00 2001
From: Denis Arrivault <denis.arrivault@lif.univ-mrs.fr>
Date: Fri, 16 Feb 2018 18:41:44 +0100
Subject: [PATCH] Add some tests

---
 .gitignore                       | 13 +++++++++
 docker/Dockerfile                |  4 +--
 setup.cfg                        | 11 ++++----
 setup.py                         |  2 +-
 splearn/hankel.py                | 10 ++-----
 splearn/serializer.py            |  4 +++
 splearn/tests/test_serializer.py | 47 ++++++++++++++++++++++++++++----
 7 files changed, 68 insertions(+), 23 deletions(-)

diff --git a/.gitignore b/.gitignore
index cc53efa..41ddd69 100644
--- a/.gitignore
+++ b/.gitignore
@@ -87,3 +87,16 @@ scikit_gilearn.egg-info/
 scikit_splearn.egg-info/*
 .idea/
 *.tar.gz
+.pytest_cache/
+examples/simple_example-2.json.gv
+examples/simple_example-2.json.gv.pdf
+examples/simple_example-2.yaml.gv
+examples/simple_example-2.yaml.gv.pdf
+examples/simple_example.json
+examples/simple_example.json.gv
+examples/simple_example.json.gv.pdf
+examples/simple_example.yaml
+examples/simple_example.yaml.gv
+examples/simple_example.yaml.gv.pdf
+examples/simple_example_hankel.json
+examples/simple_example_hankel.yaml
diff --git a/docker/Dockerfile b/docker/Dockerfile
index d2d52e5..ff71df4 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -7,11 +7,11 @@ RUN apt-get update && apt-get install -y \
     python3-scipy \
     graphviz-dev
 RUN pip3 install --upgrade pip
-RUN pip3 install pyyaml nose coverage sphinx sphinxcontrib-bibtex
+RUN pip3 install pyyaml nose coverage pytest pytest-coverage pytest-html sphinx sphinxcontrib-bibtex
 
 # Copy the scikit-splearn sdist in the docker directory and uncomment the following line
 # if you want to include grackelpy sources in the docker image :
-# ADD scikit-splearn-1.1.0.tar.gz /
+ADD scikit-splearn-1.1.0.tar.gz /
 
 
 # cleanup
diff --git a/setup.cfg b/setup.cfg
index e44d1f8..413aca5 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,18 +5,17 @@ with-coverage=1
 cover-package=splearn
 cover-erase=1
 cover-html=1
-cover-html-dir=../htmlcov
+cover-html-dir=../build/htmlcov
 
 
 # Options for py.test command
-[pytest]
+[tool:pytest]
 # Specifies a minimal pytest version required for running tests.
-minversion = 2.6
+minversion = 3.0
 # Specifies the options
-addopts = --resultlog=pytests_results.txt -k "not _old" --cov-report term-missing --cov=sksplearn
+addopts = --cov-config .coveragerc --html=build/pytest_report.html -k "not _old" --cov-report html:build/htmlcov --cov=splearn
 # Set the directory basename patterns to avoid when recursing for test discovery.
-norecursedirs = .git sandboxes .settings .cache htmlcov doc references
-
+norecursedirs = .git sandboxes .settings .cache htmlcov doc references build
 
 [coverage:run]
 source=./splearn
diff --git a/setup.py b/setup.py
index 37c9e14..190a341 100755
--- a/setup.py
+++ b/setup.py
@@ -148,7 +148,7 @@ def setup_package():
                           read('HISTORY.rst') + '\n\n' +
                           read('AUTHORS.rst')),
         packages=["splearn", "splearn.datasets", "splearn.tests", "splearn.tests.datasets"],
-        package_data={'splearn.tests.datasets': ['*.*']},
+        package_data={'splearn.tests.datasets': ['*']},
         url="https://gitlab.lif.univ-mrs.fr/dominique.benielli/scikit-splearn.git",
         license='new BSD',
         author='François Denis and Rémi Eyraud and Denis Arrivault and Dominique Benielli',
diff --git a/splearn/hankel.py b/splearn/hankel.py
index 1758ac9..faff861 100644
--- a/splearn/hankel.py
+++ b/splearn/hankel.py
@@ -51,20 +51,14 @@ class Hankel(object):
     >>> pT = load_data_sample(adr=train_file)
     >>> sp = Spectral()
     >>> sp.fit(X=pT.data)
-    >>> lhankel = Hankel( sample=pT.sample, pref=pT.pref,
-    >>>                   suff=pT.suff, fact=pT.fact,
+    >>> lhankel = Hankel( sample_instance=pT.sample,
     >>>                   nbL=pT.nbL, nbEx=pT.nbEx,
     >>>                   lrows=6, lcolumns=6, version="classic",
     >>>                   partial=True, sparse=True, mode_quiet=True).lhankel
 
     - Input:
 
-    :param dict sample_instance: sample dictionary
-    :param dict pref: prefix dictionary
-    :param dict suff: suffix dictionary
-    :param dict fact: factor dictionary
-    :param int nbL: the number of letters
-    :param int nbS: the number of states
+    :param Splearn_array sample_instance: instance of Splearn_array
     :param lrows: number or list of rows,
         a list of strings if partial=True;
        otherwise, based on self.pref if version="classic" or
diff --git a/splearn/serializer.py b/splearn/serializer.py
index eae59a7..f027c9c 100644
--- a/splearn/serializer.py
+++ b/splearn/serializer.py
@@ -119,6 +119,10 @@ class Serializer(object):
 
     @staticmethod
     def __restore_yaml(data_str):
+        if data_str is None or isinstance(data_str, (bool, int, float, str)):
+            return data_str
+        if isinstance(data_str, list):
+            return [Serializer.__restore_yaml(k) for k in data_str]
         if "dict" in data_str:
             return dict(data_str["dict"])
         if "tuple" in data_str:
diff --git a/splearn/tests/test_serializer.py b/splearn/tests/test_serializer.py
index bf9cdb3..3a51014 100644
--- a/splearn/tests/test_serializer.py
+++ b/splearn/tests/test_serializer.py
@@ -35,12 +35,14 @@
 # ######### COPYRIGHT #########
 import unittest
 import numpy as np
-import filecmp
 import os
 
 from splearn.automaton import Automaton
 from splearn.hankel import Hankel
+from splearn.serializer import Serializer
+from splearn.spectral import Spectral
 from splearn.tests.datasets.get_dataset_path import get_dataset_path
+from splearn.datasets.base import load_data_sample
 
 
 class UnitaryTest(unittest.TestCase):
@@ -62,13 +64,14 @@ class UnitaryTest(unittest.TestCase):
     def testWriteAutomata(self):
         for f in self.formats:
             Automaton.write(self.A, get_dataset_path(self.input_file + '_2.' + f), format=f)
-            self.assertTrue(filecmp.cmp(get_dataset_path(self.input_file + '_2.' + f),
-                                        get_dataset_path(self.input_file + '.' + f)))
+            B = Automaton.read(get_dataset_path(self.input_file + '_2.' + f), format=f)
+            for w in self.words:
+                np.testing.assert_almost_equal(self.A.val(w), B.val(w))
         for f in self.formats:
-            os.remove(get_dataset_path(self.input_file + '_2.' + f))
+            os.remove(get_dataset_path(self.input_file + '_2.' + f))
 
     def testReadHankel(self):
-        for f in self.formats:
+        for f in self.formats:
             H = self.A.to_hankel([(), (0,), (1,)], [(), (0,), (1,)])
             Hankel.write(H, get_dataset_path(self.input_file + "_hankel" + "." + f), format=f)
             Hb = Hankel.read(get_dataset_path(self.input_file + "_hankel" + "." + f), format = f)
@@ -76,8 +79,40 @@ class UnitaryTest(unittest.TestCase):
             for w in self.words:
                 np.testing.assert_almost_equal(self.A.val(w), B.val(w))
         for f in self.formats:
-            os.remove(get_dataset_path(self.input_file + "_hankel" + "." + f))
+            os.remove(get_dataset_path(self.input_file + "_hankel" + "." + f))
+
+#    def testReadWriteRealHankel(self):
+#        adr = get_dataset_path("3.pautomac.train")
+#        data = load_data_sample(adr=adr)
+#        X = data.data
+#        sp = Spectral()
+#        sp = sp.fit(X)
+#        H = Hankel( sample_instance=X.sample,
+#                    lrows=6, lcolumns=6, version="classic",
+#                    partial=True, sparse=True, mode_quiet=True)
+#        for f in self.formats:
+#            Hankel.write(H, get_dataset_path("3.pautomac.train" + "_hankel" + "." + f), format=f)
+#            Hb = Hankel.read(get_dataset_path("3.pautomac.train" + "_hankel" + "." + f), format = f)
+#            self.assertEqual(H, Hb)
+#        for f in self.formats:
+#            os.remove(get_dataset_path("3.pautomac.train" + "_hankel" + "." + f))
+
+    def testOthersSerializationTypes(self):
+        data = [{'a' : 10, 40 : 'gu'}, {'toto', 5, 2.5, 'b'}, ('gh', 25, 'ko', 1.0)]
+        data_json_str = Serializer.data_to_json(data)
+        data_yaml_str = Serializer.data_to_yaml(data)
+        data_json = Serializer.json_to_data(data_json_str)
+        data_yaml = Serializer.yaml_to_data(data_yaml_str)
+        self.assertEqual(data, data_json)
+        self.assertEqual(data, data_yaml)
+        data = [1, 2, 3.0]
+        data_json_str = Serializer.data_to_json(data)
+        data_yaml_str = Serializer.data_to_yaml(data)
+        data_json = Serializer.json_to_data(data_json_str)
+        data_yaml = Serializer.yaml_to_data(data_yaml_str)
+        self.assertEqual(data, data_json)
+        self.assertEqual(data, data_yaml)
 
 
 if __name__ == "__main__":
     #import sys;sys.argv = ['', 'Test.testName']
--
GitLab