Commit ae3269f7 authored by bbauvin

Added tests and refactored

parent c85c2b96
-from Versions import testVersions
 if __name__=="__main__":
-    testVersions()
     import sys
     from MonoMultiViewClassifiers import ExecClassif
     ExecClassif.execClassif(sys.argv[1:])
@@ -22,8 +22,9 @@ def canProbas():
 def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs):
+    """Used to fit the monoview classifier with the args stored in kwargs"""
     num_estimators = int(kwargs['0'])
-    base_estimators = DecisionTreeClassifier()  # kwargs['1']
+    base_estimators = DecisionTreeClassifier()
     classifier = AdaBoostClassifier(n_estimators=num_estimators, base_estimator=base_estimators,
                                     random_state=randomState)
     classifier.fit(DATASET, CLASS_LABELS)
@@ -31,6 +32,7 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs):
 def paramsToSet(nIter, randomState):
+    """Used for weighted linear early fusion to generate random search sets"""
     paramsSet = []
     for _ in range(nIter):
         paramsSet.append([randomState.randint(1, 15), DecisionTreeClassifier()])
@@ -38,12 +40,15 @@ def paramsToSet(nIter, randomState):
 def getKWARGS(kwargsList):
+    """Used to format kwargs for the parsed args"""
     kwargsDict = {}
     for (kwargName, kwargValue) in kwargsList:
         if kwargName == "CL_Adaboost_n_est":
             kwargsDict['0'] = int(kwargValue)
         elif kwargName == "CL_Adaboost_b_est":
             kwargsDict['1'] = kwargValue
+        else:
+            raise ValueError("Wrong arguments served to Adaboost")
     return kwargsDict
@@ -74,7 +79,7 @@ def randomizedSearch(X_train, y_train, randomState, outputFileName, KFolds=4, me
 def getConfig(config):
-    if type(config) not in [list, dict]:
+    if type(config) not in [list, dict]:  # Used in late fusion when config is a classifier
         return "\n\t\t- Adaboost with num_esimators : " + str(config.n_estimators) + ", base_estimators : " + str(
             config.base_estimator)
     else:
@@ -84,6 +89,7 @@ def getConfig(config):
         return "\n\t\t- Adaboost with num_esimators : " + str(config["0"]) + ", base_estimators : " + str(
             config["1"])

 def getInterpret(classifier, directory):
     interpretString = getFeatureImportance(classifier, directory)
     return interpretString
\ No newline at end of file
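For context on how these helpers chain together, here is a minimal sketch (not part of the commit) that formats parsed arguments with getKWARGS, fits the classifier on toy data, and prints its configuration. It assumes the MonoMultiViewClassifiers package is importable and relies only on the fit signature and the CL_Adaboost_* argument names shown above.

import numpy as np
from MonoMultiViewClassifiers.MonoviewClassifiers import Adaboost

random_state = np.random.RandomState(42)
X = random_state.randint(0, 100, (10, 5))   # toy dataset: 10 samples, 5 features
y = random_state.randint(0, 2, 10)          # binary labels

# getKWARGS turns parsed (name, value) pairs into the '0'/'1' keys that fit and getConfig expect
kwargs = Adaboost.getKWARGS([("CL_Adaboost_n_est", "5"),
                             ("CL_Adaboost_b_est", "DecisionTreeClassifier")])
classifier = Adaboost.fit(X, y, random_state, NB_CORES=1, **kwargs)
print(Adaboost.getConfig(kwargs))           # summary string built from the '0' and '1' entries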
@@ -3,7 +3,6 @@ import os
 for module in os.listdir(os.path.dirname(os.path.realpath(__file__))):
     if module == '__init__.py' or module[-3:] == '.py' or module[-4:] == '.pyc' or module == '__pycache__':
         continue
-    print(module)
     __import__(module, locals(), globals(), [], 1)
 del module
 del os
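The bare __import__(module, locals(), globals(), [], 1) call above does a package-relative import of every sibling directory (entries ending in .py or .pyc are skipped). For readers unfamiliar with that idiom, a rough importlib-based sketch of the same pattern, meant to sit in a package __init__ and not taken from the project:

import importlib
import os
import pkgutil

package_dir = os.path.dirname(os.path.realpath(__file__))
for _, name, is_pkg in pkgutil.iter_modules([package_dir]):
    if is_pkg:  # like the loop above, only sub-packages (directories) are imported
        importlib.import_module("." + name, package=__name__)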
@@ -107,7 +107,7 @@ def confirm(resp=True, timeout=15):
 def input_(timeout=15):
     """used as a UI to stop if too much HDD space will be used"""
-    print("You have " + str(timeout) + " seconds to stop the script by typing n")
+    print("You have " + str(timeout) + " seconds to stop the dataset copy by typing n")
     i, o, e = select.select([sys.stdin], [], [], timeout)
     if i:
         return sys.stdin.readline().strip()
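The reworded prompt above belongs to a select-based timeout: the script waits a bounded time for the user to type n before the dataset copy goes ahead. A self-contained sketch of that pattern (the helper name is invented, and select on sys.stdin only works on Unix-like systems):

import select
import sys

def timed_input(prompt, timeout=15, default=""):
    """Wait up to `timeout` seconds for a line on stdin, otherwise return `default`."""
    print(prompt)
    ready, _, _ = select.select([sys.stdin], [], [], timeout)
    if ready:
        return sys.stdin.readline().strip()
    return default

# answer = timed_input("You have 15 seconds to stop the dataset copy by typing n")
# proceed = (answer.lower() != "n")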
# if __name__=="__main__":
#     import unittest
#     from .Tests.test_ExecClassif import suite
#
#     runner = unittest.TextTestRunner()
#     runner.run(suite())
\ No newline at end of file
import unittest

import numpy as np

from ...MonoMultiViewClassifiers.MonoviewClassifiers import Adaboost


class Test_fit(unittest.TestCase):

    def setUp(self):
        self.random_state = np.random.RandomState(42)
        self.dataset = self.random_state.randint(0, 100, (10, 5))
        self.labels = self.random_state.randint(0, 2, 10)
        self.kwargs = {"0": 5}
        self.classifier = Adaboost.fit(self.dataset, self.labels, 42, NB_CORES=1, **self.kwargs)

    def test_fit_kwargs_string(self):
        self.kwargs = {"0": "5"}
        classifier = Adaboost.fit(self.dataset, self.labels, 42, NB_CORES=1, **self.kwargs)
        self.assertEqual(classifier.n_estimators, 5)

    def test_fit_kwargs_int(self):
        self.kwargs = {"0": 5}
        classifier = Adaboost.fit(self.dataset, self.labels, 42, NB_CORES=1, **self.kwargs)
        self.assertEqual(classifier.n_estimators, 5)

    def test_fit_labels(self):
        predicted_labels = self.classifier.predict(self.dataset)
        np.testing.assert_array_equal(predicted_labels, self.labels)
\ No newline at end of file
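Since this new test module reaches the package through a relative import (from ...MonoMultiViewClassifiers...), it cannot be run as a standalone script; it has to be loaded with the package root importable. A minimal runner sketch, where the dotted module path is only an assumption about where the file lives:

import unittest

loader = unittest.TestLoader()
# hypothetical dotted path; adjust it to wherever test_Adaboost.py actually sits in the package
suite = loader.loadTestsFromName("Code.Tests.Test_MonoviewClassifiers.test_Adaboost")
unittest.TextTestRunner(verbosity=2).run(suite)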
@@ -6,7 +6,7 @@ import numpy as np
 from sklearn.model_selection import StratifiedShuffleSplit

-from MonoMultiViewClassifiers.utils import execution
+from ...MonoMultiViewClassifiers.utils import execution


 class Test_parseTheArgs(unittest.TestCase):
@@ -23,16 +23,16 @@ class Test_initRandomState(unittest.TestCase):
     def test_random_state_42(self):
         randomState_42 = np.random.RandomState(42)
-        randomState = execution.initRandomState("42", "Tests/temp_tests/")
-        os.remove("Tests/temp_tests/randomState.pickle")
+        randomState = execution.initRandomState("42", "Code/Tests/temp_tests/")
+        os.remove("Code/Tests/temp_tests/randomState.pickle")
         np.testing.assert_array_equal(randomState.beta(1,100,100),
                                       randomState_42.beta(1,100,100))

     def test_random_state_pickle(self):
-        randomState_to_pickle = execution.initRandomState(None, "Tests/temp_tests/")
-        pickled_randomState = execution.initRandomState("Tests/temp_tests/randomState.pickle",
-                                                        "Tests/temp_tests/")
-        os.remove("Tests/temp_tests/randomState.pickle")
+        randomState_to_pickle = execution.initRandomState(None, "Code/Tests/temp_tests/")
+        pickled_randomState = execution.initRandomState("Code/Tests/temp_tests/randomState.pickle",
+                                                        "Code/Tests/temp_tests/")
+        os.remove("Code/Tests/temp_tests/randomState.pickle")
         np.testing.assert_array_equal(randomState_to_pickle.beta(1,100,100),
                                       pickled_randomState.beta(1,100,100))
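These two tests pin down the contract of execution.initRandomState: a seed passed as a string yields a reproducible RandomState that is pickled into the given directory, and a path to that pickle restores the saved state. A behavioural sketch of that contract, not the project's actual implementation:

import os
import pickle
import numpy as np

def init_random_state_sketch(arg, directory):
    """Sketch: rebuild or reload a numpy RandomState the way the tests above expect."""
    if arg is not None and os.path.isfile(arg):
        # a path to a previously pickled state: reload it
        with open(arg, "rb") as handle:
            return pickle.load(handle)
    # a seed given as a string, or None for a non-deterministic state
    random_state = np.random.RandomState(int(arg) if arg is not None else None)
    with open(os.path.join(directory, "randomState.pickle"), "wb") as handle:
        pickle.dump(random_state, handle)
    return random_state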
 import unittest
 import argparse

-from MonoMultiViewClassifiers import ExecClassif
+from ..MonoMultiViewClassifiers import ExecClassif


 class Test_initBenchmark(unittest.TestCase):
@@ -23,12 +23,12 @@ class Test_initMonoviewArguments(unittest.TestCase):
     def test_initMonoviewArguments_no_monoview(self):
         benchmark = {"Monoview":{}, "Multiview":{}}
-        arguments = ExecClassif.initMonoviewArguments(benchmark, {}, [], [], None, 0, {})
+        arguments = ExecClassif.initMonoviewExps(benchmark, {}, [], None, 0, {})
         self.assertEqual(arguments, {})

     def test_initMonoviewArguments_empty(self):
         benchmark = {"Monoview":{}, "Multiview":{}}
-        arguments = ExecClassif.initMonoviewArguments(benchmark, {}, [], [], None, 0, {})
+        arguments = ExecClassif.initMonoviewExps(benchmark, {}, [], None, 0, {})

 class Essai(unittest.TestCase):
@@ -203,3 +203,10 @@ class Essai(unittest.TestCase):
                                 help='Determine which method to use to select the monoview classifiers',
                                 default="intersect")
         self.args = parser.parse_args([])
+
+
+def suite():
+    suite = unittest.TestSuite()
+    suite.addTest(Test_initBenchmark('test_initKWARGSFunc_no_monoview'))
+    # suite.addTest(WidgetTestCase('test_widget_resize'))
+    return suite
\ No newline at end of file
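The new suite() mirrors the commented-out runner that appears earlier in this diff; a minimal sketch of invoking it, with the import path left as an assumption:

import unittest
from Code.Tests import test_ExecClassif  # hypothetical package path

runner = unittest.TextTestRunner()
runner.run(test_ExecClassif.suite())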
 from . import MonoMultiViewClassifiers, Tests, Exec
-import pdb;pdb.set_trace()
+# import pdb;pdb.set_trace()
\ No newline at end of file
File moved