Skip to content
Snippets Groups Projects
Commit 93c22046 authored by Baptiste Bauvin's avatar Baptiste Bauvin
Browse files

Removed commentaries

parent 9a18669a
No related branches found
No related tags found
No related merge requests found
...@@ -499,308 +499,6 @@ class Test_get_path_dict(unittest.TestCase): ...@@ -499,308 +499,6 @@ class Test_get_path_dict(unittest.TestCase):
        self.assertEqual(path_dict, {"a.b.c.d.e":1, "a.b.c.d.f":[1]})
#
# class Test_analyzeMulticlass(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.flags = [[0, [0,1]], [0, [0,2]], [0, [0,3]], [0, [1,2]], [0, [1,3]], [0, [2,3]],
# [1, [0,1]], [1, [0,2]], [1, [0,3]], [1, [1,2]], [1, [1,3]], [1, [2,3]]]
# cls.preds = [np.array([1, 0, 1, 1, 1]), np.array([1,0,0,1,1]), np.array([1,0,0,0,1]), np.array([1,1,0,1,1]),
# np.array([1,1,0,0,1]), np.array([1,1,1,0,1])] + \
# [np.array([0 in range(5)]) for i in range(6)]
# cls.preds2 = [np.array([0 in range(5)]) for i in range(6)] + \
# [np.array([1, 0, 1, 1, 1]), np.array([1,0,0,1,1]),
# np.array([1,0,0,0,1]), np.array([1,1,0,1,1]), np.array([1,1,0,0,1]), np.array([1,1,1,0,1])]
# cls.classifiers_names = ["chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven",
# "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven",
# "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven",]
# cls.classifiersNames2 = ["cheese_is_no_disease", "cheese_is_no_disease", "cheese_is_no_disease",
# "cheese_is_no_disease", "cheese_is_no_disease", "cheese_is_no_disease",
# "cheese_is_no_disease", "cheese_is_no_disease", "cheese_is_no_disease",
# "cheese_is_no_disease", "cheese_is_no_disease", "cheese_is_no_disease"]
# cls.results = [[flag, [["", [name, "", "", pred]], ["", [name1, "", "", pred1]]], ["", ""]]
# for flag, name, pred, name1, pred1 in zip(cls.flags, cls.classifiers_names, cls.preds,
# cls.classifiersNames2, cls.preds2)]
# # cls.results = [[flag, ["", ["", name, "", pred]], ""] for flag, pred, name in
# # zip(cls.flags, cls.preds, cls.classifiers_names)]
# cls.statsIter = 2
# cls.nbExample = 5
# cls.nbLabels = 4
# cls.true_labels = np.array([0,1,2,3,0])
# cls.metrics = [["accuracy_score"]]
#
# def test_simple(cls):
# multiclassResults = ExecClassif.analyzeMulticlass(cls.results, cls.statsIter, cls.nbExample, cls.nbLabels, cls.true_labels, [["accuracy_score"]])
# np.testing.assert_array_equal(multiclassResults[1]["chicken_is_heaven"]["labels"], cls.true_labels)
#
# class Test_genMetricsScores(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.multiclass_labels = np.array([0,1,2,3,4,5,2,1,3])
# cls.wrong_labels = np.array([1,3,3,4,5,0,2,4,3])
# cls.multiclassResults = [{"chicken_is_heaven":
# {"labels": cls.multiclass_labels}}]
# cls.true_labels = np.array([0,2,2,3,4,5,1,3,2])
# cls.metrics = [["accuracy_score"]]
# cls.score_to_get = accuracy_score(cls.true_labels, cls.multiclass_labels)
#
# def test_simple(cls):
# multiclassResults = ExecClassif.genMetricsScores(cls.multiclassResults, cls.true_labels, cls.metrics)
# cls.assertEqual(cls.score_to_get, multiclassResults[0]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
#
# def test_multiple_clf(cls):
# cls.multiclassResults = [{"chicken_is_heaven": {"labels": cls.multiclass_labels},
# "cheese_is_no_disease": {"labels": cls.wrong_labels}},
# ]
# multiclassResults = ExecClassif.genMetricsScores(cls.multiclassResults, cls.true_labels, cls.metrics)
# cls.assertEqual(0, multiclassResults[0]["cheese_is_no_disease"]["metricsScores"]["accuracy_score"])
# cls.assertEqual(cls.score_to_get, multiclassResults[0]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
#
# def test_multiple_metrics(cls):
# from sklearn.metrics import f1_score
# cls.score_to_get_f1 = f1_score(cls.true_labels, cls.multiclass_labels, average="micro")
# cls.metrics = [["accuracy_score"], ["f1_score"]]
# multiclassResults = ExecClassif.genMetricsScores(cls.multiclassResults, cls.true_labels, cls.metrics)
# cls.assertEqual(cls.score_to_get, multiclassResults[0]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
# cls.assertEqual(cls.score_to_get_f1, multiclassResults[0]["chicken_is_heaven"]["metricsScores"]["f1_score"])
#
# def test_multiple_iterations(cls):
# cls.multiclassResults = [{"chicken_is_heaven": {"labels": cls.multiclass_labels}},
# {"chicken_is_heaven": {"labels": cls.wrong_labels}},
# ]
# multiclassResults = ExecClassif.genMetricsScores(cls.multiclassResults, cls.true_labels, cls.metrics)
# cls.assertEqual(0, multiclassResults[1]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
# cls.assertEqual(cls.score_to_get, multiclassResults[0]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
#
# def test_all(cls):
# cls.multiclassResults = [{"chicken_is_heaven": {"labels": cls.multiclass_labels},
# "cheese_is_no_disease": {"labels": cls.wrong_labels}},
# {"chicken_is_heaven": {"labels": cls.wrong_labels},
# "cheese_is_no_disease": {"labels": cls.multiclass_labels}},
# ]
# cls.metrics = [["accuracy_score"], ["f1_score"]]
# from sklearn.metrics import f1_score
# cls.score_to_get_f1 = f1_score(cls.true_labels, cls.multiclass_labels, average="micro")
# multiclassResults = ExecClassif.genMetricsScores(cls.multiclassResults, cls.true_labels, cls.metrics)
# cls.assertEqual(0, multiclassResults[1]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
# cls.assertEqual(cls.score_to_get_f1, multiclassResults[1]["cheese_is_no_disease"]["metricsScores"]["f1_score"])
#
#
# class Test_getErrorOnLabels(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.multiclass_labels = np.array([0,1,2,3,4,5,2,1,3])
# cls.wrong_labels = np.array([1,3,3,4,5,0,2,4,3])
# cls.multiclassResults = [{"chicken_is_heaven":
# {"labels": cls.multiclass_labels}}]
# cls.true_labels = np.array([0,2,2,3,4,5,1,3,2])
#
# def test_simple(cls):
# multiclassResults = ExecClassif.getErrorOnLabels(cls.multiclassResults, cls.true_labels)
# np.testing.assert_array_equal(np.array([1, 0, 1, 1, 1, 1, 0, 0, 0]),
# multiclassResults[0]["chicken_is_heaven"]["errorOnExample"])
#
# def test_full(cls):
# cls.multiclassResults = [{"chicken_is_heaven": {"labels": cls.multiclass_labels},
# "cheese_is_no_disease": {"labels": cls.wrong_labels}},
# {"chicken_is_heaven": {"labels": cls.wrong_labels},
# "cheese_is_no_disease": {"labels": cls.wrong_labels}},
# ]
# multiclassResults = ExecClassif.getErrorOnLabels(cls.multiclassResults, cls.true_labels)
# np.testing.assert_array_equal(np.array([1, 0, 1, 1, 1, 1, 0, 0, 0]),
# multiclassResults[0]["chicken_is_heaven"]["errorOnExample"])
# np.testing.assert_array_equal(np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]),
# multiclassResults[1]["cheese_is_no_disease"]["errorOnExample"])
#
# def test_type(cls):
# multiclassResults = ExecClassif.getErrorOnLabels(cls.multiclassResults, cls.true_labels)
# cls.assertEqual(type(multiclassResults[0]["chicken_is_heaven"]["errorOnExample"][0]), np.int64)
# np.testing.assert_array_equal(np.array([1, 0, 1, 1, 1, 1, 0, 0, 0]),
# multiclassResults[0]["chicken_is_heaven"]["errorOnExample"])
#
# class Essai(unittest.TestCase):
#
# def setUp(self):
# parser = argparse.ArgumentParser(
# description='This file is used to benchmark the scores fo multiple classification algorithm on multiview data.',
# formatter_class=argparse.ArgumentDefaultsHelpFormatter)
#
# groupStandard = parser.add_argument_group('Standard arguments')
# groupStandard.add_argument('-log', action='store_true', help='Use option to activate Logging to Console')
# groupStandard.add_argument('--name', metavar='STRING', action='store', help='Name of Database (default: %(default)s)',
# default='Plausible')
# groupStandard.add_argument('--type', metavar='STRING', action='store',
# help='Type of database : .hdf5 or .csv (default: %(default)s)',
# default='.hdf5')
# groupStandard.add_argument('--views', metavar='STRING', action='store', nargs="+",
# help='Name of the views selected for learning (default: %(default)s)',
# default=[''])
# groupStandard.add_argument('--pathF', metavar='STRING', action='store', help='Path to the views (default: %(default)s)',
# default='/home/bbauvin/Documents/data/Data_multi_omics/')
# groupStandard.add_argument('--nice', metavar='INT', action='store', type=int,
# help='Niceness for the process', default=0)
# groupStandard.add_argument('--random_state', metavar='STRING', action='store',
# help="The random state seed to use or a file where we can find it's get_state", default=None)
#
# groupClass = parser.add_argument_group('Classification arguments')
# groupClass.add_argument('--CL_split', metavar='FLOAT', action='store',
# help='Determine the split between learning and validation sets', type=float,
# default=0.2)
# groupClass.add_argument('--CL_nbFolds', metavar='INT', action='store', help='Number of folds in cross validation',
# type=int, default=2)
# groupClass.add_argument('--CL_nb_class', metavar='INT', action='store', help='Number of classes, -1 for all', type=int,
# default=2)
# groupClass.add_argument('--CL_classes', metavar='STRING', action='store', nargs="+",
# help='Classes used in the dataset (names of the folders) if not filled, random classes will be '
# 'selected ex. walrus mole leopard', default=["yes", "no"])
# groupClass.add_argument('--CL_type', metavar='STRING', action='store', nargs="+",
# help='Determine whether to use multiview and/or monoview, or Benchmark',
# default=['Benchmark'])
# groupClass.add_argument('--CL_algos_monoview', metavar='STRING', action='store', nargs="+",
# help='Determine which monoview classifier to use if empty, considering all',
# default=[''])
# groupClass.add_argument('--CL_algos_multiview', metavar='STRING', action='store', nargs="+",
# help='Determine which multiview classifier to use if empty, considering all',
# default=[''])
# groupClass.add_argument('--CL_cores', metavar='INT', action='store', help='Number of cores, -1 for all', type=int,
# default=2)
# groupClass.add_argument('--CL_statsiter', metavar='INT', action='store',
# help="Number of iteration for each algorithm to mean preds if using multiple cores, it's highly recommended to use statsiter mod(nbCores) = 0",
# type=int,
# default=2)
# groupClass.add_argument('--CL_metrics', metavar='STRING', action='store', nargs="+",
# help='Determine which metrics to use, separate metric and configuration with ":".'
# ' If multiple, separate with space. If no metric is specified, '
# 'considering all with accuracy for classification '
# , default=[''])
# groupClass.add_argument('--CL_metric_princ', metavar='STRING', action='store',
# help='Determine which metric to use for randomSearch and optimization', default="f1_score")
# groupClass.add_argument('--CL_GS_iter', metavar='INT', action='store',
# help='Determine how many Randomized grid search tests to do', type=int, default=2)
# groupClass.add_argument('--CL_HPS_type', metavar='STRING', action='store',
# help='Determine which hyperparamter search function use', default="randomizedSearch")
#
# groupRF = parser.add_argument_group('Random Forest arguments')
# groupRF.add_argument('--CL_RandomForest_trees', metavar='INT', type=int, action='store', help='Number max trees',
# default=25)
# groupRF.add_argument('--CL_RandomForest_max_depth', metavar='INT', type=int, action='store',
# help='Max depth for the trees',
# default=5)
# groupRF.add_argument('--CL_RandomForest_criterion', metavar='STRING', action='store', help='Criterion for the trees',
# default="entropy")
#
# groupSVMLinear = parser.add_argument_group('Linear SVM arguments')
# groupSVMLinear.add_argument('--CL_SVMLinear_C', metavar='INT', type=int, action='store', help='Penalty parameter used',
# default=1)
#
# groupSVMRBF = parser.add_argument_group('SVW-RBF arguments')
# groupSVMRBF.add_argument('--CL_SVMRBF_C', metavar='INT', type=int, action='store', help='Penalty parameter used',
# default=1)
#
# groupSVMPoly = parser.add_argument_group('Poly SVM arguments')
# groupSVMPoly.add_argument('--CL_SVMPoly_C', metavar='INT', type=int, action='store', help='Penalty parameter used',
# default=1)
# groupSVMPoly.add_argument('--CL_SVMPoly_deg', metavar='INT', type=int, action='store', help='Degree parameter used',
# default=2)
#
# groupAdaboost = parser.add_argument_group('Adaboost arguments')
# groupAdaboost.add_argument('--CL_Adaboost_n_est', metavar='INT', type=int, action='store', help='Number of estimators',
# default=2)
# groupAdaboost.add_argument('--CL_Adaboost_b_est', metavar='STRING', action='store', help='Estimators',
# default='DecisionTreeClassifier')
#
# groupDT = parser.add_argument_group('Decision Trees arguments')
# groupDT.add_argument('--CL_DecisionTree_depth', metavar='INT', type=int, action='store',
# help='Determine max depth for Decision Trees', default=3)
# groupDT.add_argument('--CL_DecisionTree_criterion', metavar='STRING', action='store',
# help='Determine max depth for Decision Trees', default="entropy")
# groupDT.add_argument('--CL_DecisionTree_splitter', metavar='STRING', action='store',
# help='Determine criterion for Decision Trees', default="random")
#
# groupSGD = parser.add_argument_group('SGD arguments')
# groupSGD.add_argument('--CL_SGD_alpha', metavar='FLOAT', type=float, action='store',
# help='Determine alpha for SGDClassifier', default=0.1)
# groupSGD.add_argument('--CL_SGD_loss', metavar='STRING', action='store',
# help='Determine loss for SGDClassifier', default='log')
# groupSGD.add_argument('--CL_SGD_penalty', metavar='STRING', action='store',
# help='Determine penalty for SGDClassifier', default='l2')
#
# groupKNN = parser.add_argument_group('KNN arguments')
# groupKNN.add_argument('--CL_KNN_neigh', metavar='INT', type=int, action='store',
# help='Determine number of neighbors for KNN', default=1)
# groupKNN.add_argument('--CL_KNN_weights', metavar='STRING', action='store',
# help='Determine number of neighbors for KNN', default="distance")
# groupKNN.add_argument('--CL_KNN_algo', metavar='STRING', action='store',
# help='Determine number of neighbors for KNN', default="auto")
# groupKNN.add_argument('--CL_KNN_p', metavar='INT', type=int, action='store',
# help='Determine number of neighbors for KNN', default=1)
#
# groupSCM = parser.add_argument_group('SCM arguments')
# groupSCM.add_argument('--CL_SCM_max_rules', metavar='INT', type=int, action='store',
# help='Max number of rules for SCM', default=1)
# groupSCM.add_argument('--CL_SCM_p', metavar='FLOAT', type=float, action='store',
# help='Max number of rules for SCM', default=1.0)
# groupSCM.add_argument('--CL_SCM_model_type', metavar='STRING', action='store',
# help='Max number of rules for SCM', default="conjunction")
#
# groupMumbo = parser.add_argument_group('Mumbo arguments')
# groupMumbo.add_argument('--MU_types', metavar='STRING', action='store', nargs="+",
# help='Determine which monoview classifier to use with Mumbo',
# default=[''])
# groupMumbo.add_argument('--MU_config', metavar='STRING', action='store', nargs='+',
# help='Configuration for the monoview classifier in Mumbo separate each classifier with sapce and each argument with:',
# default=[''])
# groupMumbo.add_argument('--MU_iter', metavar='INT', action='store', nargs=3,
# help='Max number of iteration, min number of iteration, convergence threshold', type=float,
# default=[10, 1, 0.01])
# groupMumbo.add_argument('--MU_combination', action='store_true',
# help='Try all the monoview classifiers combinations for each view',
# default=False)
#
#
# groupFusion = parser.add_argument_group('fusion arguments')
# groupFusion.add_argument('--FU_types', metavar='STRING', action='store', nargs="+",
# help='Determine which type of fusion to use',
# default=[''])
# groupEarlyFusion = parser.add_argument_group('Early fusion arguments')
# groupEarlyFusion.add_argument('--FU_early_methods', metavar='STRING', action='store', nargs="+",
# help='Determine which early fusion method of fusion to use',
# default=[''])
# groupEarlyFusion.add_argument('--FU_E_method_configs', metavar='STRING', action='store', nargs='+',
# help='Configuration for the early fusion methods separate '
# 'method by space and values by :',
# default=[''])
# groupEarlyFusion.add_argument('--FU_E_cl_config', metavar='STRING', action='store', nargs='+',
# help='Configuration for the monoview classifiers used separate classifier by space '
# 'and configs must be of form argument1_name:value,argument2_name:value',
# default=[''])
# groupEarlyFusion.add_argument('--FU_E_cl_names', metavar='STRING', action='store', nargs='+',
# help='Name of the classifiers used for each early fusion method', default=[''])
#
# groupLateFusion = parser.add_argument_group('Late Early fusion arguments')
# groupLateFusion.add_argument('--FU_late_methods', metavar='STRING', action='store', nargs="+",
# help='Determine which late fusion method of fusion to use',
# default=[''])
# groupLateFusion.add_argument('--FU_L_method_config', metavar='STRING', action='store', nargs='+',
# help='Configuration for the fusion method', default=[''])
# groupLateFusion.add_argument('--FU_L_cl_config', metavar='STRING', action='store', nargs='+',
# help='Configuration for the monoview classifiers used', default=[''])
# groupLateFusion.add_argument('--FU_L_cl_names', metavar='STRING', action='store', nargs="+",
# help='Names of the classifier used for late fusion', default=[''])
# groupLateFusion.add_argument('--FU_L_select_monoview', metavar='STRING', action='store',
# help='Determine which method to use to select the monoview classifiers',
# default="intersect")
# self.args = parser.parse_args([])
# def suite():
# suite = unittest.TestSuite()
# suite.addTest(Test_initBenchmark('test_initKWARGSFunc_no_monoview'))
# # suite.addTest(WidgetTestCase('test_widget_resize'))
# return suite
if __name__ == '__main__':
    unittest.main()
\ No newline at end of file
...@@ -5,780 +5,12 @@ import h5py ...@@ -5,780 +5,12 @@ import h5py
import numpy as np
from ...mono_multi_view_classifiers.utils import get_multiview_db
# <<<<<<< HEAD
# from ..utils import rm_tmp, tmp_path
#
#
# class Test_copyhdf5Dataset(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# rm_tmp()
# cls.random_state = np.random.RandomState(42)
# if not os.path.exists("multiview_platform/tests/tmp_tests"):
# os.mkdir("multiview_platform/tests/tmp_tests")
# cls.dataset_file = h5py.File(
# tmp_path+"test_copy.hdf5", "w")
# cls.dataset = cls.dataset_file.create_dataset("test",
# data=cls.random_state.randint(
# 0, 100, (10, 20)))
# cls.dataset.attrs["test_arg"] = "Am I copied"
#
# def test_simple_copy(self):
# get_multiview_db.copyhdf5_dataset(self.dataset_file, self.dataset_file,
# "test", "test_copy_1", np.arange(10))
# np.testing.assert_array_equal(self.dataset_file.get("test").value,
# self.dataset_file.get("test_copy_1").value)
# self.assertEqual("Am I copied",
# self.dataset_file.get("test_copy_1").attrs["test_arg"])
#
# def test_copy_only_some_indices(self):
# usedIndices = self.random_state.choice(10, 6, replace=False)
# get_multiview_db.copyhdf5_dataset(self.dataset_file, self.dataset_file,
# "test", "test_copy", usedIndices)
# np.testing.assert_array_equal(
# self.dataset_file.get("test").value[usedIndices, :],
# self.dataset_file.get("test_copy").value)
# self.assertEqual("Am I copied",
# self.dataset_file.get("test_copy").attrs["test_arg"])
#
# @classmethod
# def tearDownClass(cls):
# os.remove(tmp_path+"test_copy.hdf5")
# os.rmdir("multiview_platform/tests/tmp_tests")
#
#
# class Test_filterViews(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# rm_tmp()
# cls.random_state = np.random.RandomState(42)
# cls.views = ["test_view_1", "test_view_2"]
# if not os.path.exists("multiview_platform/tests/tmp_tests"):
# os.mkdir("multiview_platform/tests/tmp_tests")
# cls.dataset_file = h5py.File(
# tmp_path+"test_copy.hdf5", "w")
# cls.metadata_group = cls.dataset_file.create_group("Metadata")
# cls.metadata_group.attrs["nbView"] = 4
#
# for i in range(4):
# cls.dataset = cls.dataset_file.create_dataset("View" + str(i),
# data=cls.random_state.randint(
# 0, 100, (10, 20)))
# cls.dataset.attrs["name"] = "test_view_" + str(i)
#
# def test_simple_filter(self):
# self.temp_dataset_file = h5py.File(
# tmp_path+"test_copy_temp.hdf5", "w")
# self.dataset_file.copy("Metadata", self.temp_dataset_file)
# get_multiview_db.filter_views(self.dataset_file, self.temp_dataset_file,
# self.views, np.arange(10))
# self.assertEqual(self.dataset_file.get("View1").attrs["name"],
# self.temp_dataset_file.get("View0").attrs["name"])
# np.testing.assert_array_equal(self.dataset_file.get("View2").value,
# self.temp_dataset_file.get("View1").value)
# self.assertEqual(self.temp_dataset_file.get("Metadata").attrs["nbView"],
# 2)
#
# def test_filter_view_and_examples(self):
# self.temp_dataset_file = h5py.File(
# tmp_path+"test_copy_temp.hdf5", "w")
# self.dataset_file.copy("Metadata", self.temp_dataset_file)
# usedIndices = self.random_state.choice(10, 6, replace=False)
# get_multiview_db.filter_views(self.dataset_file, self.temp_dataset_file,
# self.views, usedIndices)
# np.testing.assert_array_equal(
# self.dataset_file.get("View1").value[usedIndices, :],
# self.temp_dataset_file.get("View0").value)
# self.temp_dataset_file.close()
#
# @classmethod
# def tearDownClass(cls):
# os.remove(tmp_path+"test_copy.hdf5")
# os.remove(tmp_path+"test_copy_temp.hdf5")
# os.rmdir("multiview_platform/tests/tmp_tests")
# =======
from ..utils import rm_tmp, tmp_path, test_dataset
# >>>>>>> 66129965ae7c38b4bdb4ae657369ab24357070cd
# class Test_copyhdf5Dataset(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# rm_tmp()
# cls.random_state = np.random.RandomState(42)
# if not os.path.exists("multiview_platform/tests/tmp_tests"):
# os.mkdir("multiview_platform/tests/tmp_tests")
# cls.dataset_file = h5py.File(
# tmp_path+"test_copy.hdf5", "w")
# cls.dataset = cls.dataset_file.create_dataset("test",
# data=cls.random_state.randint(
# 0, 100, (10, 20)))
# cls.dataset.attrs["test_arg"] = "Am I copied"
#
# def test_simple_copy(cls):
# get_multiview_db.copyhdf5_dataset(cls.dataset_file, cls.dataset_file,
# "test", "test_copy_1", np.arange(10))
# np.testing.assert_array_equal(cls.dataset_file.get("test").value,
# cls.dataset_file.get("test_copy_1").value)
# cls.assertEqual("Am I copied",
# cls.dataset_file.get("test_copy_1").attrs["test_arg"])
#
# def test_copy_only_some_indices(cls):
# usedIndices = cls.random_state.choice(10, 6, replace=False)
# get_multiview_db.copyhdf5_dataset(cls.dataset_file, cls.dataset_file,
# "test", "test_copy", usedIndices)
# np.testing.assert_array_equal(
# cls.dataset_file.get("test").value[usedIndices, :],
# cls.dataset_file.get("test_copy").value)
# cls.assertEqual("Am I copied",
# cls.dataset_file.get("test_copy").attrs["test_arg"])
#
# @classmethod
# def tearDownClass(cls):
# os.remove(tmp_path+"test_copy.hdf5")
# os.rmdir("multiview_platform/tests/tmp_tests")
#
#
# class Test_filterViews(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# rm_tmp()
# cls.random_state = np.random.RandomState(42)
# cls.views = ["test_view_1", "test_view_2"]
# if not os.path.exists("multiview_platform/tests/tmp_tests"):
# os.mkdir("multiview_platform/tests/tmp_tests")
# cls.dataset_file = h5py.File(
# tmp_path+"test_copy.hdf5", "w")
# cls.metadata_group = cls.dataset_file.create_group("Metadata")
# cls.metadata_group.attrs["nbView"] = 4
#
# for i in range(4):
# cls.dataset = cls.dataset_file.create_dataset("View" + str(i),
# data=cls.random_state.randint(
# 0, 100, (10, 20)))
# cls.dataset.attrs["name"] = "test_view_" + str(i)
#
# def test_simple_filter(cls):
# cls.temp_dataset_file = h5py.File(
# tmp_path+"test_copy_temp.hdf5", "w")
# cls.dataset_file.copy("Metadata", cls.temp_dataset_file)
# get_multiview_db.filter_views(cls.dataset_file, cls.temp_dataset_file,
# cls.views, np.arange(10))
# cls.assertEqual(cls.dataset_file.get("View1").attrs["name"],
# cls.temp_dataset_file.get("View0").attrs["name"])
# np.testing.assert_array_equal(cls.dataset_file.get("View2").value,
# cls.temp_dataset_file.get("View1").value)
# cls.assertEqual(cls.temp_dataset_file.get("Metadata").attrs["nbView"],
# 2)
#
# def test_filter_view_and_examples(cls):
# cls.temp_dataset_file = h5py.File(
# tmp_path+"test_copy_temp.hdf5", "w")
# cls.dataset_file.copy("Metadata", cls.temp_dataset_file)
# usedIndices = cls.random_state.choice(10, 6, replace=False)
# get_multiview_db.filter_views(cls.dataset_file, cls.temp_dataset_file,
# cls.views, usedIndices)
# np.testing.assert_array_equal(
# cls.dataset_file.get("View1").value[usedIndices, :],
# cls.temp_dataset_file.get("View0").value)
# cls.temp_dataset_file.close()
#
# @classmethod
# def tearDownClass(cls):
# os.remove(tmp_path+"test_copy.hdf5")
# os.remove(tmp_path+"test_copy_temp.hdf5")
# os.rmdir("multiview_platform/tests/tmp_tests")
#
#
# #
# class Test_filterLabels(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.random_state = np.random.RandomState(42)
# cls.labelsSet = set(range(4))
# cls.askedLabelsNamesSet = {"test_label_1", "test_label_3"}
# cls.fullLabels = cls.random_state.randint(0, 4, 10)
# cls.availableLabelsNames = ["test_label_0", "test_label_1",
# "test_label_2", "test_label_3"]
# cls.askedLabelsNames = ["test_label_1", "test_label_3"]
#
# def test_simple(cls):
# newLabels, \
# newLabelsNames, \
# usedIndices = get_multiview_db.filter_labels(cls.labelsSet,
# cls.askedLabelsNamesSet,
# cls.fullLabels,
# cls.availableLabelsNames,
# cls.askedLabelsNames)
# cls.assertEqual(["test_label_1", "test_label_3"], newLabelsNames)
# np.testing.assert_array_equal(usedIndices, np.array([1, 5, 9]))
# np.testing.assert_array_equal(newLabels, np.array([1, 1, 0]))
#
# def test_biclasse(cls):
# cls.labelsSet = {0, 1}
# cls.fullLabels = cls.random_state.randint(0, 2, 10)
# cls.availableLabelsNames = ["test_label_0", "test_label_1"]
# newLabels, \
# newLabelsNames, \
# usedIndices = get_multiview_db.filter_labels(cls.labelsSet,
# cls.askedLabelsNamesSet,
# cls.fullLabels,
# cls.availableLabelsNames,
# cls.askedLabelsNames)
# cls.assertEqual(cls.availableLabelsNames, newLabelsNames)
# np.testing.assert_array_equal(usedIndices, np.arange(10))
# np.testing.assert_array_equal(newLabels, cls.fullLabels)
#
# def test_asked_too_many_labels(cls):
# cls.askedLabelsNamesSet = {"test_label_0", "test_label_1",
# "test_label_2", "test_label_3",
# "chicken_is_heaven"}
# with cls.assertRaises(get_multiview_db.DatasetError) as catcher:
# get_multiview_db.filter_labels(cls.labelsSet,
# cls.askedLabelsNamesSet,
# cls.fullLabels,
# cls.availableLabelsNames,
# cls.askedLabelsNames)
# exception = catcher.exception
#
# def test_asked_all_labels(cls):
# cls.askedLabelsNamesSet = {"test_label_0", "test_label_1",
# "test_label_2", "test_label_3"}
# cls.askedLabelsNames = ["test_label_0", "test_label_1", "test_label_2",
# "test_label_3"]
# newLabels, \
# newLabelsNames, \
# usedIndices = get_multiview_db.filter_labels(cls.labelsSet,
# cls.askedLabelsNamesSet,
# cls.fullLabels,
# cls.availableLabelsNames,
# cls.askedLabelsNames)
# cls.assertEqual(cls.availableLabelsNames, newLabelsNames)
# np.testing.assert_array_equal(usedIndices, np.arange(10))
# np.testing.assert_array_equal(newLabels, cls.fullLabels)
#
#
# class Test_selectAskedLabels(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.random_state = np.random.RandomState(42)
# cls.askedLabelsNamesSet = {"test_label_1", "test_label_3"}
# cls.fullLabels = cls.random_state.randint(0, 4, 10)
# cls.availableLabelsNames = ["test_label_0", "test_label_1",
# "test_label_2", "test_label_3"]
# cls.askedLabelsNames = ["test_label_1", "test_label_3"]
#
# def test_simple(cls):
# newLabels, \
# newLabelsNames, \
# usedIndices = get_multiview_db.select_asked_labels(cls.askedLabelsNamesSet,
# cls.availableLabelsNames,
# cls.askedLabelsNames,
# cls.fullLabels)
# cls.assertEqual(["test_label_1", "test_label_3"], newLabelsNames)
# np.testing.assert_array_equal(usedIndices, np.array([1, 5, 9]))
# np.testing.assert_array_equal(newLabels, np.array([1, 1, 0]))
#
# def test_asked_all_labels(cls):
# cls.askedLabelsNamesSet = {"test_label_0", "test_label_1",
# "test_label_2", "test_label_3"}
# cls.askedLabelsNames = ["test_label_0", "test_label_1", "test_label_2",
# "test_label_3"]
# newLabels, \
# newLabelsNames, \
# usedIndices = get_multiview_db.select_asked_labels(cls.askedLabelsNamesSet,
# cls.availableLabelsNames,
# cls.askedLabelsNames,
# cls.fullLabels)
# cls.assertEqual(cls.availableLabelsNames, newLabelsNames)
# np.testing.assert_array_equal(usedIndices, np.arange(10))
# np.testing.assert_array_equal(newLabels, cls.fullLabels)
#
# def test_asked_unavailable_labels(cls):
# cls.askedLabelsNamesSet = {"test_label_1", "test_label_3",
# "chicken_is_heaven"}
# with cls.assertRaises(get_multiview_db.DatasetError) as catcher:
# get_multiview_db.select_asked_labels(cls.askedLabelsNamesSet,
# cls.availableLabelsNames,
# cls.askedLabelsNames,
# cls.fullLabels)
# exception = catcher.exception
# # cls.assertTrue("Asked labels are not all available in the dataset" in exception)
#
#
# class Test_getAllLabels(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.random_state = np.random.RandomState(42)
# cls.fullLabels = cls.random_state.randint(0, 4, 10)
# cls.availableLabelsNames = ["test_label_0", "test_label_1",
# "test_label_2", "test_label_3"]
#
# def test_simple(cls):
# newLabels, newLabelsNames, usedIndices = get_multiview_db.get_all_labels(
# cls.fullLabels, cls.availableLabelsNames)
# cls.assertEqual(cls.availableLabelsNames, newLabelsNames)
# np.testing.assert_array_equal(usedIndices, np.arange(10))
# np.testing.assert_array_equal(newLabels, cls.fullLabels)
#
#
# class Test_fillLabelNames(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.NB_CLASS = 2
# cls.askedLabelsNames = ["test_label_1", "test_label_3"]
# cls.random_state = np.random.RandomState(42)
# cls.availableLabelsNames = ["test_label_" + str(_) for _ in range(40)]
#
# def test_simple(cls):
# askedLabelsNames, askedLabelsNamesSet = get_multiview_db.fill_label_names(
# cls.NB_CLASS,
# cls.askedLabelsNames,
# cls.random_state,
# cls.availableLabelsNames)
# cls.assertEqual(askedLabelsNames, cls.askedLabelsNames)
# cls.assertEqual(askedLabelsNamesSet, set(cls.askedLabelsNames))
#
# def test_missing_labels_names(cls):
# cls.NB_CLASS = 39
# askedLabelsNames, askedLabelsNamesSet = get_multiview_db.fill_label_names(
# cls.NB_CLASS,
# cls.askedLabelsNames,
# cls.random_state,
# cls.availableLabelsNames)
#
# cls.assertEqual(askedLabelsNames,
# ['test_label_1', 'test_label_3', 'test_label_35',
# 'test_label_38', 'test_label_6', 'test_label_15',
# 'test_label_32', 'test_label_28', 'test_label_8',
# 'test_label_29', 'test_label_26', 'test_label_17',
# 'test_label_19', 'test_label_10', 'test_label_18',
# 'test_label_14', 'test_label_21', 'test_label_11',
# 'test_label_34', 'test_label_0', 'test_label_27',
# 'test_label_7', 'test_label_13', 'test_label_2',
# 'test_label_39', 'test_label_23', 'test_label_4',
# 'test_label_31', 'test_label_37', 'test_label_5',
# 'test_label_36', 'test_label_25', 'test_label_33',
# 'test_label_12', 'test_label_24', 'test_label_20',
# 'test_label_22', 'test_label_9', 'test_label_16'])
# cls.assertEqual(askedLabelsNamesSet, set(
# ["test_label_" + str(_) for _ in range(30)] + [
# "test_label_" + str(31 + _) for _ in range(9)]))
#
# def test_too_many_label_names(cls):
# cls.NB_CLASS = 2
# cls.askedLabelsNames = ["test_label_1", "test_label_3", "test_label_4",
# "test_label_6"]
# askedLabelsNames, askedLabelsNamesSet = get_multiview_db.fill_label_names(
# cls.NB_CLASS,
# cls.askedLabelsNames,
# cls.random_state,
# cls.availableLabelsNames)
# cls.assertEqual(askedLabelsNames, ["test_label_3", "test_label_6"])
# cls.assertEqual(askedLabelsNamesSet, {"test_label_3", "test_label_6"})
#
#
# class Test_allAskedLabelsAreAvailable(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.askedLabelsNamesSet = {"test_label_1", "test_label_3"}
# cls.availableLabelsNames = ["test_label_0", "test_label_1",
# "test_label_2", "test_label_3"]
#
# def test_asked_available_labels(cls):
# cls.assertTrue(
# get_multiview_db.all_asked_labels_are_available(cls.askedLabelsNamesSet,
# cls.availableLabelsNames))
#
# def test_asked_unavailable_label(cls):
# cls.askedLabelsNamesSet = {"test_label_1", "test_label_3",
# "chicken_is_heaven"}
# cls.assertFalse(
# get_multiview_db.all_asked_labels_are_available(cls.askedLabelsNamesSet,
# cls.availableLabelsNames))
#
#
# class Test_getClasses(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.random_state = np.random.RandomState(42)
#
# def test_multiclass(cls):
# labelsSet = get_multiview_db.get_classes(
# cls.random_state.randint(0, 5, 30))
# cls.assertEqual(labelsSet, {0, 1, 2, 3, 4})
#
# def test_biclass(cls):
# labelsSet = get_multiview_db.get_classes(
# cls.random_state.randint(0, 2, 30))
# cls.assertEqual(labelsSet, {0, 1})
#
# def test_one_class(cls):
# with cls.assertRaises(get_multiview_db.DatasetError) as catcher:
# get_multiview_db.get_classes(np.zeros(30, dtype=int))
# exception = catcher.exception
# # cls.assertTrue("Dataset must have at least two different labels" in exception)
#
# <<<<<<< HEAD
# class Test_filterLabels(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.random_state = np.random.RandomState(42)
# cls.labelsSet = set(range(4))
# cls.askedLabelsNamesSet = {"test_label_1", "test_label_3"}
# cls.fullLabels = cls.random_state.randint(0, 4, 10)
# cls.availableLabelsNames = ["test_label_0", "test_label_1",
# "test_label_2", "test_label_3"]
# cls.askedLabelsNames = ["test_label_1", "test_label_3"]
#
# def test_simple(self):
# newLabels, \
# newLabelsNames, \
# usedIndices = get_multiview_db.filter_labels(self.labelsSet,
# self.askedLabelsNamesSet,
# self.fullLabels,
# self.availableLabelsNames,
# self.askedLabelsNames)
# self.assertEqual(["test_label_1", "test_label_3"], newLabelsNames)
# np.testing.assert_array_equal(usedIndices, np.array([1, 5, 9]))
# np.testing.assert_array_equal(newLabels, np.array([1, 1, 0]))
#
# def test_biclasse(self):
# self.labelsSet = {0, 1}
# self.fullLabels = self.random_state.randint(0, 2, 10)
# self.availableLabelsNames = ["test_label_0", "test_label_1"]
# newLabels, \
# newLabelsNames, \
# usedIndices = get_multiview_db.filter_labels(self.labelsSet,
# self.askedLabelsNamesSet,
# self.fullLabels,
# self.availableLabelsNames,
# self.askedLabelsNames)
# self.assertEqual(self.availableLabelsNames, newLabelsNames)
# np.testing.assert_array_equal(usedIndices, np.arange(10))
# np.testing.assert_array_equal(newLabels, self.fullLabels)
#
# def test_asked_too_many_labels(self):
# self.askedLabelsNamesSet = {"test_label_0", "test_label_1",
# "test_label_2", "test_label_3",
# "chicken_is_heaven"}
# with self.assertRaises(get_multiview_db.DatasetError) as catcher:
# get_multiview_db.filter_labels(self.labelsSet,
# self.askedLabelsNamesSet,
# self.fullLabels,
# self.availableLabelsNames,
# self.askedLabelsNames)
# exception = catcher.exception
#
# def test_asked_all_labels(self):
# self.askedLabelsNamesSet = {"test_label_0", "test_label_1",
# "test_label_2", "test_label_3"}
# self.askedLabelsNames = ["test_label_0", "test_label_1", "test_label_2",
# "test_label_3"]
# newLabels, \
# newLabelsNames, \
# usedIndices = get_multiview_db.filter_labels(self.labelsSet,
# self.askedLabelsNamesSet,
# self.fullLabels,
# self.availableLabelsNames,
# self.askedLabelsNames)
# self.assertEqual(self.availableLabelsNames, newLabelsNames)
# np.testing.assert_array_equal(usedIndices, np.arange(10))
# np.testing.assert_array_equal(newLabels, self.fullLabels)
#
#
# class Test_selectAskedLabels(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.random_state = np.random.RandomState(42)
# cls.askedLabelsNamesSet = {"test_label_1", "test_label_3"}
# cls.fullLabels = cls.random_state.randint(0, 4, 10)
# cls.availableLabelsNames = ["test_label_0", "test_label_1",
# "test_label_2", "test_label_3"]
# cls.askedLabelsNames = ["test_label_1", "test_label_3"]
#
# def test_simple(self):
# newLabels, \
# newLabelsNames, \
# usedIndices = get_multiview_db.select_asked_labels(self.askedLabelsNamesSet,
# self.availableLabelsNames,
# self.askedLabelsNames,
# self.fullLabels)
# self.assertEqual(["test_label_1", "test_label_3"], newLabelsNames)
# np.testing.assert_array_equal(usedIndices, np.array([1, 5, 9]))
# np.testing.assert_array_equal(newLabels, np.array([1, 1, 0]))
#
# def test_asked_all_labels(self):
# self.askedLabelsNamesSet = {"test_label_0", "test_label_1",
# "test_label_2", "test_label_3"}
# self.askedLabelsNames = ["test_label_0", "test_label_1", "test_label_2",
# "test_label_3"]
# newLabels, \
# newLabelsNames, \
# usedIndices = get_multiview_db.select_asked_labels(self.askedLabelsNamesSet,
# self.availableLabelsNames,
# self.askedLabelsNames,
# self.fullLabels)
# self.assertEqual(self.availableLabelsNames, newLabelsNames)
# np.testing.assert_array_equal(usedIndices, np.arange(10))
# np.testing.assert_array_equal(newLabels, self.fullLabels)
#
# def test_asked_unavailable_labels(self):
# self.askedLabelsNamesSet = {"test_label_1", "test_label_3",
# "chicken_is_heaven"}
# with self.assertRaises(get_multiview_db.DatasetError) as catcher:
# get_multiview_db.select_asked_labels(self.askedLabelsNamesSet,
# self.availableLabelsNames,
# self.askedLabelsNames,
# self.fullLabels)
# exception = catcher.exception
# # self.assertTrue("Asked labels are not all available in the dataset" in exception)
#
#
# class Test_getAllLabels(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.random_state = np.random.RandomState(42)
# cls.fullLabels = cls.random_state.randint(0, 4, 10)
# cls.availableLabelsNames = ["test_label_0", "test_label_1",
# "test_label_2", "test_label_3"]
#
# def test_simple(self):
# newLabels, newLabelsNames, usedIndices = get_multiview_db.get_all_labels(
# self.fullLabels, self.availableLabelsNames)
# self.assertEqual(self.availableLabelsNames, newLabelsNames)
# np.testing.assert_array_equal(usedIndices, np.arange(10))
# np.testing.assert_array_equal(newLabels, self.fullLabels)
#
#
# class Test_fillLabelNames(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.NB_CLASS = 2
# cls.askedLabelsNames = ["test_label_1", "test_label_3"]
# cls.random_state = np.random.RandomState(42)
# cls.availableLabelsNames = ["test_label_" + str(_) for _ in range(40)]
#
# def test_simple(self):
# askedLabelsNames, askedLabelsNamesSet = get_multiview_db.fill_label_names(
# self.NB_CLASS,
# self.askedLabelsNames,
# self.random_state,
# self.availableLabelsNames)
# self.assertEqual(askedLabelsNames, self.askedLabelsNames)
# self.assertEqual(askedLabelsNamesSet, set(self.askedLabelsNames))
#
# def test_missing_labels_names(self):
# self.NB_CLASS = 39
# askedLabelsNames, askedLabelsNamesSet = get_multiview_db.fill_label_names(
# self.NB_CLASS,
# self.askedLabelsNames,
# self.random_state,
# self.availableLabelsNames)
#
# self.assertEqual(askedLabelsNames,
# ['test_label_1', 'test_label_3', 'test_label_35',
# 'test_label_38', 'test_label_6', 'test_label_15',
# 'test_label_32', 'test_label_28', 'test_label_8',
# 'test_label_29', 'test_label_26', 'test_label_17',
# 'test_label_19', 'test_label_10', 'test_label_18',
# 'test_label_14', 'test_label_21', 'test_label_11',
# 'test_label_34', 'test_label_0', 'test_label_27',
# 'test_label_7', 'test_label_13', 'test_label_2',
# 'test_label_39', 'test_label_23', 'test_label_4',
# 'test_label_31', 'test_label_37', 'test_label_5',
# 'test_label_36', 'test_label_25', 'test_label_33',
# 'test_label_12', 'test_label_24', 'test_label_20',
# 'test_label_22', 'test_label_9', 'test_label_16'])
# self.assertEqual(askedLabelsNamesSet, set(
# ["test_label_" + str(_) for _ in range(30)] + [
# "test_label_" + str(31 + _) for _ in range(9)]))
#
# def test_too_many_label_names(self):
# self.NB_CLASS = 2
# self.askedLabelsNames = ["test_label_1", "test_label_3", "test_label_4",
# "test_label_6"]
# askedLabelsNames, askedLabelsNamesSet = get_multiview_db.fill_label_names(
# self.NB_CLASS,
# self.askedLabelsNames,
# self.random_state,
# self.availableLabelsNames)
# self.assertEqual(askedLabelsNames, ["test_label_3", "test_label_6"])
# self.assertEqual(askedLabelsNamesSet, {"test_label_3", "test_label_6"})
#
#
# class Test_allAskedLabelsAreAvailable(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.askedLabelsNamesSet = {"test_label_1", "test_label_3"}
# cls.availableLabelsNames = ["test_label_0", "test_label_1",
# "test_label_2", "test_label_3"]
#
# def test_asked_available_labels(self):
# self.assertTrue(
# get_multiview_db.all_asked_labels_are_available(self.askedLabelsNamesSet,
# self.availableLabelsNames))
#
# def test_asked_unavailable_label(self):
# self.askedLabelsNamesSet = {"test_label_1", "test_label_3",
# "chicken_is_heaven"}
# self.assertFalse(
# get_multiview_db.all_asked_labels_are_available(self.askedLabelsNamesSet,
# self.availableLabelsNames))
#
#
# class Test_getClasses(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.random_state = np.random.RandomState(42)
#
# def test_multiclass(self):
# labelsSet = get_multiview_db.get_classes(
# self.random_state.randint(0, 5, 30))
# self.assertEqual(labelsSet, {0, 1, 2, 3, 4})
#
# def test_biclass(self):
# labelsSet = get_multiview_db.get_classes(
# self.random_state.randint(0, 2, 30))
# self.assertEqual(labelsSet, {0, 1})
#
# def test_one_class(self):
# with self.assertRaises(get_multiview_db.DatasetError) as catcher:
# get_multiview_db.get_classes(np.zeros(30, dtype=int))
# exception = catcher.exception
# # self.assertTrue("Dataset must have at least two different labels" in exception)
#
# =======
# >>>>>>> 66129965ae7c38b4bdb4ae657369ab24357070cd
class Test_get_classic_db_hdf5(unittest.TestCase): class Test_get_classic_db_hdf5(unittest.TestCase):
def setUp(self): def setUp(self):
rm_tmp() rm_tmp()
# <<<<<<< HEAD
# if not os.path.exists("multiview_platform/tests/tmp_tests"):
# os.mkdir("multiview_platform/tests/tmp_tests")
# cls.dataset_file = h5py.File(
# tmp_path+"test_dataset.hdf5", "w")
# cls.pathF = tmp_path
# cls.nameDB = "test_dataset"
# cls.NB_CLASS = 2
# cls.askedLabelsNames = ["test_label_1", "test_label_3"]
# cls.random_state = np.random.RandomState(42)
# cls.views = ["test_view_1", "test_view_3"]
# cls.metadata_group = cls.dataset_file.create_group("Metadata")
# cls.metadata_group.attrs["nbView"] = 4
# cls.labels_dataset = cls.dataset_file.create_dataset("Labels",
# data=cls.random_state.randint(
# 0, 4, 10))
# cls.labels_dataset.attrs["names"] = ["test_label_0".encode(),
# "test_label_1".encode(),
# "test_label_2".encode(),
# "test_label_3".encode()]
#
# for i in range(4):
# cls.dataset = cls.dataset_file.create_dataset("View" + str(i),
# data=cls.random_state.randint(
# 0, 100, (10, 20)))
# cls.dataset.attrs["name"] = "test_view_" + str(i)
#
# def test_simple(self):
# dataset_file, labels_dictionary, dataset_name = get_multiview_db.get_classic_db_hdf5(
# self.views, self.pathF, self.nameDB,
# self.NB_CLASS, self.askedLabelsNames,
# self.random_state)
# self.assertEqual(dataset_file.get("View1").attrs["name"], "test_view_3")
# self.assertEqual(labels_dictionary,
# {0: "test_label_1", 1: "test_label_3"})
# self.assertEqual(dataset_file.get("Metadata").attrs["datasetLength"], 3)
# self.assertEqual(dataset_file.get("Metadata").attrs["nbView"], 2)
# self.assertEqual(dataset_file.get("Metadata").attrs["nbClass"], 2)
# np.testing.assert_array_equal(dataset_file.get("View0").value,
# self.dataset_file.get("View1").value[
# np.array([1, 5, 9]), :])
#
# def test_all_labels_asked(self):
# askedLabelsNames = ["test_label_0", "test_label_1", "test_label_2",
# "test_label_3"]
# NB_CLASS = 4
# dataset_file, labels_dictionary, dataset_name = get_multiview_db.get_classic_db_hdf5(
# self.views, self.pathF, self.nameDB,
# NB_CLASS, askedLabelsNames,
# self.random_state)
# self.assertEqual(dataset_name, 'test_dataset_temp_view_label_select')
# self.assertEqual(dataset_file.get("View1").attrs["name"], "test_view_3")
# self.assertEqual(labels_dictionary,
# {0: "test_label_0", 1: "test_label_1",
# 2: "test_label_2", 3: "test_label_3"})
# self.assertEqual(dataset_file.get("Metadata").attrs["datasetLength"], 10)
# self.assertEqual(dataset_file.get("Metadata").attrs["nbView"], 2)
# self.assertEqual(dataset_file.get("Metadata").attrs["nbClass"], 4)
# np.testing.assert_array_equal(dataset_file.get("View0").value,
# self.dataset_file.get("View1").value)
#
# def test_all_views_asked(self):
# views = ["test_view_0", "test_view_1", "test_view_2", "test_view_3"]
# dataset_file, labels_dictionary, dataset_name = get_multiview_db.get_classic_db_hdf5(views,
# self.pathF,
# self.nameDB,
# self.NB_CLASS,
# self.askedLabelsNames,
# self.random_state)
# for viewIndex in range(4):
# np.testing.assert_array_equal(
# dataset_file.get("View" + str(viewIndex)).value,
# self.dataset_file.get("View" + str(viewIndex)).value[
# np.array([1, 5, 9]), :])
# self.assertEqual(
# dataset_file.get("View" + str(viewIndex)).attrs["name"],
# "test_view_" + str(viewIndex))
# self.assertEqual(labels_dictionary,
# {0: "test_label_1", 1: "test_label_3"})
# self.assertEqual(dataset_file.get("Metadata").attrs["datasetLength"], 3)
# self.assertEqual(dataset_file.get("Metadata").attrs["nbView"], 4)
# self.assertEqual(dataset_file.get("Metadata").attrs["nbClass"], 2)
#
# def test_asked_the_whole_dataset(self):
# askedLabelsNames = ["test_label_0", "test_label_1", "test_label_2",
# "test_label_3"]
# NB_CLASS = 4
# views = ["test_view_0", "test_view_1", "test_view_2", "test_view_3"]
# dataset_file, labels_dictionary, dataset_name = get_multiview_db.get_classic_db_hdf5(views,
# self.pathF,
# self.nameDB,
# NB_CLASS,
# askedLabelsNames,
# self.random_state)
# for viewIndex in range(4):
# np.testing.assert_array_equal(
# dataset_file.get("View" + str(viewIndex)).value,
# self.dataset_file.get("View" + str(viewIndex)))
# self.assertEqual(
# dataset_file.get("View" + str(viewIndex)).attrs["name"],
# "test_view_" + str(viewIndex))
# self.assertEqual(labels_dictionary,
# {0: "test_label_0", 1: "test_label_1",
# 2: "test_label_2", 3: "test_label_3"})
# self.assertEqual(dataset_file.get("Metadata").attrs["datasetLength"], 10)
# self.assertEqual(dataset_file.get("Metadata").attrs["nbView"], 4)
# self.assertEqual(dataset_file.get("Metadata").attrs["nbClass"], 4)
# =======
os.mkdir(tmp_path) os.mkdir(tmp_path)
self.rs = np.random.RandomState(42) self.rs = np.random.RandomState(42)
self.nb_view = 3 self.nb_view = 3
...@@ -810,7 +42,6 @@ class Test_get_classic_db_hdf5(unittest.TestCase): ...@@ -810,7 +42,6 @@ class Test_get_classic_db_hdf5(unittest.TestCase):
meta_data_grp.attrs["nbView"] = len(self.views) meta_data_grp.attrs["nbView"] = len(self.views)
meta_data_grp.attrs["nbClass"] = len(np.unique(self.labels)) meta_data_grp.attrs["nbClass"] = len(np.unique(self.labels))
meta_data_grp.attrs["datasetLength"] = len(self.labels) meta_data_grp.attrs["datasetLength"] = len(self.labels)
# >>>>>>> 66129965ae7c38b4bdb4ae657369ab24357070cd
def test_simple(self): def test_simple(self):
dataset , labels_dictionary, dataset_name = get_multiview_db.get_classic_db_hdf5( dataset , labels_dictionary, dataset_name = get_multiview_db.get_classic_db_hdf5(
...@@ -868,89 +99,7 @@ class Test_get_classic_db_csv(unittest.TestCase): ...@@ -868,89 +99,7 @@ class Test_get_classic_db_csv(unittest.TestCase):
data, delimiter=",") data, delimiter=",")
self.datas.append(data) self.datas.append(data)
# <<<<<<< HEAD
# def test_simple(self):
# dataset_file, labels_dictionary, dataset_name = get_multiview_db.get_classic_db_csv(
# self.views, self.pathF, self.nameDB,
# self.NB_CLASS, self.askedLabelsNames,
# self.random_state, delimiter=",")
# self.assertEqual(dataset_file.get("Metadata").attrs["nbView"], 2)
# self.assertEqual(dataset_file.get("View1").attrs["name"], "test_view_3")
# self.assertEqual(dataset_file.get("View0").attrs["name"], "test_view_1")
# self.assertEqual(labels_dictionary,
# {0: "test_label_1", 1: "test_label_3"})
# self.assertEqual(dataset_file.get("Metadata").attrs["datasetLength"], 3)
# self.assertEqual(dataset_file.get("Metadata").attrs["nbClass"], 2)
# np.testing.assert_array_equal(dataset_file.get("View0").value,
# self.datas[1][np.array([1, 5, 9]), :])
#
# def test_all_views_asked(self):
# views = ["test_view_0", "test_view_1", "test_view_2", "test_view_3"]
# dataset_file, labels_dictionary, dataset_name = get_multiview_db.get_classic_db_csv(views,
# self.pathF,
# self.nameDB,
# self.NB_CLASS,
# self.askedLabelsNames,
# self.random_state,
# delimiter=",")
# self.assertEqual(labels_dictionary,
# {0: "test_label_1", 1: "test_label_3"})
# self.assertEqual(dataset_file.get("Metadata").attrs["datasetLength"], 3)
# self.assertEqual(dataset_file.get("Metadata").attrs["nbView"], 4)
# self.assertEqual(dataset_file.get("Metadata").attrs["nbClass"], 2)
# self.assertEqual(dataset_name,'test_dataset_temp_view_label_select')
# for viewIndex in range(4):
# np.testing.assert_array_equal(
# dataset_file.get("View" + str(viewIndex)).value,
# self.datas[viewIndex][np.array([1, 5, 9]), :])
# self.assertEqual(
# dataset_file.get("View" + str(viewIndex)).attrs["name"],
# "test_view_" + str(viewIndex))
#
# def test_all_labels_asked(self):
# askedLabelsNames = ["test_label_0", "test_label_1", "test_label_2",
# "test_label_3"]
# NB_CLASS = 4
# dataset_file, labels_dictionary, dataset_name = get_multiview_db.get_classic_db_csv(
# self.views, self.pathF, self.nameDB,
# NB_CLASS, askedLabelsNames,
# self.random_state, delimiter=",")
# self.assertEqual(dataset_file.get("View1").attrs["name"], "test_view_3")
# self.assertEqual(labels_dictionary,
# {0: "test_label_0", 1: "test_label_1",
# 2: "test_label_2", 3: "test_label_3"})
# self.assertEqual(dataset_file.get("Metadata").attrs["datasetLength"], 10)
# self.assertEqual(dataset_file.get("Metadata").attrs["nbView"], 2)
# self.assertEqual(dataset_file.get("Metadata").attrs["nbClass"], 4)
# np.testing.assert_array_equal(dataset_file.get("View0").value,
# self.datas[1])
#
# def test_asked_the_whole_dataset(self):
# askedLabelsNames = ["test_label_0", "test_label_1", "test_label_2",
# "test_label_3"]
# NB_CLASS = 4
# views = ["test_view_0", "test_view_1", "test_view_2", "test_view_3"]
# dataset_file, labels_dictionary, dataset_name = get_multiview_db.get_classic_db_csv(views,
# self.pathF,
# self.nameDB,
# NB_CLASS,
# askedLabelsNames,
# self.random_state,
# delimiter=",")
# for viewIndex in range(4):
# np.testing.assert_array_equal(
# dataset_file.get("View" + str(viewIndex)).value,
# self.datas[viewIndex])
# self.assertEqual(
# dataset_file.get("View" + str(viewIndex)).attrs["name"],
# "test_view_" + str(viewIndex))
# self.assertEqual(labels_dictionary,
# {0: "test_label_0", 1: "test_label_1",
# 2: "test_label_2", 3: "test_label_3"})
# self.assertEqual(dataset_file.get("Metadata").attrs["datasetLength"], 10)
# self.assertEqual(dataset_file.get("Metadata").attrs["nbView"], 4)
# self.assertEqual(dataset_file.get("Metadata").attrs["nbClass"], 4)
# =======
def test_simple(self): def test_simple(self):
dataset, labels_dictionary, dataset_name = get_multiview_db.get_classic_db_csv( dataset, labels_dictionary, dataset_name = get_multiview_db.get_classic_db_csv(
self.views, self.pathF, self.nameDB, self.views, self.pathF, self.nameDB,
...@@ -963,7 +112,6 @@ class Test_get_classic_db_csv(unittest.TestCase): ...@@ -963,7 +112,6 @@ class Test_get_classic_db_csv(unittest.TestCase):
self.assertEqual(dataset.get_nb_examples(), 3) self.assertEqual(dataset.get_nb_examples(), 3)
self.assertEqual(dataset.get_nb_class(), 2) self.assertEqual(dataset.get_nb_class(), 2)
# >>>>>>> 66129965ae7c38b4bdb4ae657369ab24357070cd
@classmethod @classmethod
def tearDown(self): def tearDown(self):
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment