Commit ee44dedf authored by Baptiste Bauvin

Tests passing

parent 332ab1c0
@@ -33,10 +33,10 @@ class Test_InitArgumentDictionaries(unittest.TestCase):
cls.nb_class = 2
cls.monoview_classifier_name = "fake_monoview_classifier"
cls.monoview_classifier_arg_name = "fake_arg"
- cls.monoview_classifier_arg_value = ["fake_value_1"]
+ cls.monoview_classifier_arg_value = "fake_value_1"
cls.multiview_classifier_name = "fake_multiview_classifier"
cls.multiview_classifier_arg_name = "fake_arg_mv"
- cls.multiview_classifier_arg_value = ["fake_value_2"]
+ cls.multiview_classifier_arg_value = "fake_value_2"
cls.init_kwargs = {
'monoview':{
cls.monoview_classifier_name:
@@ -56,14 +56,14 @@ class Test_InitArgumentDictionaries(unittest.TestCase):
"None", {})
expected_output = [{
self.monoview_classifier_name: {
- self.monoview_classifier_arg_name:self.monoview_classifier_arg_value[0]},
+ self.monoview_classifier_arg_name:self.monoview_classifier_arg_value},
"view_name": "test_view_0",
'hps_kwargs': {},
"classifier_name": self.monoview_classifier_name,
"nb_class": self.nb_class,
"view_index": 0},
{self.monoview_classifier_name: {
- self.monoview_classifier_arg_name: self.monoview_classifier_arg_value[0]},
+ self.monoview_classifier_arg_name: self.monoview_classifier_arg_value},
"view_name": "test_view",
'hps_kwargs': {},
"classifier_name": self.monoview_classifier_name,
@@ -89,7 +89,7 @@ class Test_InitArgumentDictionaries(unittest.TestCase):
"labels_names":None,
self.multiview_classifier_name: {
self.multiview_classifier_arg_name:
- self.multiview_classifier_arg_value[0]},
+ self.multiview_classifier_arg_value},
},]
self.assertEqual(arguments["multiview"][0], expected_output[0])
@@ -115,7 +115,7 @@ class Test_getHPs(unittest.TestCase):
cls.output_file_name = tmp_path
cls.cv = StratifiedKFold(n_splits=2, random_state=cls.random_state, shuffle=True)
cls.nb_cores = 1
- cls.metrics = [["accuracy_score", None]]
+ cls.metrics = {"accuracy_score*": {}}
cls.kwargs = {"decision_tree" : {"max_depth": 1,
"criterion": "gini",
"splitter": "best"}}
@@ -14,14 +14,14 @@ class Test_get_example_errors(unittest.TestCase):
results = [MultiviewResult("mv", "", {"accuracy_score": [0.7, 0.75],
"f1_score": [0.71, 0.76]},
np.array([0,0,0,0,1,1,1,1,1]),
- 0,0,0),
+ 0,0,0, {}),
MonoviewResult(0,
"dt",
"1",
{"accuracy_score": [0.8, 0.85],
"f1_score": [0.81, 0.86]}
, np.array([0,0,1,1,0,0,1,1,0]), "", "",
"", "",0,0)
"", "",0,0, {})
]
example_errors = get_example_errors(ground_truth,
results)
@@ -45,7 +45,7 @@ class Test_gen_error_data(unittest.TestCase):
self.assertEqual(nb_examples, 7)
self.assertEqual(classifiers_names, ["ada-1", "mv"])
np.testing.assert_array_equal(data_2d, np.array([ada_data, mv_data]).transpose())
- np.testing.assert_array_equal(error_on_examples, -1*(ada_data+mv_data)/nb_classifiers)
+ np.testing.assert_array_equal(error_on_examples, (ada_data+mv_data)/nb_classifiers)
@@ -72,5 +72,5 @@ class Test_gen_error_data_glob(unittest.TestCase):
self.assertEqual(nb_examples, 7)
self.assertEqual(nb_classifiers, 2)
np.testing.assert_array_equal(data, np.array([ada_sum, mv_sum]).transpose())
- np.testing.assert_array_equal(error_on_examples, -1*np.sum(np.array([ada_sum, mv_sum]), axis=0)+(nb_classifiers*stats_iter))
+ np.testing.assert_array_equal(error_on_examples, np.sum(np.array([ada_sum, mv_sum]), axis=0)/(nb_classifiers*stats_iter))
self.assertEqual(classifier_names, ["ada-1", "mv"])
\ No newline at end of file
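The two updated assertions above switch the error aggregation from a negated/offset sum to a plain normalised sum. A minimal numpy sketch of that convention, reusing the variable names from the tests; the array values and the 0/1 interpretation are assumptions made for illustration only:

import numpy as np

nb_classifiers = 2
stats_iter = 2
ada_data = np.array([0, 1, 0, 0, 1, 0, 1])  # per-example scores for one classifier (assumed values)
mv_data = np.array([1, 1, 0, 0, 0, 0, 1])   # per-example scores for the other (assumed values)

# Single-iteration case: per-example mean over classifiers.
error_on_examples = (ada_data + mv_data) / nb_classifiers

# "Glob" case: scores accumulated over stats_iter iterations, normalised by
# nb_classifiers * stats_iter instead of being offset by it.
ada_sum = ada_data * stats_iter
mv_sum = mv_data * stats_iter
error_on_examples_glob = np.sum(np.array([ada_sum, mv_sum]), axis=0) / (nb_classifiers * stats_iter)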
@@ -10,7 +10,7 @@ from multiview_platform.mono_multi_view_classifiers.result_analysis.execution im
class Test_format_previous_results(unittest.TestCase):
def test_simple(self):
iter_results = {"metrics_scores":[], "example_errors":[], "feature_importances":[], "labels":[], "durations":[]}
iter_results = {"metrics_scores":[], "example_errors":[], "feature_importances":[], "labels":[], "durations":[], "class_metrics_scores":[]}
random_state = np.random.RandomState(42)
# Gen metrics data
@@ -46,7 +46,7 @@ class Test_format_previous_results(unittest.TestCase):
data=np.ones((2, 2))))
# Running the function
- metric_analysis, error_analysis, \
+ metric_analysis, class_met, error_analysis, \
feature_importances, feature_stds, \
labels, durations_mean, duration_std = format_previous_results(iter_results)
mean_df = pd.DataFrame(data=np.mean(np.array([metrics_1_data,
@@ -12,20 +12,20 @@ class Test_get_metrics_scores(unittest.TestCase):
def test_simple(self):
metrics = [["accuracy_score"], ["f1_score"]]
metrics = {"accuracy_score*":{},"f1_score":{}}
results = [MonoviewResult(0,
"ada",
"0",
{"accuracy_score":[0.9, 0.95],
{"accuracy_score*":[0.9, 0.95],
"f1_score":[0.91, 0.96]}
, "", "", "", "", "",0,0)]
metrics_scores = get_metrics_scores(metrics,
results)
, "", "", "", "", "",0,0,{})]
metrics_scores, class_met = get_metrics_scores(metrics,
results, [])
self.assertIsInstance(metrics_scores, dict)
self.assertIsInstance(metrics_scores["accuracy_score"], pd.DataFrame)
np.testing.assert_array_equal(np.array(metrics_scores["accuracy_score"].loc["train"]), np.array([0.9]))
self.assertIsInstance(metrics_scores["accuracy_score*"], pd.DataFrame)
np.testing.assert_array_equal(np.array(metrics_scores["accuracy_score*"].loc["train"]), np.array([0.9]))
np.testing.assert_array_equal(
np.array(metrics_scores["accuracy_score"].loc["test"]),
np.array(metrics_scores["accuracy_score*"].loc["test"]),
np.array([0.95]))
np.testing.assert_array_equal(
np.array(metrics_scores["f1_score"].loc["train"]),
@@ -37,11 +37,11 @@ class Test_get_metrics_scores(unittest.TestCase):
np.array(["ada-0"]))
def test_multiple_monoview_classifiers(self):
metrics = [["accuracy_score"], ["f1_score"]]
metrics = {"accuracy_score*":{},"f1_score":{}}
results = [MonoviewResult(view_index=0,
classifier_name="ada",
view_name="0",
metrics_scores={"accuracy_score": [0.9, 0.95],
metrics_scores={"accuracy_score*": [0.9, 0.95],
"f1_score": [0.91, 0.96]},
full_labels_pred="",
classifier_config="",
@@ -49,11 +49,12 @@ class Test_get_metrics_scores(unittest.TestCase):
n_features="",
hps_duration=0,
fit_duration=0,
- pred_duration=0),
+ pred_duration=0,
+ class_metric_scores={}),
MonoviewResult(view_index=0,
classifier_name="dt",
view_name="1",
metrics_scores={"accuracy_score": [0.8, 0.85],
metrics_scores={"accuracy_score*": [0.8, 0.85],
"f1_score": [0.81, 0.86]},
full_labels_pred="",
classifier_config="",
@@ -61,17 +62,18 @@ class Test_get_metrics_scores(unittest.TestCase):
n_features="",
hps_duration=0,
fit_duration=0,
- pred_duration=0)
+ pred_duration=0,
+ class_metric_scores={})
]
- metrics_scores = get_metrics_scores(metrics,
- results)
+ metrics_scores, class_met = get_metrics_scores(metrics,
+ results, [])
self.assertIsInstance(metrics_scores, dict)
self.assertIsInstance(metrics_scores["accuracy_score"], pd.DataFrame)
self.assertIsInstance(metrics_scores["accuracy_score*"], pd.DataFrame)
np.testing.assert_array_equal(
np.array(metrics_scores["accuracy_score"].loc["train"]),
np.array(metrics_scores["accuracy_score*"].loc["train"]),
np.array([0.9, 0.8]))
np.testing.assert_array_equal(
np.array(metrics_scores["accuracy_score"].loc["test"]),
np.array(metrics_scores["accuracy_score*"].loc["test"]),
np.array([0.95, 0.85]))
np.testing.assert_array_equal(
np.array(metrics_scores["f1_score"].loc["train"]),
@@ -84,13 +86,13 @@ class Test_get_metrics_scores(unittest.TestCase):
np.array(["ada-0", "dt-1"]))
def test_mutiview_result(self):
metrics = [["accuracy_score"], ["f1_score"]]
results = [MultiviewResult("mv", "", {"accuracy_score": [0.7, 0.75],
"f1_score": [0.71, 0.76]}, "",0,0,0 ),
metrics = {"accuracy_score*":{},"f1_score":{}}
results = [MultiviewResult("mv", "", {"accuracy_score*": [0.7, 0.75],
"f1_score": [0.71, 0.76]}, "",0,0,0, {}),
MonoviewResult(view_index=0,
classifier_name="dt",
view_name="1",
metrics_scores={"accuracy_score": [0.8, 0.85],
metrics_scores={"accuracy_score*": [0.8, 0.85],
"f1_score": [0.81, 0.86]},
full_labels_pred="",
classifier_config="",
@@ -98,17 +100,18 @@ class Test_get_metrics_scores(unittest.TestCase):
n_features="",
hps_duration=0,
fit_duration=0,
- pred_duration=0)
+ pred_duration=0,
+ class_metric_scores={})
]
- metrics_scores = get_metrics_scores(metrics,
- results)
+ metrics_scores, class_met = get_metrics_scores(metrics,
+ results, [])
self.assertIsInstance(metrics_scores, dict)
self.assertIsInstance(metrics_scores["accuracy_score"], pd.DataFrame)
self.assertIsInstance(metrics_scores["accuracy_score*"], pd.DataFrame)
np.testing.assert_array_equal(
np.array(metrics_scores["accuracy_score"].loc["train"]),
np.array(metrics_scores["accuracy_score*"].loc["train"]),
np.array([0.7, 0.8]))
np.testing.assert_array_equal(
np.array(metrics_scores["accuracy_score"].loc["test"]),
np.array(metrics_scores["accuracy_score*"].loc["test"]),
np.array([0.75, 0.85]))
np.testing.assert_array_equal(
np.array(metrics_scores["f1_score"].loc["train"]),
@@ -132,17 +135,19 @@ class Test_init_plot(unittest.TestCase):
directory = "dir"
database_name = 'db'
labels_names = ['lb1', "lb2"]
class_met = metric_dataframe = pd.DataFrame(index=["train", "test"],
columns=["dt-1", "mv"], data=data)
train, test, classifier_names, \
file_name, nb_results, results = init_plot(results,
file_name, nb_results, results, class_test = init_plot(results,
metric_name,
metric_dataframe,
directory,
database_name,
labels_names)
self.assertEqual(file_name, os.path.join("dir", "db-lb1_vs_lb2-acc"))
class_met)
self.assertEqual(file_name, os.path.join("dir", "db-acc"))
np.testing.assert_array_equal(train, data[0,:])
np.testing.assert_array_equal(test, data[1, :])
np.testing.assert_array_equal(classifier_names, np.array(["dt-1", "mv"]))
self.assertEqual(nb_results, 2)
self.assertEqual(results, [["dt-1", "acc", data[1,0], 0],
["mv", "acc", data[1,1], 0]])
\ No newline at end of file
self.assertEqual(results, [["dt-1", "acc", data[1,0], 0.0, data[1,0]],
["mv", "acc", data[1,1], 0.0, data[1,1]]])
\ No newline at end of file
@@ -32,14 +32,12 @@ class Test_ResultAnalyzer(unittest.TestCase):
cls.n_splits = 5
cls.k_folds = StratifiedKFold(n_splits=cls.n_splits, )
cls.hps_method = "randomized_search"
- cls.metrics_list = [("accuracy_score", {}), ("f1_score", {})]
+ cls.metrics_list = {"accuracy_score": {}, "f1_score":{}}
cls.n_iter = 6
cls.class_label_names = ["class{}".format(ind+1)
for ind in range(cls.n_classes)]
- cls.train_pred = np.random.randint(0, cls.n_classes,
- size=cls.train_length)
- cls.test_pred = np.random.randint(0, cls.n_classes,
- size=cls.test_length)
+ cls.pred = np.random.randint(0, cls.n_classes,
+ size=cls.n_examples)
cls.directory = "fake_directory"
cls.base_file_name = "fake_file"
cls.labels = np.random.randint(0, cls.n_classes,
@@ -48,19 +46,19 @@ class Test_ResultAnalyzer(unittest.TestCase):
cls.nb_cores = 0.5
cls.duration = -4
cls.train_accuracy = accuracy_score(cls.labels[cls.train_indices],
- cls.train_pred)
+ cls.pred[cls.train_indices])
cls.test_accuracy = accuracy_score(cls.labels[cls.test_indices],
- cls.test_pred)
+ cls.pred[cls.test_indices])
cls.train_f1 = f1_score(cls.labels[cls.train_indices],
- cls.train_pred, average='micro')
+ cls.pred[cls.train_indices], average='micro')
cls.test_f1 = f1_score(cls.labels[cls.test_indices],
- cls.test_pred, average='micro')
+ cls.pred[cls.test_indices], average='micro')
def test_simple(self):
RA = base.ResultAnalyser(self.classifier, self.classification_indices,
self.k_folds, self.hps_method, self.metrics_list,
self.n_iter, self.class_label_names,
- self.train_pred, self.test_pred, self.directory,
+ self.pred, self.directory,
self.base_file_name, self.labels,
self.database_name, self.nb_cores,
self.duration)
@@ -70,20 +68,20 @@ class Test_ResultAnalyzer(unittest.TestCase):
self.k_folds, self.hps_method,
self.metrics_list,
self.n_iter, self.class_label_names,
- self.train_pred, self.test_pred,
+ self.pred,
self.directory, self.base_file_name,
self.labels, self.database_name,
self.nb_cores, self.duration)
- train_score, test_score = RA.get_metric_score("accuracy_score", {})
- self.assertEqual(train_score, self.train_accuracy)
- self.assertEqual(test_score, self.test_accuracy)
+ cl_train, cl_test,train_score, test_score = RA.get_metric_score("accuracy_score", {})
+ np.testing.assert_array_equal(train_score, self.train_accuracy)
+ np.testing.assert_array_equal(test_score, self.test_accuracy)
def test_get_all_metrics_scores(self):
RA = base.ResultAnalyser(self.classifier, self.classification_indices,
self.k_folds, self.hps_method,
self.metrics_list,
self.n_iter, self.class_label_names,
- self.train_pred, self.test_pred,
+ self.pred,
self.directory, self.base_file_name,
self.labels, self.database_name,
self.nb_cores, self.duration)
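A standalone, hedged sketch of the interface change exercised above: the analyser now receives a single prediction array covering all examples (cls.pred) instead of separate train and test predictions, and get_metric_score returns per-class scores ahead of the train and test scores. The helper below only illustrates that calling convention; it is not the project's ResultAnalyser implementation, and the per-class values are left as placeholders because their computation is not shown in this diff.

import numpy as np
from sklearn.metrics import accuracy_score

def get_metric_score(labels, pred, train_indices, test_indices, metric=accuracy_score):
    # One prediction array, indexed by the train/test splits.
    train_score = metric(labels[train_indices], pred[train_indices])
    test_score = metric(labels[test_indices], pred[test_indices])
    class_train, class_test = None, None  # placeholders for the per-class scores
    return class_train, class_test, train_score, test_score

rng = np.random.RandomState(42)
labels = rng.randint(0, 2, size=10)
pred = rng.randint(0, 2, size=10)
cl_train, cl_test, train_score, test_score = get_metric_score(
    labels, pred, train_indices=np.arange(6), test_indices=np.arange(6, 10))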