Commit 195a26b6 authored by Baptiste Bauvin

Merge branch 'develop' into private_algos

parents a9a15610 bbaaa5d7
@@ -50,8 +50,7 @@ def publish_example_errors(example_errors, directory, databaseName,
                            labels_names, example_ids, labels):
     logging.debug("Start:\t Biclass Label analysis figure generation")
-    base_file_name = os.path.join(directory, databaseName + "-" + "_vs_".join(
-        labels_names) + "-")
+    base_file_name = os.path.join(directory, databaseName + "-" )
     nb_classifiers, nb_examples, classifiers_names, \
         data_2d, error_on_examples = gen_error_data(example_errors)
...
@@ -7,6 +7,7 @@ import logging
 from ..utils.organization import secure_file_path
+
 def get_metrics_scores(metrics, results, label_names):
     r"""Used to extract metrics scores in case of classification
...
@@ -41,6 +42,10 @@ def get_metrics_scores(metrics, results, label_names):
                                               index=["train", "test"],
                                               columns=classifier_names))
                          for metric in metrics)
 
+    class_metric_scores = dict((metric[0], pd.DataFrame(
+        index=pd.MultiIndex.from_product([["train", "test"], label_names]),
+        columns=classifier_names, dtype=float))
+                               for metric in metrics)
     for metric in metrics:
         for classifier_result in results:
...
@@ -50,12 +55,6 @@ def get_metrics_scores(metrics, results, label_names):
             metrics_scores[metric[0]].loc[
                 "test", classifier_result.get_classifier_name()] = \
                 classifier_result.metrics_scores[metric[0]][1]
-
-    class_metric_scores = dict((metric[0], pd.DataFrame(index=pd.MultiIndex.from_product([["train", "test"], label_names]),
-        columns=classifier_names, dtype=float))
-                               for metric in metrics)
-    for metric in metrics:
-        for classifier_result in results:
             for label_index, label_name in enumerate(label_names):
                 class_metric_scores[metric[0]].loc[(
                     "train", label_name),classifier_result.get_classifier_name()] = \
...
@@ -97,7 +96,7 @@ def publish_metrics_graphs(metrics_scores, directory, database_name,
         class_test_scores = init_plot(results, metric_name,
                                       metrics_scores[metric_name],
                                       directory,
-                                      database_name, labels_names,
+                                      database_name,
                                       class_metric_scores[metric_name])
 
     plot_metric_scores(train_scores, test_scores, classifier_names,
...
@@ -148,32 +147,8 @@ def publish_all_metrics_scores(iter_results, class_iter_results, directory,
         plot_class_metric_scores(test, file_name, label_names, classifier_names, metric_name, stds=test_std, tag="averaged")
     return results
 
-
-# def publish_all_class_metrics_scores(iter_results, directory,
-#                                      data_base_name, stats_iter,
-#                                      min_size=10):
-#     results = []
-#     secure_file_path(os.path.join(directory, "a"))
-#
-#     for metric_name, scores in iter_results.items():
-#         train = np.array(scores["mean"].loc["train"])
-#         test = np.array(scores["mean"].loc["test"])
-#         names = np.array(scores["mean"].columns)
-#         train_std = np.array(scores["std"].loc["train"])
-#         test_std = np.array(scores["std"].loc["test"])
-#
-#         file_name = os.path.join(directory, data_base_name + "-mean_on_" + str(
-#             stats_iter) + "_iter-" + metric_name + "-class")
-#
-#         plot_class_metric_scores(test, file_name, labels_names=names, file_name, tag=" averaged",
-#                                  train_STDs=train_std, test_STDs=test_std)
-#         results += [[classifier_name, metric_name, test_mean, test_std]
-#                     for classifier_name, test_mean, test_std
-#                     in zip(names, test, test_std)]
-#     return results
-
 def init_plot(results, metric_name, metric_dataframe,
-              directory, database_name, labels_names, class_metric_scores):
+              directory, database_name, class_metric_scores):
     train = np.array(metric_dataframe.loc["train"])
     test = np.array(metric_dataframe.loc["test"])
     class_test = np.array(class_metric_scores.loc["test"])
...
@@ -181,8 +156,7 @@ def init_plot(results, metric_name, metric_dataframe,
     nb_results = metric_dataframe.shape[1]
 
-    file_name = os.path.join(directory, database_name + "-" + "_vs_".join(
-        labels_names) + "-" + metric_name)
+    file_name = os.path.join(directory, database_name + "-" + metric_name)
 
     results += [[classifiers_name, metric_name, test_mean, test_std, class_mean]
                 for classifiers_name, test_mean, class_mean, test_std in
...
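
Note for readers skimming the diff: the substantive change merged here is the per-class score table in get_metrics_scores, which now builds one pandas DataFrame per metric, indexed by a ("train"/"test", label) MultiIndex with one column per classifier, and fills it in the same loop as the overall train/test scores. The standalone sketch below reproduces that pattern under stated assumptions; the metric, label, and classifier names are made-up placeholders, not the project's data.

    import pandas as pd

    # Illustrative placeholders -- in the project these come from the
    # experiment configuration, not from hard-coded lists.
    metrics = [("accuracy_score",), ("f1_score",)]
    label_names = ["label_0", "label_1"]
    classifier_names = ["clf_a", "clf_b"]

    # One DataFrame per metric: rows are ("train"/"test", label) pairs,
    # columns are classifiers, mirroring the dict built in the patch.
    class_metric_scores = dict((metric[0], pd.DataFrame(
        index=pd.MultiIndex.from_product([["train", "test"], label_names]),
        columns=classifier_names, dtype=float))
        for metric in metrics)

    # Filling one cell the way the merged loop does: a (split, label)
    # tuple selects the row, the classifier name selects the column.
    class_metric_scores["accuracy_score"].loc[("train", "label_0"), "clf_a"] = 0.92
    print(class_metric_scores["accuracy_score"])

Keeping the per-class table alongside the aggregate one lets the plotting code select class_metric_scores.loc["test"] and obtain a labels-by-classifiers matrix directly, which is exactly what the patched init_plot converts to a NumPy array.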