Commit e0e15256 authored by Baptiste Bauvin

Logging level adjusted

parent bfb47d01
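In short: the "Start:/Done:" progress messages are promoted from logging.debug to logging.info, and the level passed to logging.basicConfig in init_log_file is raised from DEBUG to INFO, so the progress trace keeps reaching the log file and the console while any remaining debug-level calls are silenced. A minimal sketch of the resulting behaviour with the standard logging module ("benchmark.log" is only a stand-in for the path built by init_log_file):

import logging

# Mirrors the configuration in the last hunk of this commit: INFO-level file
# logging plus an optional console handler. "benchmark.log" is a placeholder.
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                    filename='benchmark.log', level=logging.INFO,
                    filemode='w')
logging.getLogger().addHandler(logging.StreamHandler())

logging.info("Start:\t Benchmark initialization")   # recorded: INFO >= INFO
logging.debug("Start:\t monoview benchmark")        # dropped:  DEBUG < INFO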
@@ -300,7 +300,7 @@ def init_kwargs(args, classifiers_names, framework="monoview"):
    For example, for Adaboost, the KWARGS will be `{"n_estimators":<value>, "base_estimator":<value>}`"""
-    logging.debug("Start:\t Initializing monoview classifiers arguments")
+    logging.info("Start:\t Initializing monoview classifiers arguments")
    kwargs = {}
    for classifiers_name in classifiers_names:
        try:

@@ -316,7 +316,7 @@ def init_kwargs(args, classifiers_names, framework="monoview"):
            kwargs[classifiers_name] = args[classifiers_name]
        else:
            kwargs[classifiers_name] = {}
-    logging.debug("Done:\t Initializing monoview classifiers arguments")
+    logging.info("Done:\t Initializing monoview classifiers arguments")
    return kwargs

@@ -402,7 +402,7 @@ def benchmark_init(directory, classification_indices, labels, labels_dictionary,
    -------

    """
-    logging.debug("Start:\t Benchmark initialization")
+    logging.info("Start:\t Benchmark initialization")
    secure_file_path(os.path.join(directory, "train_labels.csv"))
    train_indices = classification_indices[0]
    train_labels = dataset_var.get_labels(sample_indices=train_indices)

@@ -421,7 +421,7 @@ def benchmark_init(directory, classification_indices, labels, labels_dictionary,
        np.savetxt(file_name, train_labels[test_cv_indices[:min_fold_len]],
                   delimiter=",")
    labels_names = list(labels_dictionary.values())
-    logging.debug("Done:\t Benchmark initialization")
+    logging.info("Done:\t Benchmark initialization")
    return results_monoview, labels_names

@@ -550,7 +550,7 @@ def exec_one_benchmark_mono_core(dataset_var=None, labels_dictionary=None,
            labels_dictionary, k_folds,
            dataset_var)
    logging.getLogger('matplotlib.font_manager').disabled = True
-    logging.debug("Start:\t monoview benchmark")
+    logging.info("Start:\t monoview benchmark")
    traceback_outputs = {}
    for arguments in argument_dictionaries["monoview"]:
        try:

@@ -571,9 +571,9 @@ def exec_one_benchmark_mono_core(dataset_var=None, labels_dictionary=None,
            else:
                raise
-    logging.debug("Done:\t monoview benchmark")
+    logging.info("Done:\t monoview benchmark")
-    logging.debug("Start:\t multiview arguments initialization")
+    logging.info("Start:\t multiview arguments initialization")
    # argument_dictionaries = initMultiviewArguments(args, benchmark, views,
    # views_indices,

@@ -581,9 +581,9 @@ def exec_one_benchmark_mono_core(dataset_var=None, labels_dictionary=None,
    # random_state, directory,
    # resultsMonoview,
    # classification_indices)
-    logging.debug("Done:\t multiview arguments initialization")
+    logging.info("Done:\t multiview arguments initialization")
-    logging.debug("Start:\t multiview benchmark")
+    logging.info("Start:\t multiview benchmark")
    results_multiview = []
    for arguments in argument_dictionaries["multiview"]:
        try:

@@ -602,7 +602,7 @@ def exec_one_benchmark_mono_core(dataset_var=None, labels_dictionary=None,
                    arguments["classifier_name"]] = traceback.format_exc()
            else:
                raise
-    logging.debug("Done:\t multiview benchmark")
+    logging.info("Done:\t multiview benchmark")
    return [flag, results_monoview + results_multiview, traceback_outputs]

@@ -653,7 +653,7 @@ def exec_benchmark(nb_cores, stats_iter,
    results : list of lists
        The results of the benchmark.
    """
-    logging.debug("Start:\t Executing all the needed benchmarks")
+    logging.info("Start:\t Executing all the needed benchmarks")
    results = []
    # if nb_cores > 1:
    # if stats_iter > 1 or nb_multiclass > 1:

@@ -681,17 +681,17 @@ def exec_benchmark(nb_cores, stats_iter,
            metrics, sample_ids=dataset_var.sample_ids,
            labels=dataset_var.get_labels())
        results += [benchmark_results]
-    logging.debug("Done:\t Executing all the needed benchmarks")
+    logging.info("Done:\t Executing all the needed benchmarks")
    # Do everything with flagging
-    logging.debug("Start:\t Analyzing predictions")
+    logging.info("Start:\t Analyzing predictions")
    results_mean_stds = analyze(results, stats_iter,
                                benchmark_arguments_dictionaries,
                                metrics,
                                directory,
                                dataset_var.sample_ids,
                                dataset_var.get_labels())
-    logging.debug("Done:\t Analyzing predictions")
+    logging.info("Done:\t Analyzing predictions")
    delete(benchmark_arguments_dictionaries, nb_cores, dataset_var)
    return results_mean_stds
@@ -55,7 +55,7 @@ def exec_monoview(directory, X, Y, database_name, labels_names,
                  random_state, hyper_param_search="Random",
                  metrics={"accuracy_score*": {}}, n_iter=30, view_name="",
                  hps_kwargs={}, **args):
-    logging.debug("Start:\t Loading data")
+    logging.info("Start:\t Loading data")
    kwargs, \
    t_start, \
    view_name, \

@@ -68,9 +68,9 @@ def exec_monoview(directory, X, Y, database_name, labels_names,
    base_file_name = init_constants(args, X, classification_indices,
                                    labels_names,
                                    database_name, directory, view_name, )
-    logging.debug("Done:\t Loading data")
+    logging.info("Done:\t Loading data")
-    logging.debug(
+    logging.info(
        "Info:\t Classification - Database:" + str(
            database_name) + " View:" + str(
            view_name) + " train ratio:"

@@ -78,17 +78,17 @@ def exec_monoview(directory, X, Y, database_name, labels_names,
            k_folds.n_splits) + ", cores:"
        + str(nb_cores) + ", algorithm : " + classifier_name)
-    logging.debug("Start:\t Determine Train/Test split")
+    logging.info("Start:\t Determine Train/Test split")
    X_train, y_train, X_test, y_test = init_train_test(X, Y,
                                                       classification_indices)
-    logging.debug("Info:\t Shape X_train:" + str(
+    logging.info("Info:\t Shape X_train:" + str(
        X_train.shape) + ", Length of y_train:" + str(len(y_train)))
-    logging.debug("Info:\t Shape X_test:" + str(
+    logging.info("Info:\t Shape X_test:" + str(
        X_test.shape) + ", Length of y_test:" + str(len(y_test)))
-    logging.debug("Done:\t Determine Train/Test split")
+    logging.info("Done:\t Determine Train/Test split")
-    logging.debug("Start:\t Generate classifier args")
+    logging.info("Start:\t Generate classifier args")
    classifier_module = getattr(monoview_classifiers, classifier_name)
    classifier_class_name = classifier_module.classifier_class_name
    hyper_param_beg = time.monotonic()

@@ -100,9 +100,9 @@ def exec_monoview(directory, X, Y, database_name, labels_names,
                                 k_folds, nb_cores, metrics, kwargs,
                                 **hps_kwargs)
    hyper_param_duration = time.monotonic() - hyper_param_beg
-    logging.debug("Done:\t Generate classifier args")
+    logging.info("Done:\t Generate classifier args")
-    logging.debug("Start:\t Training")
+    logging.info("Start:\t Training")
    classifier = get_mc_estim(getattr(classifier_module,
                                      classifier_class_name)

@@ -112,9 +112,9 @@ def exec_monoview(directory, X, Y, database_name, labels_names,
    fit_beg = time.monotonic()
    classifier.fit(X_train, y_train)  # NB_CORES=nbCores,
    fit_duration = time.monotonic() - fit_beg
-    logging.debug("Done:\t Training")
+    logging.info("Done:\t Training")
-    logging.debug("Start:\t Predicting")
+    logging.info("Start:\t Predicting")
    train_pred = classifier.predict(X_train)
    pred_beg = time.monotonic()
    test_pred = classifier.predict(X_test)

@@ -127,14 +127,14 @@ def exec_monoview(directory, X, Y, database_name, labels_names,
    for testIndex, index in enumerate(classification_indices[1]):
        full_pred[index] = test_pred[testIndex]
-    logging.debug("Done:\t Predicting")
+    logging.info("Done:\t Predicting")
    whole_duration = time.monotonic() - t_start
-    logging.debug(
+    logging.info(
        "Info:\t Duration for training and predicting: " + str(
            whole_duration) + "[s]")
-    logging.debug("Start:\t Getting results")
+    logging.info("Start:\t Getting results")
    result_analyzer = MonoviewResultAnalyzer(view_name=view_name,
                                             classifier_name=classifier_name,
                                             shape=X.shape,

@@ -154,9 +154,9 @@ def exec_monoview(directory, X, Y, database_name, labels_names,
                                             duration=whole_duration)
    string_analysis, images_analysis, metrics_scores, class_metrics_scores, \
    confusion_matrix = result_analyzer.analyze()
-    logging.debug("Done:\t Getting results")
+    logging.info("Done:\t Getting results")
-    logging.debug("Start:\t Saving preds")
+    logging.info("Start:\t Saving preds")
    save_results(string_analysis, output_file_name, full_pred, train_pred,
                 y_train, images_analysis, y_test, confusion_matrix)
    logging.info("Done:\t Saving results")

@@ -203,7 +203,7 @@ def get_hyper_params(classifier_module, search_method, classifier_module_name,
                     output_file_name, k_folds, nb_cores, metrics, kwargs,
                     **hps_kwargs):
    if search_method != "None":
-        logging.debug(
+        logging.info(
            "Start:\t " + search_method + " best settings for " + classifier_module_name)
        classifier_hp_search = getattr(hyper_parameter_search, search_method)
        estimator = getattr(classifier_module, classifier_class_name)(

@@ -218,7 +218,7 @@ def get_hyper_params(classifier_module, search_method, classifier_module_name,
        hps.fit(X_train, y_train, **kwargs[classifier_module_name])
        cl_kwargs = hps.get_best_params()
        hps.gen_report(output_file_name)
-        logging.debug("Done:\t " + search_method + " best settings")
+        logging.info("Done:\t " + search_method + " best settings")
    else:
        cl_kwargs = kwargs[classifier_module_name]
    return cl_kwargs
@@ -237,7 +237,7 @@ def exec_multiview(directory, dataset_var, name, classification_indices,
    ``MultiviewResult``
    """
-    logging.debug("Start:\t Initialize constants")
+    logging.info("Start:\t Initialize constants")
    cl_type, \
    t_start, \
    views_indices, \

@@ -250,16 +250,16 @@ def exec_multiview(directory, dataset_var, name, classification_indices,
    base_file_name, \
    metrics = init_constants(kwargs, classification_indices, metrics, name,
                             nb_cores, k_folds, dataset_var, directory)
-    logging.debug("Done:\t Initialize constants")
+    logging.info("Done:\t Initialize constants")
    extraction_time = time.time() - t_start
    logging.info("Info:\t Extraction duration " + str(extraction_time) + "s")
-    logging.debug("Start:\t Getting train/test split")
+    logging.info("Start:\t Getting train/test split")
    learning_indices, validation_indices = classification_indices
-    logging.debug("Done:\t Getting train/test split")
+    logging.info("Done:\t Getting train/test split")
-    logging.debug("Start:\t Getting classifiers modules")
+    logging.info("Start:\t Getting classifiers modules")
    # classifierPackage = getattr(multiview_classifiers,
    # CL_type) # Permet d'appeler un module avec une string
    classifier_module = getattr(multiview_classifiers, cl_type)
@@ -47,7 +47,7 @@ def get_sample_errors(groud_truth, results):
def publish_sample_errors(sample_errors, directory, databaseName,
                          labels_names, sample_ids, labels):  # pragma: no cover
-    logging.debug("Start:\t Label analysis figure generation")
+    logging.info("Start:\t Label analysis figure generation")
    base_file_name = os.path.join(directory, databaseName + "-")

@@ -64,13 +64,13 @@ def publish_sample_errors(sample_errors, directory, databaseName,
    plot_errors_bar(error_on_samples, nb_samples,
                    base_file_name, sample_ids=sample_ids)
-    logging.debug("Done:\t Label analysis figures generation")
+    logging.info("Done:\t Label analysis figures generation")

def publish_all_sample_errors(iter_results, directory,
                              stats_iter,
                              sample_ids, labels):  # pragma: no cover
-    logging.debug(
+    logging.info(
        "Start:\t Global label analysis figure generation")
    nb_samples, nb_classifiers, data, \

@@ -87,7 +87,7 @@ def publish_all_sample_errors(iter_results, directory,
    plot_errors_bar(error_on_samples, nb_samples, os.path.join(directory, ""),
                    sample_ids=sample_ids)
-    logging.debug(
+    logging.info(
        "Done:\t Global label analysis figures generation")
@@ -65,7 +65,7 @@ def analyze_iterations(results, benchmark_argument_dictionaries, stats_iter,
    label combination, regrouping the scores for each metrics and the
    information useful to plot errors on samples.
    """
-    logging.debug("Start:\t Analyzing all results")
+    logging.info("Start:\t Analyzing all results")
    iter_results = {"metrics_scores": [i for i in range(stats_iter)],
                    "class_metrics_scores": [i for i in range(stats_iter)],
                    "sample_errors": [i for i in range(stats_iter)],

@@ -105,7 +105,7 @@ def analyze_iterations(results, benchmark_argument_dictionaries, stats_iter,
        iter_results["labels"] = labels
        iter_results["durations"][iter_index] = durations
-    logging.debug("Done:\t Analyzing all results")
+    logging.info("Done:\t Analyzing all results")
    return res, iter_results, flagged_tracebacks_list, labels_names
@@ -95,7 +95,7 @@ def publish_metrics_graphs(metrics_scores, directory, database_name,
    """
    results = []
    for metric_name in metrics_scores.keys():
-        logging.debug(
+        logging.info(
            "Start:\t Score graph generation for " + metric_name)
        train_scores, test_scores, classifier_names, \
        file_name, nb_results, results, \

@@ -112,7 +112,7 @@ def publish_metrics_graphs(metrics_scores, directory, database_name,
        class_file_name = file_name+"-class"
        plot_class_metric_scores(class_test_scores, class_file_name,
                                 labels_names, classifier_names, metric_name)
-        logging.debug(
+        logging.info(
            "Done:\t Score graph generation for " + metric_name)
    return results
@@ -693,14 +693,14 @@ def init_multiple_datasets(path_f, name, nb_cores):  # pragma: no cover
    """
    if nb_cores > 1:
        if datasets_already_exist(path_f, name, nb_cores):
-            logging.debug(
+            logging.info(
                "Info:\t Enough copies of the dataset are already available")
            pass
        else:
            if os.path.getsize(
                    os.path.join(path_f, name + ".hdf5")) * nb_cores / float(
                    1024) / 1000 / 1000 > 0.1:
-                logging.debug("Start:\t Creating " + str(
+                logging.info("Start:\t Creating " + str(
                    nb_cores) + " temporary datasets for multiprocessing")
                logging.warning(
                    r" WARNING : /!\ This may use a lot of HDD storage space : " +

@@ -715,7 +715,7 @@ def init_multiple_datasets(path_f, name, nb_cores):  # pragma: no cover
            else:
                pass
            dataset_files = copy_hdf5(path_f, name, nb_cores)
-            logging.debug("Start:\t Creating datasets for multiprocessing")
+            logging.info("Start:\t Creating datasets for multiprocessing")
            return dataset_files

@@ -732,10 +732,10 @@ def copy_hdf5(pathF, name, nbCores):
def delete_HDF5(benchmarkArgumentsDictionaries, nbCores, dataset):
    """Used to delete temporary copies at the end of the benchmark"""
    if nbCores > 1:
-        logging.debug("Start:\t Deleting " + str(
+        logging.info("Start:\t Deleting " + str(
            nbCores) + " temporary datasets for multiprocessing")
        args = benchmarkArgumentsDictionaries[0]["args"]
-        logging.debug("Start:\t Deleting datasets for multiprocessing")
+        logging.info("Start:\t Deleting datasets for multiprocessing")
        for coreIndex in range(nbCores):
            os.remove(args["pathf"] + args["name"] + str(coreIndex) + ".hdf5")
@@ -171,7 +171,7 @@ def init_log_file(name, views, cl_type, log, debug, label,
    log_file_path = os.path.join(result_directory, log_file_name)
    os.makedirs(os.path.dirname(log_file_path))
    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
-                        filename=log_file_path, level=logging.DEBUG,
+                        filename=log_file_path, level=logging.INFO,
                        filemode='w')
    if log:
        logging.getLogger().addHandler(logging.StreamHandler())
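Note that the handlers involved here (the file handler created by basicConfig and the StreamHandler added when log is set) are left at their default NOTSET level, so the filtering decision is made by the root logger configured above. A hedged sketch, not part of the commit, showing how debug output could be re-enabled at runtime if needed:

import logging

# The root logger's level is what basicConfig sets; the handlers themselves
# pass through every record they receive.
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                    level=logging.INFO)

logging.debug("invisible while the root logger sits at INFO")
logging.getLogger().setLevel(logging.DEBUG)   # lower the threshold again
logging.debug("visible once the root level is back to DEBUG")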