Commit d5235b9c authored by Baptiste Bauvin

Cleaned

parent c3e752d3
Pipeline #3955 failed
@@ -11,15 +11,14 @@ import itertools
 import numpy as np
 from joblib import Parallel, delayed
 from sklearn.tree import DecisionTreeClassifier
 # Import own modules
 from . import monoview_classifiers
 from . import multiview_classifiers
 from .multiview.exec_multiview import exec_multiview, exec_multiview_multicore
 from .monoview.exec_classif_mono_view import exec_monoview, exec_monoview_multicore
 from .utils.dataset import delete_HDF5
-from .result_analysis import get_results
-from .result_analysis import plot_results_noise
-# resultAnalysis, analyzeLabels, analyzeIterResults, analyzeIterLabels, genNamesFromRes,
+from .result_analysis import get_results, plot_results_noise, analyze_biclass
 from .utils import execution, dataset, multiclass, configuration
 matplotlib.use(
@@ -772,7 +771,6 @@ def exec_benchmark(nb_cores, stats_iter, nb_multiclass,
     # else:
     for arguments in benchmark_arguments_dictionaries:
         benchmark_results = exec_one_benchmark_mono_core(dataset_var=dataset_var, **arguments)
-        from .result_analysis import analyze_biclass
         analyze_biclass([benchmark_results], benchmark_arguments_dictionaries, stats_iter, metrics, example_ids=dataset_var.example_ids)
         results += [benchmark_results]
     logging.debug("Done:\t Executing all the needed biclass benchmarks")
...
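Both hunks apply the same cleanup: the three separate result_analysis imports are merged into one module-level statement, and the import that previously ran inside the benchmark loop is hoisted to the top of the file. A minimal, self-contained sketch of that pattern follows; the run_all/run_all_clean names and the statistics.mean payload are hypothetical, chosen only for illustration.

    # Before: the import statement sits inside the loop body, so it is
    # re-executed on every iteration (Python caches the module in
    # sys.modules, so behaviour is unchanged, but the dependency is
    # hidden mid-function instead of being declared at module scope).
    def run_all(jobs):
        results = []
        for job in jobs:
            from statistics import mean  # loop-local import
            results.append(mean(job))
        return results

    # After "cleaning": one explicit module-level import, mirroring how
    # the commit folds analyze_biclass into the consolidated
    # result_analysis import at the top of the module.
    from statistics import mean

    def run_all_clean(jobs):
        return [mean(job) for job in jobs]

    print(run_all_clean([[1, 2, 3], [4, 5, 6]]))  # prints [2, 5]

Beyond readability, hoisting the import means a missing or circular dependency fails at import time rather than partway through a long benchmark run.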