Commit 08a88f7b authored by Dominique Benielli

refactoring names

parent eb36a71f
Showing with 73 additions and 134 deletions
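This commit renames the project's packages and modules from CamelCase to snake_case (for example, MonoMultiViewClassifiers.MonoviewClassifiers becomes mono_multi_view_classifiers.monoview_classifiers, and ResultAnalysis becomes result_analysis). As a rough, hedged illustration of the convention, and not a helper that ships with the repository, the snippet below converts old-style names and checks a few renames that appear in the diff; note that not every rename is mechanical (for instance Exec becomes execute rather than exec).

import re

def camel_to_snake(name):
    # Illustrative only: turn a CamelCase module name into the snake_case
    # form adopted by this commit. This function is not part of the project.
    partial = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', partial).lower()

# Renames taken from the diff (old name -> new name):
assert camel_to_snake("MonoMultiViewClassifiers") == "mono_multi_view_classifiers"
assert camel_to_snake("MonoviewClassifiers") == "monoview_classifiers"
assert camel_to_snake("ResultAnalysis") == "result_analysis"
assert camel_to_snake("GetMultiviewDb") == "get_multiview_db"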
from . import DifficultyFusionModule, analyzeResults
\ No newline at end of file
from ...Multiview import analyzeResults

# Author-Info
__author__ = "Baptiste Bauvin"
__status__ = "Prototype"  # Production, Development, Prototype


def execute(classifier, trainLabels,
            testLabels, DATASET,
            classificationKWARGS, classificationIndices,
            LABELS_DICTIONARY, views, nbCores, times,
            name, KFolds,
            hyperParamSearch, nIter, metrics,
            viewsIndices, randomState, labels, classifierModule):
    return analyzeResults.execute(classifier, trainLabels,
                                  testLabels, DATASET,
                                  classificationKWARGS, classificationIndices,
                                  LABELS_DICTIONARY, views, nbCores, times,
                                  name, KFolds,
                                  hyperParamSearch, nIter, metrics,
                                  viewsIndices, randomState, labels, classifierModule)
\ No newline at end of file
from ...Multiview import analyzeResults

# Author-Info
__author__ = "Baptiste Bauvin"
__status__ = "Prototype"  # Production, Development, Prototype


def execute(classifier, trainLabels,
            testLabels, DATASET,
            classificationKWARGS, classificationIndices,
            LABELS_DICTIONARY, views, nbCores, times,
            name, KFolds,
            hyperParamSearch, nIter, metrics,
            viewsIndices, randomState, labels, classifierModule):
    return analyzeResults.execute(classifier, trainLabels,
                                  testLabels, DATASET,
                                  classificationKWARGS, classificationIndices,
                                  LABELS_DICTIONARY, views, nbCores, times,
                                  name, KFolds,
                                  hyperParamSearch, nIter, metrics,
                                  viewsIndices, randomState, labels, classifierModule)
\ No newline at end of file
from ...Multiview import analyzeResults

# Author-Info
__author__ = "Baptiste Bauvin"
__status__ = "Prototype"  # Production, Development, Prototype


def execute(classifier, trainLabels,
            testLabels, DATASET,
            classificationKWARGS, classificationIndices,
            LABELS_DICTIONARY, views, nbCores, times,
            name, KFolds,
            hyperParamSearch, nIter, metrics,
            viewsIndices, randomState, labels, classifierModule):
    return analyzeResults.execute(classifier, trainLabels,
                                  testLabels, DATASET,
                                  classificationKWARGS, classificationIndices,
                                  LABELS_DICTIONARY, views, nbCores, times,
                                  name, KFolds,
                                  hyperParamSearch, nIter, metrics,
                                  viewsIndices, randomState, labels, classifierModule)
\ No newline at end of file
from . import Dataset, execution, HyperParameterSearch, Transformations
@@ -2,4 +2,4 @@
__version__ = "0.0.0.0"
from . import MonoMultiViewClassifiers, Tests, Exec, Versions
from . import mono_multi_view_classifiers, tests, execute, versions
File moved
from . import ExecClassif, ResultAnalysis, Metrics, MonoviewClassifiers, \
Monoview, Multiview, utils, MultiviewClassifiers
from . import exec_classif, result_analysis, metrics, monoview_classifiers, \
monoview, multiview, utils, multiview_classifiers
__all__ = ['Metrics', 'Monoview', 'MonoviewClassifiers', 'Multiview', 'utils']
@@ -15,14 +15,14 @@ matplotlib.use(
'Agg') # Anti-Grain Geometry C++ library to make a raster (pixel) image of the figure
# Import own modules
from . import MonoviewClassifiers
from . import MultiviewClassifiers
from .Multiview.ExecMultiview import ExecMultiview, ExecMultiview_multicore
from .Monoview.ExecClassifMonoView import ExecMonoview, ExecMonoview_multicore
from .utils import GetMultiviewDb as DB
from .ResultAnalysis import \
from . import monoview_classifiers
from . import multiview_classifiers
from .multiview.exec_multiview import ExecMultiview, ExecMultiview_multicore
from .monoview.exec_classif_mono_view import ExecMonoview, ExecMonoview_multicore
from .utils import get_multiview_db as DB
from .result_analysis import \
getResults, plot_results_noise # resultAnalysis, analyzeLabels, analyzeIterResults, analyzeIterLabels, genNamesFromRes,
from .utils import execution, Dataset, Multiclass
from .utils import execution, dataset, multiclass
# Author-Info
__author__ = "Baptiste Bauvin"
@@ -53,30 +53,30 @@ def initBenchmark(CL_type, monoviewAlgos, multiviewAlgos, args):
benchmark : Dictionary of dictionaries
Dictionary resuming which mono- and multiview algorithms which will be used in the benchmark.
"""
benchmark = {"Monoview": {}, "Multiview": {}}
benchmark = {"monoview": {}, "multiview": {}}
allMultiviewPackages = [name for _, name, isPackage
in pkgutil.iter_modules(
['./MonoMultiViewClassifiers/MultiviewClassifiers/']) if isPackage]
['./mono_multi_view_classifiers/multiview_classifiers/']) if isPackage]
if "Monoview" in CL_type:
if "monoview" in CL_type:
if monoviewAlgos == ['']:
benchmark["Monoview"] = [name for _, name, isPackage in
benchmark["monoview"] = [name for _, name, isPackage in
pkgutil.iter_modules([
"./MonoMultiViewClassifiers/MonoviewClassifiers"])
"./mono_multi_view_classifiers/monoview_classifiers"])
if not isPackage]
else:
benchmark["Monoview"] = monoviewAlgos
benchmark["monoview"] = monoviewAlgos
if "Multiview" in CL_type:
benchmark["Multiview"] = {}
if "multiview" in CL_type:
benchmark["multiview"] = {}
if multiviewAlgos == [""]:
algosMutliview = allMultiviewPackages
else:
algosMutliview = multiviewAlgos
for multiviewPackageName in allMultiviewPackages:
if multiviewPackageName in algosMutliview:
multiviewPackage = getattr(MultiviewClassifiers,
multiviewPackage = getattr(multiview_classifiers,
multiviewPackageName)
multiviewModule = getattr(multiviewPackage,
multiviewPackageName + "Module")
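For readers tracking the renamed dictionary keys above, here is a hedged sketch of the benchmark structure that initBenchmark builds after this commit. Only the "monoview" and "multiview" keys come from the diff; the algorithm names are made-up placeholders.

# Placeholder values; the lowercase keys are the ones introduced by this commit.
benchmark = {
    "monoview": ["adaboost", "decision_tree"],  # monoview classifier module names (hypothetical)
    "multiview": {"ExampleFusion": "_"},        # one entry per multiview classifier package (hypothetical)
}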
@@ -85,14 +85,14 @@ def initBenchmark(CL_type, monoviewAlgos, multiviewAlgos, args):
if CL_type == ["Benchmark"]:
allMonoviewAlgos = [name for _, name, isPackage in
pkgutil.iter_modules([
'./MonoMultiViewClassifiers/MonoviewClassifiers'])
'./mono_multi_view_classifiers/monoview_classifiers'])
if (not isPackage) and name not in ["framework"]]
benchmark["Monoview"] = allMonoviewAlgos
benchmark["Multiview"] = dict(
benchmark["monoview"] = allMonoviewAlgos
benchmark["multiview"] = dict(
(multiviewPackageName, "_") for multiviewPackageName in
allMultiviewPackages)
for multiviewPackageName in allMultiviewPackages:
multiviewPackage = getattr(MultiviewClassifiers,
multiviewPackage = getattr(multiview_classifiers,
multiviewPackageName)
multiviewModule = getattr(multiviewPackage,
multiviewPackageName + "Module")
@@ -153,20 +153,20 @@ def initMonoviewExps(benchmark, viewsDictionary, nbClass, kwargsInit):
benchmark : Dictionary of dictionaries
Dictionary resuming which mono- and multiview algorithms which will be used in the benchmark.
"""
argumentDictionaries = {"Monoview": [], "Multiview": []}
if benchmark["Monoview"]:
argumentDictionaries["Monoview"] = []
argumentDictionaries = {"monoview": [], "multiview": []}
if benchmark["monoview"]:
argumentDictionaries["monoview"] = []
for viewName, viewIndex in viewsDictionary.items():
for classifier in benchmark["Monoview"]:
for classifier in benchmark["monoview"]:
if multiple_args(classifier, kwargsInit):
argumentDictionaries["Monoview"] += gen_multiple_args_dictionnaries(nbClass, kwargsInit, classifier, viewName, viewIndex)
argumentDictionaries["monoview"] += gen_multiple_args_dictionnaries(nbClass, kwargsInit, classifier, viewName, viewIndex)
else:
arguments = {
"args": {classifier + "KWARGS": dict((key, value[0]) for key, value in kwargsInit[
classifier + "KWARGSInit"].items()), "feat": viewName,
"CL_type": classifier, "nbClass": nbClass},
"viewIndex": viewIndex}
argumentDictionaries["Monoview"].append(arguments)
argumentDictionaries["monoview"].append(arguments)
return argumentDictionaries
def multiple_args(classifier, kwargsInit):
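Similarly, a hedged sketch of the argumentDictionaries structure that initMonoviewExps returns, using the lowercase keys this commit introduces; the inner values are illustrative placeholders, not data taken from the source.

# Placeholder values; shape inferred from the hunk above.
argumentDictionaries = {
    "monoview": [
        {"args": {"adaboostKWARGS": {"n_estimators": 50},  # hypothetical classifier arguments
                  "feat": "ViewName",                      # view name (placeholder)
                  "CL_type": "adaboost",                   # classifier name (placeholder)
                  "nbClass": 2},
         "viewIndex": 0},
    ],
    "multiview": [],  # filled later by initMultiviewArguments
}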
@@ -228,24 +228,24 @@ def initMonoviewKWARGS(args, classifiersNames):
For example, for Adaboost, the KWARGS will be `{"n_estimators":<value>, "base_estimator":<value>}`"""
logging.debug("Start:\t Initializing Monoview classifiers arguments")
logging.debug("Start:\t Initializing monoview classifiers arguments")
monoviewKWARGS = {}
for classifiersName in classifiersNames:
try:
classifierModule = getattr(MonoviewClassifiers, classifiersName)
classifierModule = getattr(monoview_classifiers, classifiersName)
except AttributeError:
raise AttributeError(
classifiersName + " is not implemented in MonoviewClassifiers, "
"please specify the name of the file in MonoviewClassifiers")
classifiersName + " is not implemented in monoview_classifiers, "
"please specify the name of the file in monoview_classifiers")
monoviewKWARGS[
classifiersName + "KWARGSInit"] = classifierModule.formatCmdArgs(
args)
logging.debug("Done:\t Initializing Monoview classifiers arguments")
logging.debug("Done:\t Initializing monoview classifiers arguments")
return monoviewKWARGS
def initKWARGSFunc(args, benchmark):
monoviewKWARGS = initMonoviewKWARGS(args, benchmark["Monoview"])
monoviewKWARGS = initMonoviewKWARGS(args, benchmark["monoview"])
return monoviewKWARGS
@@ -253,11 +253,11 @@ def initMultiviewArguments(args, benchmark, views, viewsIndices,
argumentDictionaries, randomState, directory,
resultsMonoview, classificationIndices):
"""Used to add each monoview exeperience args to the list of monoview experiences args"""
logging.debug("Start:\t Initializing Multiview classifiers arguments")
logging.debug("Start:\t Initializing multiview classifiers arguments")
multiviewArguments = []
if "Multiview" in benchmark:
for multiviewAlgoName in benchmark["Multiview"]:
multiviewPackage = getattr(MultiviewClassifiers, multiviewAlgoName)
if "multiview" in benchmark:
for multiviewAlgoName in benchmark["multiview"]:
multiviewPackage = getattr(multiview_classifiers, multiviewAlgoName)
mutliviewModule = getattr(multiviewPackage,
multiviewAlgoName + "Module")
@@ -267,8 +267,8 @@ def initMultiviewArguments(args, benchmark, views, viewsIndices,
directory,
resultsMonoview,
classificationIndices)
argumentDictionaries["Multiview"] = multiviewArguments
logging.debug("Start:\t Initializing Multiview classifiers arguments")
argumentDictionaries["multiview"] = multiviewArguments
logging.debug("Start:\t Initializing multiview classifiers arguments")
return argumentDictionaries
@@ -334,7 +334,7 @@ def execOneBenchmark(coreIndex=-1, LABELS_DICTIONARY=None, directory=None,
classificationIndices, labels,
LABELS_DICTIONARY, kFolds)
logging.debug("Start:\t Monoview benchmark")
logging.debug("Start:\t monoview benchmark")
resultsMonoview += [
ExecMonoview_multicore(directory, args.name, labelsNames,
classificationIndices, kFolds,
@@ -343,19 +343,19 @@ def execOneBenchmark(coreIndex=-1, LABELS_DICTIONARY=None, directory=None,
hyperParamSearch=hyperParamSearch,
metrics=metrics,
nIter=args.CL_HPS_iter, **argument)
for argument in argumentDictionaries["Monoview"]]
logging.debug("Done:\t Monoview benchmark")
for argument in argumentDictionaries["monoview"]]
logging.debug("Done:\t monoview benchmark")
logging.debug("Start:\t Multiview arguments initialization")
logging.debug("Start:\t multiview arguments initialization")
argumentDictionaries = initMultiviewArguments(args, benchmark, views,
viewsIndices,
argumentDictionaries,
randomState, directory,
resultsMonoview,
classificationIndices)
logging.debug("Done:\t Multiview arguments initialization")
logging.debug("Done:\t multiview arguments initialization")
logging.debug("Start:\t Multiview benchmark")
logging.debug("Start:\t multiview benchmark")
resultsMultiview = [
ExecMultiview_multicore(directory, coreIndex, args.name,
classificationIndices, kFolds, args.type,
@@ -363,8 +363,8 @@ def execOneBenchmark(coreIndex=-1, LABELS_DICTIONARY=None, directory=None,
labels, hyperParamSearch=hyperParamSearch,
metrics=metrics, nIter=args.CL_HPS_iter,
**arguments)
for arguments in argumentDictionaries["Multiview"]]
logging.debug("Done:\t Multiview benchmark")
for arguments in argumentDictionaries["multiview"]]
logging.debug("Done:\t multiview benchmark")
return [flag, resultsMonoview + resultsMultiview]
@@ -387,8 +387,8 @@ def execOneBenchmark_multicore(nbCores=-1, LABELS_DICTIONARY=None,
classificationIndices, labels,
LABELS_DICTIONARY, kFolds)
logging.debug("Start:\t Monoview benchmark")
nbExperiments = len(argumentDictionaries["Monoview"])
logging.debug("Start:\t monoview benchmark")
nbExperiments = len(argumentDictionaries["monoview"])
nbMulticoreToDo = int(math.ceil(float(nbExperiments) / nbCores))
for stepIndex in range(nbMulticoreToDo):
resultsMonoview += (Parallel(n_jobs=nbCores)(
@@ -399,24 +399,24 @@ def execOneBenchmark_multicore(nbCores=-1, LABELS_DICTIONARY=None,
hyperParamSearch=hyperParamSearch,
metrics=metrics,
nIter=args.CL_HPS_iter,
**argumentDictionaries["Monoview"][
**argumentDictionaries["monoview"][
coreIndex + stepIndex * nbCores])
for coreIndex in
range(min(nbCores, nbExperiments - stepIndex * nbCores))))
logging.debug("Done:\t Monoview benchmark")
logging.debug("Done:\t monoview benchmark")
logging.debug("Start:\t Multiview arguments initialization")
logging.debug("Start:\t multiview arguments initialization")
argumentDictionaries = initMultiviewArguments(args, benchmark, views,
viewsIndices,
argumentDictionaries,
randomState, directory,
resultsMonoview,
classificationIndices)
logging.debug("Done:\t Multiview arguments initialization")
logging.debug("Done:\t multiview arguments initialization")
logging.debug("Start:\t Multiview benchmark")
logging.debug("Start:\t multiview benchmark")
resultsMultiview = []
nbExperiments = len(argumentDictionaries["Multiview"])
nbExperiments = len(argumentDictionaries["multiview"])
nbMulticoreToDo = int(math.ceil(float(nbExperiments) / nbCores))
for stepIndex in range(nbMulticoreToDo):
resultsMultiview += Parallel(n_jobs=nbCores)(
@@ -429,11 +429,11 @@ def execOneBenchmark_multicore(nbCores=-1, LABELS_DICTIONARY=None,
metrics=metrics,
nIter=args.CL_HPS_iter,
**
argumentDictionaries["Multiview"][
argumentDictionaries["multiview"][
stepIndex * nbCores + coreIndex])
for coreIndex in
range(min(nbCores, nbExperiments - stepIndex * nbCores)))
logging.debug("Done:\t Multiview benchmark")
logging.debug("Done:\t multiview benchmark")
return [flag, resultsMonoview + resultsMultiview]
@@ -452,8 +452,8 @@ def execOneBenchmarkMonoCore(DATASET=None, LABELS_DICTIONARY=None,
resultsMonoview, labelsNames = benchmarkInit(directory,
classificationIndices, labels,
LABELS_DICTIONARY, kFolds)
logging.debug("Start:\t Monoview benchmark")
for arguments in argumentDictionaries["Monoview"]:
logging.debug("Start:\t monoview benchmark")
for arguments in argumentDictionaries["monoview"]:
X = DATASET.get("View" + str(arguments["viewIndex"]))
Y = labels
resultsMonoview += [
@@ -462,9 +462,9 @@ def execOneBenchmarkMonoCore(DATASET=None, LABELS_DICTIONARY=None,
1, args.type, args.pathF, randomState,
hyperParamSearch=hyperParamSearch, metrics=metrics,
nIter=args.CL_HPS_iter, **arguments)]
logging.debug("Done:\t Monoview benchmark")
logging.debug("Done:\t monoview benchmark")
logging.debug("Start:\t Multiview arguments initialization")
logging.debug("Start:\t multiview arguments initialization")
argumentDictionaries = initMultiviewArguments(args, benchmark, views,
viewsIndices,
@@ -472,18 +472,18 @@ def execOneBenchmarkMonoCore(DATASET=None, LABELS_DICTIONARY=None,
randomState, directory,
resultsMonoview,
classificationIndices)
logging.debug("Done:\t Multiview arguments initialization")
logging.debug("Done:\t multiview arguments initialization")
logging.debug("Start:\t Multiview benchmark")
logging.debug("Start:\t multiview benchmark")
resultsMultiview = []
for arguments in argumentDictionaries["Multiview"]:
for arguments in argumentDictionaries["multiview"]:
resultsMultiview += [
ExecMultiview(directory, DATASET, args.name, classificationIndices,
kFolds, 1, args.type,
args.pathF, LABELS_DICTIONARY, randomState, labels,
hyperParamSearch=hyperParamSearch,
metrics=metrics, nIter=args.CL_HPS_iter, **arguments)]
logging.debug("Done:\t Multiview benchmark")
logging.debug("Done:\t multiview benchmark")
return [flag, resultsMonoview + resultsMultiview]
@@ -519,7 +519,7 @@ def execBenchmark(nbCores, statsIter, nbMulticlass,
indices for the biclass training set, the ones for the biclass testing set and the ones for the
multiclass testing set.
metrics : list of lists
Metrics that will be used to evaluate the algorithms performance.
metrics that will be used to evaluate the algorithms performance.
labelsDictionary : dictionary
Dictionary mapping labels indices to labels names.
nbLabels : int
@@ -618,13 +618,13 @@ def execClassif(arguments):
splits = execution.genSplits(DATASET.get("Labels").value, args.CL_split,
statsIterRandomStates)
multiclassLabels, labelsCombinations, indicesMulticlass = Multiclass.genMulticlassLabels(
multiclassLabels, labelsCombinations, indicesMulticlass = multiclass.genMulticlassLabels(
DATASET.get("Labels").value, multiclassMethod, splits)
kFolds = execution.genKFolds(statsIter, args.CL_nbFolds,
statsIterRandomStates)
datasetFiles = Dataset.initMultipleDatasets(args.pathF, args.name, nbCores)
datasetFiles = dataset.initMultipleDatasets(args.pathF, args.name, nbCores)
# if not views:
# raise ValueError("Empty views list, modify selected views to match dataset " + args.views)
@@ -638,7 +638,7 @@ def execClassif(arguments):
if metrics == [[""]]:
metricsNames = [name for _, name, isPackage
in pkgutil.iter_modules(
['./MonoMultiViewClassifiers/Metrics']) if
['./mono_multi_view_classifiers/metrics']) if
not isPackage and name not in ["framework", "log_loss",
"matthews_corrcoef",
"roc_auc_score"]]