diff --git a/multiview_platform/MonoMultiViewClassifiers/ExecClassif.py b/multiview_platform/MonoMultiViewClassifiers/ExecClassif.py
index 31b56ffd453b1cb21472684c77a0294ddc9bd9c8..6d31ee65a211c3d6a752d72de5e4055a9f2e5021 100644
--- a/multiview_platform/MonoMultiViewClassifiers/ExecClassif.py
+++ b/multiview_platform/MonoMultiViewClassifiers/ExecClassif.py
@@ -156,14 +156,27 @@ def initMonoviewExps(benchmark, viewsDictionary, nbClass, kwargsInit):
     argumentDictionaries["Monoview"] = []
     for viewName, viewIndex in viewsDictionary.items():
         for classifier in benchmark["Monoview"]:
-            arguments = {
-                "args": {classifier + "KWARGS": kwargsInit[
-                    classifier + "KWARGSInit"], "feat": viewName,
-                    "CL_type": classifier, "nbClass": nbClass},
-                "viewIndex": viewIndex}
-            argumentDictionaries["Monoview"].append(arguments)
+            if multiple_args(classifier, kwargsInit):
+                argumentDictionaries["Monoview"] += gen_multiple_args_dictionnaries(nbClass, kwargsInit)
+            else:
+                arguments = {
+                    "args": {classifier + "KWARGS": kwargsInit[
+                        classifier + "KWARGSInit"], "feat": viewName,
+                        "CL_type": classifier, "nbClass": nbClass},
+                    "viewIndex": viewIndex}
+                argumentDictionaries["Monoview"].append(arguments)
     return argumentDictionaries
 
+def multiple_args(classifier, kwargsInit):
+    listed_args = [type(value) == list and len(value)>1 for key, value in kwargsInit[classifier + "KWARGSInit"].items()]
+    if True in listed_args:
+        return True
+    else:
+        return False
+
+
+def gen_multiple_args_dictionnaries(nbClass, kwargsInit):
+    raise NotImplementedError  # WIP stub: fail loudly instead of leaving a body-less def (SyntaxError)
 
 def initMonoviewKWARGS(args, classifiersNames):
     r"""Used to init kwargs thanks to a function in each monoview classifier package.
 
diff --git a/multiview_platform/MonoMultiViewClassifiers/utils/GetMultiviewDb.py b/multiview_platform/MonoMultiViewClassifiers/utils/GetMultiviewDb.py
index 21267f61e4d1b580408b8301e33cb00fffdc5494..1e24d982a3c70d1fa5578d4b714f06bf9c297137 100644
--- a/multiview_platform/MonoMultiViewClassifiers/utils/GetMultiviewDb.py
+++ b/multiview_platform/MonoMultiViewClassifiers/utils/GetMultiviewDb.py
@@ -131,7 +131,7 @@ def getPlausibleDBhdf5(features, pathF, name, NB_CLASS=3, LABELS_NAME="",
         datasetFile.close()
         datasetFile = h5py.File(pathF + "Plausible.hdf5", "r")
         LABELS_DICTIONARY = {0: "No", 1: "Yes"}
-        return datasetFile, LABELS_DICTIONARY
+        return datasetFile, LABELS_DICTIONARY, "Plausible"
     elif NB_CLASS >= 3:
         firstBound = int(datasetLength / 3)
         rest = datasetLength - 2 * int(datasetLength / 3)