Commit 3f149074 authored by Baptiste Bauvin

Added list format for each monoview arg and removed R computing on CGDesc

parent b8c5e863
Showing 101 additions and 107 deletions
@@ -161,8 +161,8 @@ def initMonoviewExps(benchmark, viewsDictionary, nbClass, kwargsInit):
             argumentDictionaries["Monoview"] += gen_multiple_args_dictionnaries(nbClass, kwargsInit, classifier, viewName, viewIndex)
         else:
             arguments = {
-                "args": {classifier + "KWARGS": kwargsInit[
-                    classifier + "KWARGSInit"], "feat": viewName,
+                "args": {classifier + "KWARGS": dict((key, value[0]) for key, value in kwargsInit[
+                    classifier + "KWARGSInit"].items()), "feat": viewName,
                          "CL_type": classifier, "nbClass": nbClass},
                 "viewIndex": viewIndex}
             argumentDictionaries["Monoview"].append(arguments)
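For context, a minimal sketch of what the new list format means in this hunk, assuming each entry of the per-classifier kwargs dictionary now holds a list of candidate values (the names and values below are hypothetical, not taken from the repository): the single-experiment branch keeps only the first element of each list.

# Hypothetical list-format kwargs, for illustration only.
kwargsInit = {"AdaboostKWARGSInit": {"n_estimators": [50],
                                     "base_estimator": [None]}}
classifier = "Adaboost"

# Same unwrapping as in the hunk above: keep value[0] for every key.
single_config = dict((key, value[0])
                     for key, value in kwargsInit[classifier + "KWARGSInit"].items())
print(single_config)  # {'n_estimators': 50, 'base_estimator': None}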
@@ -637,15 +637,10 @@ def execClassif(arguments):
             metrics[metricIndex] = [metric[0], None]
     benchmark = initBenchmark(CL_type, monoviewAlgos, multiviewAlgos, args)
-    print(benchmark, "\n")
     initKWARGS = initKWARGSFunc(args, benchmark)
     dataBaseTime = time.time() - start
     argumentDictionaries = initMonoviewExps(benchmark, viewsDictionary,
                                             NB_CLASS, initKWARGS)
-    print(argumentDictionaries, "\n")
     directories = execution.genDirecortiesNames(directory, statsIter)
     benchmarkArgumentDictionaries = execution.genArgumentDictionaries(
         LABELS_DICTIONARY, directories, multiclassLabels,
@@ -654,7 +649,6 @@ def execClassif(arguments):
         statsIterRandomStates, metrics,
         argumentDictionaries, benchmark, nbViews,
         views, viewsIndices)
-    print(benchmarkArgumentDictionaries, "\n")
     nbMulticlass = len(labelsCombinations)
     execBenchmark(nbCores, statsIter, nbMulticlass,
@@ -390,7 +390,8 @@ class ColumnGenerationClassifierQar(BaseEstimator, ClassifierMixin, BaseBoost):
         ones_matrix = np.zeros(y.shape)
         ones_matrix[np.multiply(y, self.new_voter.reshape(
             y.shape)) < 0] = 1  # can np.divide if needed
-        epsilon = np.average(ones_matrix, weights=self.example_weights, axis=0)
+        epsilon = np.average(np.multiply(y, self.new_voter.reshape(
+            y.shape)), axis=0)
         return epsilon

     def _compute_r(self, y):
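A self-contained comparison of the two epsilon definitions touched by this hunk, using toy ±1 labels and voter outputs (the arrays and weights below are made up for illustration): the old line computes a weighted error rate, the new one an unweighted mean margin.

import numpy as np

y = np.array([[1], [-1], [1], [1]])
new_voter = np.array([[1], [-1], [-1], [1]])
example_weights = np.array([0.4, 0.2, 0.2, 0.2])

# Old definition: weighted fraction of misclassified examples.
ones_matrix = np.zeros(y.shape)
ones_matrix[np.multiply(y, new_voter.reshape(y.shape)) < 0] = 1
epsilon_old = np.average(ones_matrix, weights=example_weights, axis=0)

# New definition: unweighted average of the margins y * f(x).
epsilon_new = np.average(np.multiply(y, new_voter.reshape(y.shape)), axis=0)

print(epsilon_old, epsilon_new)  # [0.2] [0.5]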
@@ -56,7 +56,6 @@ def ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices,
     labelsString, \
     outputFileName = initConstants(args, X, classificationIndices, labelsNames,
                                    name, directory)
-
     logging.debug("Done:\t Loading data")

     logging.debug(
@@ -89,7 +89,7 @@ class Adaboost(AdaBoostClassifier, BaseMonoviewClassifier):
 def formatCmdArgs(args):
     """Used to format kwargs for the parsed args"""
     kwargsDict = {'n_estimators': args.Ada_n_est,
-                  'base_estimator': DecisionTreeClassifier(max_depth=1)}
+                  'base_estimator': [DecisionTreeClassifier(max_depth=1)]}
     return kwargsDict
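A short sketch of the convention this and the following formatCmdArgs hunks adopt, under the assumption that the parsed arguments already arrive as lists (the Namespace below is hypothetical): wrapping the default base_estimator in a list keeps every entry of kwargsDict iterable in the same way.

from argparse import Namespace
from sklearn.tree import DecisionTreeClassifier

args = Namespace(Ada_n_est=[50])  # assumed list-valued, as the commit title suggests

kwargsDict = {'n_estimators': args.Ada_n_est,
              'base_estimator': [DecisionTreeClassifier(max_depth=1)]}

# Every value can now be indexed or iterated uniformly.
for name, values in kwargsDict.items():
    print(name, values[0])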
@@ -123,7 +123,7 @@ class AdaboostPregen(AdaBoostClassifier, BaseMonoviewClassifier,
 def formatCmdArgs(args):
     """Used to format kwargs for the parsed args"""
     kwargsDict = {'n_estimators': args.AdP_n_est,
-                  'base_estimator': DecisionTreeClassifier(max_depth=1),
+                  'base_estimator': [DecisionTreeClassifier(max_depth=1)],
                   'n_stumps': args.AdP_stumps}
     return kwargsDict
@@ -24,7 +24,7 @@ class AdaboostPregen10(AdaboostPregen):
 def formatCmdArgs(args):
     """Used to format kwargs for the parsed args"""
     kwargsDict = {'n_estimators': args.AdP_n_est,
-                  'base_estimator': DecisionTreeClassifier(max_depth=1),
+                  'base_estimator': [DecisionTreeClassifier(max_depth=1)],
                   }
     return kwargsDict
@@ -108,7 +108,7 @@ class AdaboostPregenTree(AdaBoostClassifier, BaseMonoviewClassifier,
 def formatCmdArgs(args):
     """Used to format kwargs for the parsed args"""
     kwargsDict = {'n_estimators': args.AdPT_n_est,
-                  'base_estimator': DecisionTreeClassifier(max_depth=1),
+                  'base_estimator': [DecisionTreeClassifier(max_depth=1)],
                   'n_stumps': args.AdPT_trees,
                   "max_depth": args.AdPT_max_depth}
     return kwargsDict
@@ -13,7 +13,7 @@ class CGDesc(ColumnGenerationClassifierQar, BaseMonoviewClassifier):
                          c_bound_choice=True,
                          random_start=False,
                          n_stumps=n_stumps,
-                         use_r=True,
+                         use_r=False,
                          c_bound_sol=True,
                          estimators_generator="Stumps"
                          )
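This last hunk only flips use_r to False, in line with the commit message about dropping the R computation for CGDesc. As a hedged aside (not code from the repository), for ±1 labels and voter outputs the edge r and the error epsilon carry the same information, r = 1 - 2 * epsilon, so a boosting-style voter weight can be written from either quantity:

import numpy as np

def voter_weight_from_epsilon(eps):
    # Classic AdaBoost-style weight written from the error rate.
    return 0.5 * np.log((1 - eps) / eps)

def voter_weight_from_r(r):
    # The same weight written from the edge r = 1 - 2 * eps.
    return 0.5 * np.log((1 + r) / (1 - r))

eps = 0.2
print(np.isclose(voter_weight_from_epsilon(eps), voter_weight_from_r(1 - 2 * eps)))  # True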