Skip to content
Snippets Groups Projects
Commit 95cf6d73 authored by Baptiste Bauvin's avatar Baptiste Bauvin
Browse files

Corrected an early fusion issue due to the dictionary usage in monoview classifiers

parent e78c4e02
Branches
Tags
No related merge requests found
Showing
with 24 additions and 17 deletions
...@@ -35,8 +35,8 @@ def paramsToSet(nIter, randomState): ...@@ -35,8 +35,8 @@ def paramsToSet(nIter, randomState):
"""Used for weighted linear early fusion to generate random search sets""" """Used for weighted linear early fusion to generate random search sets"""
paramsSet = [] paramsSet = []
for _ in range(nIter): for _ in range(nIter):
paramsSet.append([randomState.randint(1, 15), paramsSet.append({"n_estimators": randomState.randint(1, 15),
DecisionTreeClassifier()]) "base_estimator": DecisionTreeClassifier()})
return paramsSet return paramsSet
......
...@@ -29,8 +29,9 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs): ...@@ -29,8 +29,9 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs):
def paramsToSet(nIter, randomState): def paramsToSet(nIter, randomState):
paramsSet = [] paramsSet = []
for _ in range(nIter): for _ in range(nIter):
paramsSet.append([randomState.randint(1, 300), randomState.choice(["gini", "entropy"]), paramsSet.append({"max_depth": randomState.randint(1, 300),
randomState.choice(["best", "random"])]) "criterion": randomState.choice(["gini", "entropy"]),
"splitter": randomState.choice(["best", "random"])})
return paramsSet return paramsSet
......
...@@ -29,8 +29,10 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs): ...@@ -29,8 +29,10 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs):
def paramsToSet(nIter, randomState): def paramsToSet(nIter, randomState):
paramsSet = [] paramsSet = []
for _ in range(nIter): for _ in range(nIter):
paramsSet.append([randomState.randint(1, 20), randomState.choice(["uniform", "distance"]), paramsSet.append({"n_neighbors": randomState.randint(1, 20),
randomState.choice(["auto", "ball_tree", "kd_tree", "brute"]), randomState.choice([1, 2])]) "weights": randomState.choice(["uniform", "distance"]),
"algorithm": randomState.choice(["auto", "ball_tree", "kd_tree", "brute"]),
"p": randomState.choice([1, 2])})
return paramsSet return paramsSet
......
...@@ -30,8 +30,9 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs): ...@@ -30,8 +30,9 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs):
def paramsToSet(nIter, randomState): def paramsToSet(nIter, randomState):
paramsSet = [] paramsSet = []
for _ in range(nIter): for _ in range(nIter):
paramsSet.append([randomState.randint(1, 300), randomState.randint(1, 300), paramsSet.append({"n_estimators": randomState.randint(1, 300),
randomState.choice(["gini", "entropy"])]) "max_depth": randomState.randint(1, 300),
"criterion": randomState.choice(["gini", "entropy"])})
return paramsSet return paramsSet
......
...@@ -63,7 +63,9 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs): ...@@ -63,7 +63,9 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs):
def paramsToSet(nIter, randomState): def paramsToSet(nIter, randomState):
paramsSet = [] paramsSet = []
for _ in range(nIter): for _ in range(nIter):
paramsSet.append([randomState.choice(["conjunction", "disjunction"]), randomState.randint(1, 15), randomState.random_sample()]) paramsSet.append({"model_type": randomState.choice(["conjunction", "disjunction"]),
"max_rules": randomState.randint(1, 15),
"p": randomState.random_sample()})
return paramsSet return paramsSet
......
...@@ -28,8 +28,9 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs): ...@@ -28,8 +28,9 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs):
def paramsToSet(nIter, randomState): def paramsToSet(nIter, randomState):
paramsSet = [] paramsSet = []
for _ in range(nIter): for _ in range(nIter):
paramsSet.append([randomState.choice(['log', 'modified_huber']), paramsSet.append({"loss": randomState.choice(['log', 'modified_huber']),
randomState.choice(["l1", "l2", "elasticnet"]), randomState.random_sample()]) "penalty": randomState.choice(["l1", "l2", "elasticnet"]),
"alpha": randomState.random_sample()})
return paramsSet return paramsSet
......
...@@ -26,7 +26,7 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs): ...@@ -26,7 +26,7 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs):
def paramsToSet(nIter, randomState): def paramsToSet(nIter, randomState):
paramsSet = [] paramsSet = []
for _ in range(nIter): for _ in range(nIter):
paramsSet.append([randomState.randint(1, 10000), ]) paramsSet.append({"C": randomState.randint(1, 10000), })
return paramsSet return paramsSet
......
...@@ -26,7 +26,7 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs): ...@@ -26,7 +26,7 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs):
def paramsToSet(nIter, randomState): def paramsToSet(nIter, randomState):
paramsSet = [] paramsSet = []
for _ in range(nIter): for _ in range(nIter):
paramsSet.append([randomState.randint(1, 10000), randomState.randint(1, 30)]) paramsSet.append({"C": randomState.randint(1, 10000), "degree": randomState.randint(1, 30)})
return paramsSet return paramsSet
......
...@@ -26,7 +26,7 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs): ...@@ -26,7 +26,7 @@ def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs):
def paramsToSet(nIter, randomState): def paramsToSet(nIter, randomState):
paramsSet = [] paramsSet = []
for _ in range(nIter): for _ in range(nIter):
paramsSet.append([randomState.randint(1, 10000), ]) paramsSet.append({"C": randomState.randint(1, 10000), })
return paramsSet return paramsSet
......
...@@ -87,7 +87,7 @@ class WeightedLinear(EarlyFusionClassifier): ...@@ -87,7 +87,7 @@ class WeightedLinear(EarlyFusionClassifier):
def setParams(self, paramsSet): def setParams(self, paramsSet):
self.weights = paramsSet[0] self.weights = paramsSet[0]
self.monoviewClassifiersConfig = dict((str(index), param) for index, param in enumerate(paramsSet[1])) self.monoviewClassifiersConfig = paramsSet[1]
def predict_hdf5(self, DATASET, usedIndices=None, viewsIndices=None): def predict_hdf5(self, DATASET, usedIndices=None, viewsIndices=None):
if type(viewsIndices) == type(None): if type(viewsIndices) == type(None):
......
...@@ -54,11 +54,11 @@ def intersect(allClassifersNames, directory, viewsIndices, resultsMonoview, clas ...@@ -54,11 +54,11 @@ def intersect(allClassifersNames, directory, viewsIndices, resultsMonoview, clas
if resultMonoview[1][0] in classifiersNames[resultMonoview[0]]: if resultMonoview[1][0] in classifiersNames[resultMonoview[0]]:
classifierIndex = classifiersNames.index(resultMonoview[1][0]) classifierIndex = classifiersNames.index(resultMonoview[1][0])
wrongSets[resultMonoview[0]][classifierIndex] = np.where( wrongSets[resultMonoview[0]][classifierIndex] = np.where(
trainLabels + resultMonoview[1][3][classificationIndices[0]] == 1) trainLabels + resultMonoview[1][3][classificationIndices[0]] == 1)[0]
else: else:
classifiersNames[resultMonoview[0]].append(resultMonoview[1][0]) classifiersNames[resultMonoview[0]].append(resultMonoview[1][0])
wrongSets[resultMonoview[0]].append( wrongSets[resultMonoview[0]].append(
np.where(trainLabels + resultMonoview[1][3][classificationIndices[0]] == 1)) np.where(trainLabels + resultMonoview[1][3][classificationIndices[0]] == 1)[0])
combinations = itertools.combinations_with_replacement(range(len(classifiersNames[0])), nbViews) combinations = itertools.combinations_with_replacement(range(len(classifiersNames[0])), nbViews)
bestLen = length bestLen = length
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment