Commit 12f5afa2 authored by Baptiste Bauvin

Merge branch 'develop'

parents 8c63b541 149576d3
Pipeline #4879 passed
Showing changed files with 186 additions and 165 deletions
import numpy as np
from sklearn.linear_model import Lasso as LassoSK
from ..monoview.monoview_utils import CustomRandint, CustomUniform, \
BaseMonoviewClassifier
from ..monoview.monoview_utils import BaseMonoviewClassifier
from summit.multiview_platform.utils.hyper_parameter_search import CustomUniform, CustomRandint
# Author-Info
__author__ = "Baptiste Bauvin"
......
from sklearn.ensemble import RandomForestClassifier
from ..monoview.monoview_utils import CustomRandint, BaseMonoviewClassifier
from ..monoview.monoview_utils import BaseMonoviewClassifier
from summit.multiview_platform.utils.hyper_parameter_search import CustomRandint
# Author-Info
__author__ = "Baptiste Bauvin"
......@@ -65,7 +66,8 @@ class RandomForest(RandomForestClassifier, BaseMonoviewClassifier):
["gini", "entropy"], [random_state]]
self.weird_strings = {}
def get_interpretation(self, directory, base_file_name, y_test, multiclass=False):
def get_interpretation(self, directory, base_file_name, y_test,
multiclass=False):
"""
Parameters
......@@ -78,5 +80,6 @@ class RandomForest(RandomForestClassifier, BaseMonoviewClassifier):
string for interpretation interpret_string
"""
interpret_string = ""
interpret_string += self.get_feature_importance(directory, base_file_name)
interpret_string += self.get_feature_importance(directory,
base_file_name)
return interpret_string
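get_feature_importance presumably comes from the platform's base monoview classifier and is only re-wrapped here for line length. As a rough, hypothetical sketch of what such a helper usually does for a forest-style estimator (the function name and file layout below are assumptions, not the project's implementation):

import os

import numpy as np


def get_feature_importance_sketch(estimator, directory, base_file_name):
    """Hypothetical helper: persist feature_importances_ and report the top five."""
    importances = estimator.feature_importances_
    np.savetxt(os.path.join(directory, base_file_name + "feature_importances.csv"),
               importances, delimiter=",")
    top = np.argsort(importances)[::-1][:5]
    return "Top features (index: importance): " + ", ".join(
        "{}: {:.3f}".format(index, importances[index]) for index in top)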
from sklearn.linear_model import SGDClassifier
from ..monoview.monoview_utils import CustomUniform, BaseMonoviewClassifier
from ..monoview.monoview_utils import BaseMonoviewClassifier
from summit.multiview_platform.utils.hyper_parameter_search import CustomUniform
# Author-Info
__author__ = "Baptiste Bauvin"
......
from summit.multiview_platform.monoview_classifiers.additions.SVCClassifier import \
SVCClassifier
from ..monoview.monoview_utils import CustomUniform, BaseMonoviewClassifier
from ..monoview.monoview_utils import BaseMonoviewClassifier
from summit.multiview_platform.utils.hyper_parameter_search import CustomUniform
# Author-Info
__author__ = "Baptiste Bauvin"
......
from summit.multiview_platform.monoview_classifiers.additions.SVCClassifier import \
SVCClassifier
from ..monoview.monoview_utils import CustomUniform, CustomRandint, \
BaseMonoviewClassifier
from ..monoview.monoview_utils import BaseMonoviewClassifier
from summit.multiview_platform.utils.hyper_parameter_search import \
CustomUniform, CustomRandint
# Author-Info
__author__ = "Baptiste Bauvin"
......
from summit.multiview_platform.monoview_classifiers.additions.SVCClassifier import \
SVCClassifier
from ..monoview.monoview_utils import CustomUniform, BaseMonoviewClassifier
from ..monoview.monoview_utils import BaseMonoviewClassifier
from summit.multiview_platform.utils.hyper_parameter_search import CustomUniform
# Author-Info
__author__ = "Baptiste Bauvin"
......
......@@ -102,7 +102,8 @@ def save_results(string_analysis, images_analysis, output_file_name,
"""
logging.info(string_analysis)
secure_file_path(output_file_name)
output_text_file = open(output_file_name + 'summary.txt', 'w', encoding="utf-8")
output_text_file = open(output_file_name + 'summary.txt', 'w',
encoding="utf-8")
output_text_file.write(string_analysis)
output_text_file.close()
np.savetxt(output_file_name + "confusion_matrix.csv", confusion_matrix,
......@@ -308,11 +309,11 @@ def exec_multiview(directory, dataset_var, name, classification_indices,
logging.debug("Start:\t Predicting")
train_pred = classifier.predict(dataset_var,
example_indices=learning_indices,
sample_indices=learning_indices,
view_indices=views_indices)
pred_beg = time.monotonic()
test_pred = classifier.predict(dataset_var,
example_indices=validation_indices,
sample_indices=validation_indices,
view_indices=views_indices)
pred_duration = time.monotonic() - pred_beg
full_pred = np.zeros(dataset_var.get_labels().shape, dtype=int) - 100
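The -100 filler above marks samples that end up in neither the train nor the test split once train_pred and test_pred are written back at their indices. A small self-contained illustration of that pattern (toy data, not project code):

import numpy as np

# Toy data: 6 samples, indices 0-2 are train, 3-4 are test, 5 is in neither split.
labels = np.array([0, 1, 1, 0, 1, 0])
learning_indices = np.array([0, 1, 2])
validation_indices = np.array([3, 4])
train_pred = np.array([0, 1, 1])
test_pred = np.array([0, 1])

full_pred = np.zeros(labels.shape, dtype=int) - 100
full_pred[learning_indices] = train_pred
full_pred[validation_indices] = test_pred
print(full_pred)  # [0 1 1 0 1 -100]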
......@@ -335,7 +336,8 @@ def exec_multiview(directory, dataset_var, name, classification_indices,
hps_method=hps_method,
metrics_dict=metrics,
n_iter=n_iter,
class_label_names=list(labels_dictionary.values()),
class_label_names=list(
labels_dictionary.values()),
pred=full_pred,
directory=directory,
base_file_name=base_file_name,
......@@ -348,7 +350,8 @@ def exec_multiview(directory, dataset_var, name, classification_indices,
logging.info("Done:\t Result Analysis for " + cl_type)
logging.debug("Start:\t Saving preds")
save_results(string_analysis, images_analysis, output_file_name, confusion_matrix)
save_results(string_analysis, images_analysis, output_file_name,
confusion_matrix)
logging.debug("Start:\t Saving preds")
return MultiviewResult(cl_type, classifier_config, metrics_scores,
......
from .. import multiview_classifiers
from abc import abstractmethod
import numpy as np
from .. import monoview_classifiers
from ..utils.base import BaseClassifier, ResultAnalyser
from ..utils.dataset import RAMDataset, get_examples_views_indices
from ..utils.dataset import RAMDataset
# class FakeEstimator():
#
# def predict(self, X, example_indices=None, view_indices=None):
# return np.zeros(example_indices.shape[0])
# def predict(self, X, sample_indices=None, view_indices=None):
# return np.zeros(sample_indices.shape[0])
class BaseMultiviewClassifier(BaseClassifier):
......@@ -32,16 +33,21 @@ class BaseMultiviewClassifier(BaseClassifier):
self.used_views = None
@abstractmethod
def fit(self, X, y, train_indices=None, view_indices=None): # pragma: no cover
def fit(self, X, y, train_indices=None,
view_indices=None): # pragma: no cover
pass
@abstractmethod
def predict(self, X, example_indices=None, view_indices=None): # pragma: no cover
def predict(self, X, sample_indices=None,
view_indices=None): # pragma: no cover
pass
def _check_views(self, view_indices): # pragma: no cover
if self.used_views is not None and not np.array_equal(np.sort(self.used_views), np.sort(view_indices)):
raise ValueError('Used {} views to fit, and trying to predict on {}'.format(self.used_views, view_indices))
if self.used_views is not None and not np.array_equal(
np.sort(self.used_views), np.sort(view_indices)):
raise ValueError(
'Used {} views to fit, and trying to predict on {}'.format(
self.used_views, view_indices))
# def to_str(self, param_name):
# if param_name in self.weird_strings:
......@@ -74,7 +80,8 @@ class BaseMultiviewClassifier(BaseClassifier):
for class_index in range(n_classes)],
are_sparse=False,
name="mc_dset",
labels_names=[str(class_index) for class_index in range(n_classes)],
labels_names=[str(class_index)
for class_index in range(n_classes)],
view_names=["V0", "V1"],
)
......@@ -145,9 +152,6 @@ def get_monoview_classifier(classifier_name, multiclass=False):
return classifier_class
from .. import multiview_classifiers
class MultiviewResult(object):
def __init__(self, classifier_name, classifier_config,
metrics_scores, full_labels, hps_duration, fit_duration,
......@@ -169,7 +173,7 @@ class MultiviewResult(object):
multiview_classifier_module.classifier_class_name)(
42, **self.classifier_config)
return multiview_classifier.short_name
except:
except BaseException:
return self.classifier_name
......@@ -181,8 +185,10 @@ class MultiviewResultAnalyzer(ResultAnalyser):
database_name, nb_cores, duration):
if hps_method.endswith("equiv"):
n_iter = n_iter * len(view_names)
ResultAnalyser.__init__(self, classifier, classification_indices, k_folds,
hps_method, metrics_dict, n_iter, class_label_names,
ResultAnalyser.__init__(self, classifier, classification_indices,
k_folds,
hps_method, metrics_dict, n_iter,
class_label_names,
pred, directory,
base_file_name, labels, database_name,
nb_cores, duration)
......@@ -190,7 +196,8 @@ class MultiviewResultAnalyzer(ResultAnalyser):
self.view_names = view_names
def get_base_string(self, ):
return "Multiview classification on {} with {}\n\n".format(self.database_name,
return "Multiview classification on {} with {}\n\n".format(
self.database_name,
self.classifier_name)
def get_view_specific_info(self):
......
......@@ -7,7 +7,7 @@ from .fusion_utils import BaseFusionClassifier
from ...multiview.multiview_utils import ConfigGenerator, \
get_available_monoview_classifiers, \
BaseMultiviewClassifier
from ...utils.dataset import get_examples_views_indices
from ...utils.dataset import get_samples_views_indices
class DiversityFusionClassifier(BaseMultiviewClassifier,
......@@ -27,7 +27,7 @@ class DiversityFusionClassifier(BaseMultiviewClassifier,
self.classifier_configs = classifier_configs
def fit(self, X, y, train_indices=None, view_indices=None):
train_indices, view_indices = get_examples_views_indices(X,
train_indices, view_indices = get_samples_views_indices(X,
train_indices,
view_indices)
self.used_views = view_indices
......@@ -52,21 +52,21 @@ class DiversityFusionClassifier(BaseMultiviewClassifier,
self.choose_combination(X, y, train_indices, view_indices)
return self
def predict(self, X, example_indices=None, view_indices=None):
def predict(self, X, sample_indices=None, view_indices=None):
"""Just a weighted majority vote"""
example_indices, view_indices = get_examples_views_indices(X,
example_indices,
sample_indices, view_indices = get_samples_views_indices(X,
sample_indices,
view_indices)
self._check_views(view_indices)
nb_class = X.get_nb_class()
if nb_class > 2:
nb_class = 3
votes = np.zeros((len(example_indices), nb_class), dtype=float)
votes = np.zeros((len(sample_indices), nb_class), dtype=float)
monoview_predictions = [
monoview_estimator.predict(X.get_v(view_idx, example_indices))
monoview_estimator.predict(X.get_v(view_idx, sample_indices))
for view_idx, monoview_estimator
in zip(view_indices, self.monoview_estimators)]
for idx, example_index in enumerate(example_indices):
for idx, sample_index in enumerate(sample_indices):
for monoview_estimator_index, monoview_prediction in enumerate(
monoview_predictions):
if int(monoview_prediction[idx]) == -100:
......@@ -76,20 +76,20 @@ class DiversityFusionClassifier(BaseMultiviewClassifier,
predicted_labels = np.argmax(votes, axis=1)
return predicted_labels
def get_classifiers_decisions(self, X, view_indices, examples_indices):
def get_classifiers_decisions(self, X, view_indices, samples_indices):
classifiers_decisions = np.zeros((len(self.monoview_estimators),
len(view_indices),
len(examples_indices)))
len(samples_indices)))
for estimator_idx, estimator in enumerate(self.monoview_estimators):
for idx, view_index in enumerate(view_indices):
classifiers_decisions[estimator_idx, idx, :] = estimator[
idx].predict(X.get_v(view_index, examples_indices))
idx].predict(X.get_v(view_index, samples_indices))
return classifiers_decisions
def init_combinations(self, X, example_indices, view_indices):
def init_combinations(self, X, sample_indices, view_indices):
classifiers_decisions = self.get_classifiers_decisions(X, view_indices,
example_indices)
nb_classifiers, nb_views, n_examples = classifiers_decisions.shape
sample_indices)
nb_classifiers, nb_views, n_samples = classifiers_decisions.shape
combinations = itertools.combinations_with_replacement(
range(nb_classifiers),
nb_views)
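init_combinations enumerates one monoview estimator per view with combinations_with_replacement, so the same estimator may be reused across views. A quick illustration of the candidate combinations for 3 estimators and 2 views:

import itertools

# 3 monoview estimators, 2 views: the explored candidate assignments are the
# size-2 multisets of estimator indices.
print(list(itertools.combinations_with_replacement(range(3), 2)))
# [(0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 2)]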
......@@ -104,15 +104,15 @@ class DiversityFusionClassifier(BaseMultiviewClassifier,
class GlobalDiversityFusionClassifier(DiversityFusionClassifier):
def choose_combination(self, X, y, examples_indices, view_indices):
def choose_combination(self, X, y, samples_indices, view_indices):
combinations, combis, div_measure, classifiers_decisions, nb_views = self.init_combinations(
X, examples_indices, view_indices)
X, samples_indices, view_indices)
for combinationsIndex, combination in enumerate(combinations):
combis[combinationsIndex] = combination
div_measure[combinationsIndex] = self.diversity_measure(
classifiers_decisions,
combination,
y[examples_indices])
y[samples_indices])
best_combi_index = np.argmax(div_measure)
best_combination = combis[best_combi_index]
self.monoview_estimators = [
......@@ -123,9 +123,9 @@ class GlobalDiversityFusionClassifier(DiversityFusionClassifier):
class CoupleDiversityFusionClassifier(DiversityFusionClassifier):
def choose_combination(self, X, y, examples_indices, view_indices):
def choose_combination(self, X, y, samples_indices, view_indices):
combinations, combis, div_measure, classifiers_decisions, nb_views = self.init_combinations(
X, examples_indices, view_indices)
X, samples_indices, view_indices)
for combinations_index, combination in enumerate(combinations):
combis[combinations_index] = combination
combi_with_view = [(viewIndex, combiIndex) for viewIndex, combiIndex
......@@ -140,9 +140,11 @@ class CoupleDiversityFusionClassifier(DiversityFusionClassifier):
view_index_2, classifier_index_2) = binome
couple_diversity = np.mean(
self.diversity_measure(
classifiers_decisions[classifier_index_1, view_index_1],
classifiers_decisions[classifier_index_2, view_index_2],
y[examples_indices])
classifiers_decisions[classifier_index_1,
view_index_1],
classifiers_decisions[classifier_index_2,
view_index_2],
y[samples_indices])
)
couple_diversities[binome_index] = couple_diversity
div_measure[combinations_index] = np.mean(couple_diversities)
......
import numpy as np
from .late_fusion_utils import LateFusionClassifier
from ...monoview.monoview_utils import CustomRandint
from ...utils.dataset import get_examples_views_indices
from ...utils.hyper_parameter_search import CustomRandint
from ...utils.dataset import get_samples_views_indices
class BaseJumboFusion(LateFusionClassifier):
......@@ -23,18 +23,18 @@ class BaseJumboFusion(LateFusionClassifier):
self.nb_monoview_per_view = nb_monoview_per_view
LateFusionClassifier.set_params(self, **params)
def predict(self, X, example_indices=None, view_indices=None):
example_indices, view_indices = get_examples_views_indices(X,
example_indices,
def predict(self, X, sample_indices=None, view_indices=None):
sample_indices, view_indices = get_samples_views_indices(X,
sample_indices,
view_indices)
self._check_views(view_indices)
monoview_decisions = self.predict_monoview(X,
example_indices=example_indices,
sample_indices=sample_indices,
view_indices=view_indices)
return self.aggregation_estimator.predict(monoview_decisions)
def fit(self, X, y, train_indices=None, view_indices=None):
train_indices, view_indices = get_examples_views_indices(X,
train_indices, view_indices = get_samples_views_indices(X,
train_indices,
view_indices)
self.used_views = view_indices
......@@ -43,7 +43,7 @@ class BaseJumboFusion(LateFusionClassifier):
self.fit_monoview_estimators(X, y, train_indices=train_indices,
view_indices=view_indices)
monoview_decisions = self.predict_monoview(X,
example_indices=train_indices,
sample_indices=train_indices,
view_indices=view_indices)
self.aggregation_estimator.fit(monoview_decisions, y[train_indices])
return self
......@@ -70,8 +70,8 @@ class BaseJumboFusion(LateFusionClassifier):
enumerate(self.monoview_estimators)]
return self
def predict_monoview(self, X, example_indices=None, view_indices=None):
monoview_decisions = np.zeros((len(example_indices),
def predict_monoview(self, X, sample_indices=None, view_indices=None):
monoview_decisions = np.zeros((len(sample_indices),
len(view_indices) * len(
self.classifiers_names)))
for idx, view_estimators in enumerate(self.monoview_estimators):
......@@ -79,5 +79,5 @@ class BaseJumboFusion(LateFusionClassifier):
monoview_decisions[:, len(
self.classifiers_names) * idx + estimator_index] = estimator.predict(
X.get_v(view_indices[idx],
example_indices))
sample_indices))
return monoview_decisions
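predict_monoview lays the per-view, per-estimator predictions out column-wise, with len(self.classifiers_names) columns per view. A toy illustration of that column indexing (made-up names, not project code):

import numpy as np

# Toy layout: len(classifiers_names) columns per view, so the prediction of
# estimator e on view v goes to column len(classifiers_names) * v + e.
classifiers_names = ["decision_tree", "svm_linear"]
view_indices = [0, 1]
n_samples = 4
monoview_decisions = np.zeros((n_samples,
                               len(view_indices) * len(classifiers_names)))
for idx, _view in enumerate(view_indices):
    for estimator_index, _name in enumerate(classifiers_names):
        column = len(classifiers_names) * idx + estimator_index
        monoview_decisions[:, column] = column  # stand-in for estimator.predict(...)
print(monoview_decisions[0])  # [0. 1. 2. 3.]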
......@@ -3,7 +3,7 @@ import numpy as np
from .fusion_utils import BaseFusionClassifier
from ...multiview.multiview_utils import BaseMultiviewClassifier, \
get_available_monoview_classifiers, ConfigGenerator
from ...utils.dataset import get_examples_views_indices
from ...utils.dataset import get_samples_views_indices
class ClassifierDistribution:
......@@ -94,7 +94,7 @@ class LateFusionClassifier(BaseMultiviewClassifier, BaseFusionClassifier):
np.arange(1000)]
def fit(self, X, y, train_indices=None, view_indices=None):
train_indices, view_indices = get_examples_views_indices(X,
train_indices, view_indices = get_samples_views_indices(X,
train_indices,
view_indices)
self.used_views = view_indices
......@@ -148,8 +148,12 @@ class LateFusionClassifier(BaseMultiviewClassifier, BaseFusionClassifier):
for _ in range(nb_clfs)]
if isinstance(self.classifier_configs, ConfigDistribution):
self.classifier_configs = [{classifier_name : config[classifier_name]} for config, classifier_name in zip(self.classifier_configs.draw(nb_clfs,
self.rs), self.classifiers_names)]
self.classifier_configs = [
{classifier_name: config[classifier_name]} for
config, classifier_name in
zip(self.classifier_configs.draw(nb_clfs,
self.rs),
self.classifiers_names)]
elif isinstance(self.classifier_configs, dict):
self.classifier_configs = [
{classifier_name: self.classifier_configs[classifier_name]} for
......
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
def get_names(classed_list):
return np.array([object_.__class__.__name__ for object_ in classed_list])
# class BaseMultiviewClassifier(BaseEstimator, ClassifierMixin):
#
# def __init__(self, random_state):
......@@ -56,9 +54,9 @@ def get_names(classed_list):
#
# def get_train_views_indices(dataset, train_indices, view_indices, ):
# """This function is used to get all the examples indices and view indices if needed"""
# """This function is used to get all the samples indices and view indices if needed"""
# if view_indices is None:
# view_indices = np.arange(dataset.nb_view)
# if train_indices is None:
# train_indices = range(dataset.get_nb_examples())
# train_indices = range(dataset.get_nb_samples())
# return train_indices, view_indices
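get_samples_views_indices, the renamed helper used throughout this diff, presumably mirrors the commented-out get_train_views_indices above and just fills in default indices. A hedged sketch, assuming the dataset exposes nb_view and get_nb_samples() (the real implementation lives in ..utils.dataset and may differ):

import numpy as np


def get_samples_views_indices(dataset, sample_indices, view_indices):
    """Hypothetical sketch: default to all samples and all views when None."""
    if view_indices is None:
        view_indices = np.arange(dataset.nb_view)
    if sample_indices is None:
        sample_indices = np.arange(dataset.get_nb_samples())
    return sample_indices, view_indices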
......@@ -2,7 +2,7 @@ import numpy as np
from ..multiview_classifiers.additions.late_fusion_utils import \
LateFusionClassifier
from ..utils.dataset import get_examples_views_indices
from ..utils.dataset import get_samples_views_indices
classifier_class_name = "BayesianInferenceClassifier"
......@@ -19,9 +19,9 @@ class BayesianInferenceClassifier(LateFusionClassifier):
weights=weights,
rs=rs)
def predict(self, X, example_indices=None, view_indices=None):
example_indices, view_indices = get_examples_views_indices(X,
example_indices,
def predict(self, X, sample_indices=None, view_indices=None):
sample_indices, view_indices = get_samples_views_indices(X,
sample_indices,
view_indices)
self._check_views(view_indices)
if sum(self.weights) != 1.0:
......@@ -32,7 +32,7 @@ class BayesianInferenceClassifier(LateFusionClassifier):
view_scores.append(np.power(
self.monoview_estimators[index].predict_proba(
X.get_v(view_index,
example_indices)),
sample_indices)),
self.weights[index]))
view_scores = np.array(view_scores)
predicted_labels = np.argmax(np.prod(view_scores, axis=0), axis=1)
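The Bayesian inference rule above raises each view's predicted class probabilities to the power of that view's weight and multiplies across views before taking the argmax. A small worked example with toy numbers:

import numpy as np

# Two views, three samples, two classes; the weights sum to 1 as checked above.
weights = np.array([0.7, 0.3])
probas_view_0 = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
probas_view_1 = np.array([[0.6, 0.4], [0.4, 0.6], [0.3, 0.7]])

view_scores = np.array([np.power(probas_view_0, weights[0]),
                        np.power(probas_view_1, weights[1])])
predicted_labels = np.argmax(np.prod(view_scores, axis=0), axis=1)
print(predicted_labels)  # [0 1 0]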
......
......@@ -9,15 +9,15 @@ classifier_class_name = "DifficultyFusion"
class DifficultyFusion(GlobalDiversityFusionClassifier):
def diversity_measure(self, classifiers_decisions, combination, y):
_, nb_view, nb_examples = classifiers_decisions.shape
scores = np.zeros((nb_view, nb_examples), dtype=int)
_, nb_view, nb_samples = classifiers_decisions.shape
scores = np.zeros((nb_view, nb_samples), dtype=int)
for view_index, classifier_index in enumerate(combination):
scores[view_index, :] = np.logical_not(
np.logical_xor(classifiers_decisions[classifier_index,
view_index],
y)
)
# Table of the number of views that succeeded for each example:
# Table of the number of views that succeeded for each sample:
difficulty_scores = np.sum(scores, axis=0)
difficulty_score = np.var(
......
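The difficulty measure counts, for each sample, how many of the selected views classified it correctly, then takes the variance of those counts (the exact normalisation inside np.var is collapsed in this diff). A toy illustration:

import numpy as np

# 2 views, 5 samples: 1 where the selected classifier agrees with y, 0 otherwise.
scores = np.array([[1, 1, 0, 1, 0],
                   [1, 0, 0, 1, 1]])
difficulty_scores = np.sum(scores, axis=0)  # correct views per sample
print(difficulty_scores)          # [2 1 0 2 1]
print(np.var(difficulty_scores))  # 0.56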
......@@ -9,8 +9,8 @@ classifier_class_name = "EntropyFusion"
class EntropyFusion(GlobalDiversityFusionClassifier):
def diversity_measure(self, classifiers_decisions, combination, y):
_, nb_view, nb_examples = classifiers_decisions.shape
scores = np.zeros((nb_view, nb_examples), dtype=int)
_, nb_view, nb_samples = classifiers_decisions.shape
scores = np.zeros((nb_view, nb_samples), dtype=int)
for view_index, classifier_index in enumerate(combination):
scores[view_index] = np.logical_not(
np.logical_xor(
......@@ -18,7 +18,7 @@ class EntropyFusion(GlobalDiversityFusionClassifier):
y)
)
entropy_scores = np.sum(scores, axis=0)
nb_view_matrix = np.zeros((nb_examples),
nb_view_matrix = np.zeros((nb_samples),
dtype=int) + nb_view - entropy_scores
entropy_score = np.mean(
np.minimum(entropy_scores, nb_view_matrix).astype(float) / (
......
......@@ -2,7 +2,7 @@ import numpy as np
from ..multiview_classifiers.additions.late_fusion_utils import \
LateFusionClassifier
from ..utils.dataset import get_examples_views_indices
from ..utils.dataset import get_samples_views_indices
classifier_class_name = "MajorityVoting"
......@@ -22,27 +22,27 @@ class MajorityVoting(LateFusionClassifier):
weights=weights,
rs=rs)
def predict(self, X, example_indices=None, view_indices=None):
examples_indices, view_indices = get_examples_views_indices(X,
example_indices,
def predict(self, X, sample_indices=None, view_indices=None):
samples_indices, view_indices = get_samples_views_indices(X,
sample_indices,
view_indices)
self._check_views(view_indices)
n_examples = len(examples_indices)
votes = np.zeros((n_examples, X.get_nb_class(example_indices)),
n_samples = len(samples_indices)
votes = np.zeros((n_samples, X.get_nb_class(sample_indices)),
dtype=float)
monoview_decisions = np.zeros((len(examples_indices), X.nb_view),
monoview_decisions = np.zeros((len(samples_indices), X.nb_view),
dtype=int)
for index, view_index in enumerate(view_indices):
monoview_decisions[:, index] = self.monoview_estimators[
index].predict(
X.get_v(view_index, examples_indices))
for example_index in range(n_examples):
X.get_v(view_index, samples_indices))
for sample_index in range(n_samples):
for view_index, feature_classification in enumerate(
monoview_decisions[example_index, :]):
votes[example_index, feature_classification] += self.weights[
monoview_decisions[sample_index, :]):
votes[sample_index, feature_classification] += self.weights[
view_index]
nb_maximum = len(
np.where(votes[example_index] == max(votes[example_index]))[0])
np.where(votes[sample_index] == max(votes[sample_index]))[0])
if nb_maximum == X.nb_view:
raise VotingIndecision(
"Majority voting can't decide, each classifier has voted for a different class")
......
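The voting loop above adds each view's weight to the class that view predicted, and the most voted class is then kept per sample, with VotingIndecision raised when every view backs a different class. A toy illustration of the tally:

import numpy as np

weights = np.array([0.5, 0.3, 0.2])          # one weight per view
monoview_decisions = np.array([[0, 1, 0],    # sample 0: views predict 0, 1, 0
                               [1, 1, 0]])   # sample 1: views predict 1, 1, 0
votes = np.zeros((2, 2), dtype=float)        # (n_samples, n_classes)
for sample_index in range(2):
    for view_index, decision in enumerate(monoview_decisions[sample_index]):
        votes[sample_index, decision] += weights[view_index]
print(votes)                     # [[0.7 0.3] [0.2 0.8]]
print(np.argmax(votes, axis=1))  # [0 1]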
from sklearn.svm import SVC
from .additions.jumbo_fusion_utils import BaseJumboFusion
from ..monoview.monoview_utils import CustomUniform, CustomRandint
from ..utils.hyper_parameter_search import CustomUniform, CustomRandint
classifier_class_name = "SVMJumboFusion"
......@@ -32,5 +32,6 @@ class SVMJumboFusion(BaseJumboFusion):
self.C = C
self.degree = degree
self.kernel = kernel
self.aggregation_estimator.set_params(C=C, kernel=kernel, degree=degree)
self.aggregation_estimator.set_params(
C=C, kernel=kernel, degree=degree)
return self
import numpy as np
from summit.multiview_platform import monoview_classifiers
from .additions.fusion_utils import BaseFusionClassifier
from ..multiview.multiview_utils import get_available_monoview_classifiers, \
BaseMultiviewClassifier, ConfigGenerator
from ..utils.dataset import get_examples_views_indices
from ..utils.dataset import get_samples_views_indices
from ..utils.multiclass import get_mc_estim, MultiClassWrapper
# from ..utils.dataset import get_v
......@@ -42,7 +41,8 @@ class WeightedLinearEarlyFusion(BaseMultiviewClassifier, BaseFusionClassifier):
# self.monoview_classifier_name)
# monoview_classifier_class = getattr(monoview_classifier_module,
# monoview_classifier_module.classifier_class_name)
self.monoview_classifier = self.init_monoview_estimator(monoview_classifier_name, monoview_classifier_config)
self.monoview_classifier = self.init_monoview_estimator(
monoview_classifier_name, monoview_classifier_config)
self.param_names = ["monoview_classifier_name",
"monoview_classifier_config"]
self.distribs = [get_available_monoview_classifiers(),
......@@ -80,18 +80,18 @@ class WeightedLinearEarlyFusion(BaseMultiviewClassifier, BaseFusionClassifier):
self.monoview_classifier_config = self.monoview_classifier.get_params()
return self
def predict(self, X, example_indices=None, view_indices=None):
_, X = self.transform_data_to_monoview(X, example_indices, view_indices)
def predict(self, X, sample_indices=None, view_indices=None):
_, X = self.transform_data_to_monoview(X, sample_indices, view_indices)
self._check_views(self.view_indices)
predicted_labels = self.monoview_classifier.predict(X)
return predicted_labels
def transform_data_to_monoview(self, dataset, example_indices,
def transform_data_to_monoview(self, dataset, sample_indices,
view_indices):
"""Here, we extract the data from the HDF5 dataset file and store all
the concatenated views in one variable"""
example_indices, self.view_indices = get_examples_views_indices(dataset,
example_indices,
sample_indices, self.view_indices = get_samples_views_indices(dataset,
sample_indices,
view_indices)
if self.view_weights is None:
self.view_weights = np.ones(len(self.view_indices), dtype=float)
......@@ -99,16 +99,15 @@ class WeightedLinearEarlyFusion(BaseMultiviewClassifier, BaseFusionClassifier):
self.view_weights = np.array(self.view_weights)
self.view_weights /= float(np.sum(self.view_weights))
X = self.hdf5_to_monoview(dataset, example_indices)
return example_indices, X
X = self.hdf5_to_monoview(dataset, sample_indices)
return sample_indices, X
def hdf5_to_monoview(self, dataset, examples):
"""Here, we concatenate the views for the requested examples"""
def hdf5_to_monoview(self, dataset, samples):
"""Here, we concatenate the views for the requested samples"""
monoview_data = np.concatenate(
[dataset.get_v(view_idx, examples)
[dataset.get_v(view_idx, samples)
for view_weight, (index, view_idx)
in zip(self.view_weights, enumerate(self.view_indices))]
, axis=1)
in zip(self.view_weights, enumerate(self.view_indices))], axis=1)
return monoview_data
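hdf5_to_monoview simply stacks the selected views column-wise for the requested samples. A minimal illustration of that concatenation (toy arrays, not project code):

import numpy as np

# Two views with 3 samples each become one 3 x 5 monoview matrix.
view_0 = np.arange(6).reshape(3, 2)
view_1 = np.arange(9).reshape(3, 3)
monoview_data = np.concatenate([view_0, view_1], axis=1)
print(monoview_data.shape)  # (3, 5)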
# def set_monoview_classifier_config(self, monoview_classifier_name, monoview_classifier_config):
......