Skip to content
Snippets Groups Projects
Commit 8b9094c9 authored by Baptiste Bauvin's avatar Baptiste Bauvin
Browse files

Removed warnings

parent cbef2800
Branches
Tags
No related merge requests found
Pipeline #3575 passed
......@@ -37,12 +37,14 @@ class SGD(SGDClassifier, BaseMonoviewClassifier):
"""
def __init__(self, random_state=None, loss='hinge',
             penalty='l2', alpha=0.0001, max_iter=5, tol=None, **kwargs):
    """Build the monoview SGD classifier.

    Parameters
    ----------
    random_state : int, RandomState instance or None
        Seed forwarded to scikit-learn's ``SGDClassifier``.
    loss : str
        Loss function name (default ``'hinge'``).
    penalty : str
        Regularization term (default ``'l2'``).
    alpha : float
        Regularization strength multiplier.
    max_iter : int
        Maximum number of passes over the training data (default 5,
        matching the previously hard-coded value).
    tol : float or None
        Stopping-criterion tolerance (default None, matching the
        previously hard-coded value).
    """
    # BUG FIX: max_iter and tol were accepted as parameters but the
    # literals 5 and None were still passed to the superclass, so the
    # arguments were silently ignored. Forward them instead; the
    # defaults keep the previous behavior for existing callers.
    super(SGD, self).__init__(
        loss=loss,
        penalty=penalty,
        alpha=alpha,
        max_iter=max_iter,
        tol=tol,
        random_state=random_state
    )
    self.param_names = ["loss", "penalty", "alpha", "random_state"]
......
......@@ -84,7 +84,7 @@ class WeightedLinearEarlyFusion(BaseMultiviewClassifier, BaseFusionClassifier):
example_indices, self.view_indices = get_examples_views_indices(dataset,
example_indices,
view_indices)
if self.view_weights is None or self.view_weights=="None":
if self.view_weights is None:
self.view_weights = np.ones(len(self.view_indices), dtype=float)
else:
self.view_weights = np.array(self.view_weights)
......
......@@ -190,7 +190,7 @@ class Dataset():
example_indices = example_indices[sorted_indices]
if not self.dataset["View" + str(view_index)].attrs["sparse"]:
return self.dataset["View" + str(view_index)][example_indices, :][
return self.dataset["View" + str(view_index)][()][example_indices, :][
np.argsort(sorted_indices), :]
else:
sparse_mat = sparse.csr_matrix(
......@@ -208,11 +208,11 @@ class Dataset():
def get_nb_class(self, example_indices=None):
    """Count the distinct label values among the selected examples.

    ``example_indices=None`` means "all examples" (resolved by
    ``init_example_indces``). The ``[()]`` read materializes the whole
    HDF5 "Labels" dataset as a numpy array before indexing it.
    """
    selected = self.init_example_indces(example_indices)
    labels = self.dataset["Labels"][()]
    return len(np.unique(labels[selected]))
def get_labels(self, example_indices=None):
    """Return the labels of the selected examples as a numpy array.

    ``example_indices=None`` selects every example (resolved by
    ``init_example_indces``). The ``[()]`` read loads the full HDF5
    "Labels" dataset into memory before applying the index.
    """
    selected = self.init_example_indces(example_indices)
    return self.dataset["Labels"][()][selected]
def copy_view(self, target_dataset=None, source_view_name=None,
target_view_index=None, example_indices=None):
......@@ -273,7 +273,7 @@ class Dataset():
target_view_index=view_index)
for view_index in range(noisy_dataset["Metadata"].attrs["nbView"]):
view_key = "View" + str(view_index)
view_dset = noisy_dataset.get[view_key]
view_dset = noisy_dataset[view_key]
try:
view_limits = self.dataset[
"Metadata/View" + str(view_index) + "_limits"][()]
......
......@@ -21,7 +21,7 @@ class Test_get_classic_db_hdf5(unittest.TestCase):
self.views = [self.rs.randint(0, 10, size=(self.nb_examples, 7))
for _ in range(self.nb_view)]
self.labels = self.rs.randint(0, self.nb_class, self.nb_examples)
self.dataset_file = h5py.File(os.path.join(tmp_path, self.file_name))
self.dataset_file = h5py.File(os.path.join(tmp_path, self.file_name), 'w')
self.view_names = ["ViewN" + str(index) for index in
range(len(self.views))]
self.are_sparse = [False for _ in self.views]
......
......@@ -22,7 +22,7 @@ class Test_Dataset(unittest.TestCase):
cls.views = [cls.rs.randint(0, 10, size=(cls.nb_examples, cls.nb_attr))
for _ in range(cls.nb_view)]
cls.labels = cls.rs.randint(0, cls.nb_class, cls.nb_examples)
cls.dataset_file = h5py.File(os.path.join(tmp_path, cls.file_name))
cls.dataset_file = h5py.File(os.path.join(tmp_path, cls.file_name), "w")
cls.view_names = ["ViewN" + str(index) for index in range(len(cls.views))]
cls.are_sparse = [False for _ in cls.views]
for view_index, (view_name, view, is_sparse) in enumerate(
......@@ -50,7 +50,7 @@ class Test_Dataset(unittest.TestCase):
def test_filter(self):
"""Had to create a new dataset to aviod playing with the class one"""
file_name = "test_filter.hdf5"
dataset_file_filter = h5py.File(os.path.join(tmp_path, file_name))
dataset_file_filter = h5py.File(os.path.join(tmp_path, file_name), "w")
for view_index, (view_name, view, is_sparse) in enumerate(
zip(self.view_names, self.views, self.are_sparse)):
view_dataset = dataset_file_filter.create_dataset(
......@@ -155,7 +155,7 @@ class Test_Dataset(unittest.TestCase):
source_view_name="ViewN0",
target_view_index=1)
self.assertIn("View1", list(new_dataset.keys()))
np.testing.assert_array_equal(dataset_object.get_v(0), new_dataset["View1"].value)
np.testing.assert_array_equal(dataset_object.get_v(0), new_dataset["View1"][()])
self.assertEqual(new_dataset["View1"].attrs["name"], "ViewN0")
new_dataset.close()
os.remove(os.path.join(tmp_path, "test_copy.hdf5"))
......@@ -180,7 +180,7 @@ class Test_Dataset(unittest.TestCase):
def test_select_views_and_labels(self):
file_name = "test_filter.hdf5"
dataset_file_select = h5py.File(os.path.join(tmp_path, file_name))
dataset_file_select = h5py.File(os.path.join(tmp_path, file_name), "w")
for view_index, (view_name, view, is_sparse) in enumerate(
zip(self.view_names, self.views, self.are_sparse)):
view_dataset = dataset_file_select.create_dataset(
......@@ -208,7 +208,7 @@ class Test_Dataset(unittest.TestCase):
def test_add_gaussian_noise(self):
file_name = "test_noise.hdf5"
dataset_file_select = h5py.File(os.path.join(tmp_path, file_name))
dataset_file_select = h5py.File(os.path.join(tmp_path, file_name), "w")
limits = np.zeros((self.nb_attr, 2))
limits[:, 1] += 100
meta_data_grp = dataset_file_select.create_group("Metadata")
......
......@@ -55,7 +55,7 @@ class Test_randomized_search(unittest.TestCase):
def test_simple(self):
    # Smoke test: randomized_search must run end-to-end on a multiview
    # problem and yield its two result objects without raising.
    search_result = hyper_parameter_search.randomized_search(
        self.dataset, self.labels[()], "multiview", self.random_state,
        tmp_path, weighted_linear_early_fusion,
        "WeightedLinearEarlyFusion", self.k_folds, 1,
        ["accuracy_score", None], 2, {},
        learning_indices=self.learning_indices)
    best_params, test_folds_preds = search_result
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment