From 8b9094c9ad72c7c6667d17590349ee04176d17f5 Mon Sep 17 00:00:00 2001
From: Baptiste Bauvin <baptiste.bauvin@lis-lab.fr>
Date: Mon, 4 Nov 2019 16:30:19 -0500
Subject: [PATCH] Removed warnings

---
 .../monoview_classifiers/sgd.py                        |  4 +++-
 .../weighted_linear_early_fusion.py                    |  2 +-
 .../mono_multi_view_classifiers/utils/dataset.py       |  8 ++++----
 .../tests/test_utils/test_GetMultiviewDB.py            |  2 +-
 multiview_platform/tests/test_utils/test_dataset.py    | 10 +++++-----
 .../tests/test_utils/test_hyper_parameter_search.py    |  2 +-
 6 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py
index 4d77b7fd..b4a0e3d7 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py
@@ -37,12 +37,14 @@ class SGD(SGDClassifier, BaseMonoviewClassifier):
 
     """
     def __init__(self, random_state=None, loss='hinge',
-                 penalty='l2', alpha=0.0001, **kwargs):
+                 penalty='l2', alpha=0.0001, max_iter=5, tol=None, **kwargs):
 
         super(SGD, self).__init__(
             loss=loss,
             penalty=penalty,
             alpha=alpha,
+            max_iter=max_iter,
+            tol=tol,
             random_state=random_state
         )
         self.param_names = ["loss", "penalty", "alpha", "random_state"]
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py
index e63ebbb6..159623e4 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py
@@ -84,7 +84,7 @@ class WeightedLinearEarlyFusion(BaseMultiviewClassifier, BaseFusionClassifier):
         example_indices, self.view_indices = get_examples_views_indices(dataset,
                                                                         example_indices,
                                                                         view_indices)
-        if self.view_weights is None or self.view_weights=="None":
+        if self.view_weights is None:
             self.view_weights = np.ones(len(self.view_indices), dtype=float)
         else:
             self.view_weights = np.array(self.view_weights)
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/dataset.py b/multiview_platform/mono_multi_view_classifiers/utils/dataset.py
index d2badc5e..2caae541 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/dataset.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/dataset.py
@@ -190,7 +190,7 @@ class Dataset():
             example_indices = example_indices[sorted_indices]
 
             if not self.dataset["View" + str(view_index)].attrs["sparse"]:
-                return self.dataset["View" + str(view_index)][example_indices, :][
+                return self.dataset["View" + str(view_index)][()][example_indices, :][
                        np.argsort(sorted_indices), :]
             else:
                 sparse_mat = sparse.csr_matrix(
@@ -208,11 +208,11 @@ class Dataset():
 
     def get_nb_class(self, example_indices=None):
         example_indices = self.init_example_indces(example_indices)
-        return len(np.unique(self.dataset["Labels"][example_indices]))
+        return len(np.unique(self.dataset["Labels"][()][example_indices]))
 
     def get_labels(self, example_indices=None):
         example_indices = self.init_example_indces(example_indices)
-        return self.dataset["Labels"][example_indices]
+        return self.dataset["Labels"][()][example_indices]
 
     def copy_view(self, target_dataset=None, source_view_name=None,
                   target_view_index=None, example_indices=None):
@@ -273,7 +273,7 @@ class Dataset():
                                    target_view_index=view_index)
         for view_index in range(noisy_dataset["Metadata"].attrs["nbView"]):
             view_key = "View" + str(view_index)
-            view_dset = noisy_dataset.get[view_key]
+            view_dset = noisy_dataset[view_key]
             try:
                 view_limits = self.dataset[
                     "Metadata/View" + str(view_index) + "_limits"][()]
diff --git a/multiview_platform/tests/test_utils/test_GetMultiviewDB.py b/multiview_platform/tests/test_utils/test_GetMultiviewDB.py
index a61bfbf3..a9f5dae8 100644
--- a/multiview_platform/tests/test_utils/test_GetMultiviewDB.py
+++ b/multiview_platform/tests/test_utils/test_GetMultiviewDB.py
@@ -21,7 +21,7 @@ class Test_get_classic_db_hdf5(unittest.TestCase):
         self.views = [self.rs.randint(0, 10, size=(self.nb_examples, 7))
                       for _ in range(self.nb_view)]
         self.labels = self.rs.randint(0, self.nb_class, self.nb_examples)
-        self.dataset_file = h5py.File(os.path.join(tmp_path, self.file_name))
+        self.dataset_file = h5py.File(os.path.join(tmp_path, self.file_name), 'w')
         self.view_names = ["ViewN" + str(index) for index in
                            range(len(self.views))]
         self.are_sparse = [False for _ in self.views]
diff --git a/multiview_platform/tests/test_utils/test_dataset.py b/multiview_platform/tests/test_utils/test_dataset.py
index dcfcb353..6125243c 100644
--- a/multiview_platform/tests/test_utils/test_dataset.py
+++ b/multiview_platform/tests/test_utils/test_dataset.py
@@ -22,7 +22,7 @@ class Test_Dataset(unittest.TestCase):
         cls.views = [cls.rs.randint(0, 10, size=(cls.nb_examples, cls.nb_attr))
                      for _ in range(cls.nb_view)]
         cls.labels = cls.rs.randint(0, cls.nb_class, cls.nb_examples)
-        cls.dataset_file = h5py.File(os.path.join(tmp_path, cls.file_name))
+        cls.dataset_file = h5py.File(os.path.join(tmp_path, cls.file_name), "w")
         cls.view_names = ["ViewN" + str(index) for index in range(len(cls.views))]
         cls.are_sparse = [False for _ in cls.views]
         for view_index, (view_name, view, is_sparse) in enumerate(
@@ -50,7 +50,7 @@ class Test_Dataset(unittest.TestCase):
     def test_filter(self):
         """Had to create a new dataset to aviod playing with the class one"""
         file_name = "test_filter.hdf5"
-        dataset_file_filter = h5py.File(os.path.join(tmp_path, file_name))
+        dataset_file_filter = h5py.File(os.path.join(tmp_path, file_name), "w")
         for view_index, (view_name, view, is_sparse) in enumerate(
                 zip(self.view_names, self.views, self.are_sparse)):
             view_dataset = dataset_file_filter.create_dataset(
@@ -155,7 +155,7 @@ class Test_Dataset(unittest.TestCase):
                                  source_view_name="ViewN0",
                                  target_view_index=1)
         self.assertIn("View1", list(new_dataset.keys()))
-        np.testing.assert_array_equal(dataset_object.get_v(0), new_dataset["View1"].value)
+        np.testing.assert_array_equal(dataset_object.get_v(0), new_dataset["View1"][()])
         self.assertEqual(new_dataset["View1"].attrs["name"], "ViewN0")
         new_dataset.close()
         os.remove(os.path.join(tmp_path, "test_copy.hdf5"))
@@ -180,7 +180,7 @@ class Test_Dataset(unittest.TestCase):
 
     def test_select_views_and_labels(self):
         file_name = "test_filter.hdf5"
-        dataset_file_select = h5py.File(os.path.join(tmp_path, file_name))
+        dataset_file_select = h5py.File(os.path.join(tmp_path, file_name), "w")
         for view_index, (view_name, view, is_sparse) in enumerate(
                 zip(self.view_names, self.views, self.are_sparse)):
             view_dataset = dataset_file_select.create_dataset(
@@ -208,7 +208,7 @@ class Test_Dataset(unittest.TestCase):
 
     def test_add_gaussian_noise(self):
         file_name = "test_noise.hdf5"
-        dataset_file_select = h5py.File(os.path.join(tmp_path, file_name))
+        dataset_file_select = h5py.File(os.path.join(tmp_path, file_name), "w")
         limits = np.zeros((self.nb_attr, 2))
         limits[:, 1] += 100
         meta_data_grp = dataset_file_select.create_group("Metadata")
diff --git a/multiview_platform/tests/test_utils/test_hyper_parameter_search.py b/multiview_platform/tests/test_utils/test_hyper_parameter_search.py
index b5dfe409..03a9655b 100644
--- a/multiview_platform/tests/test_utils/test_hyper_parameter_search.py
+++ b/multiview_platform/tests/test_utils/test_hyper_parameter_search.py
@@ -55,7 +55,7 @@ class Test_randomized_search(unittest.TestCase):
 
     def test_simple(self):
         best_params, test_folds_preds = hyper_parameter_search.randomized_search(
-            self.dataset, self.labels.value, "multiview", self.random_state, tmp_path,
+            self.dataset, self.labels[()], "multiview", self.random_state, tmp_path,
             weighted_linear_early_fusion, "WeightedLinearEarlyFusion", self.k_folds,
         1, ["accuracy_score", None], 2, {}, learning_indices=self.learning_indices)
 
-- 
GitLab