diff --git a/multiview_platform/mono_multi_view_classifiers/exec_classif.py b/multiview_platform/mono_multi_view_classifiers/exec_classif.py
index 54a4a52e94e9f76c9db07d3b246b43bb4d7ce199..5cad4079c46ad14e20ccde8cd5591658e586fde2 100644
--- a/multiview_platform/mono_multi_view_classifiers/exec_classif.py
+++ b/multiview_platform/mono_multi_view_classifiers/exec_classif.py
@@ -1,31 +1,27 @@
 import errno
+import itertools
 import logging
-import math
 import os
 import pkgutil
 import time
 import traceback
 
 import matplotlib
-import itertools
 import numpy as np
-from joblib import Parallel, delayed
 from sklearn.tree import DecisionTreeClassifier
 
 # Import own modules
 from . import monoview_classifiers
 from . import multiview_classifiers
-from .multiview.exec_multiview import exec_multiview, exec_multiview_multicore
-from .monoview.exec_classif_mono_view import exec_monoview, exec_monoview_multicore
-from .utils.dataset import delete_HDF5
+from .monoview.exec_classif_mono_view import exec_monoview
+from .multiview.exec_multiview import exec_multiview
 from .result_analysis import get_results, plot_results_noise, analyze_iterations
-from .utils import execution, dataset, multiclass, configuration
+from .utils import execution, dataset, configuration
+from .utils.dataset import delete_HDF5
 
 matplotlib.use(
     'Agg')  # Anti-Grain Geometry C++ library to make a raster (pixel) image of the figure
 
-
-
 # Author-Info
 __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
@@ -58,59 +54,63 @@ def init_benchmark(cl_type, monoview_algos, multiview_algos, args):
     """
     benchmark = {"monoview": {}, "multiview": {}}
 
-
     if "monoview" in cl_type:
         if monoview_algos == ['all']:
             benchmark["monoview"] = [name for _, name, isPackage in
-                                     pkgutil.iter_modules(monoview_classifiers.__path__)
+                                     pkgutil.iter_modules(
+                                         monoview_classifiers.__path__)
                                      if not isPackage]
 
         else:
             benchmark["monoview"] = monoview_algos
 
     if "multiview" in cl_type:
-        if multiview_algos==["all"]:
+        if multiview_algos == ["all"]:
             benchmark["multiview"] = [name for _, name, isPackage in
-                                     pkgutil.iter_modules(multiview_classifiers.__path__)
-                                     if not isPackage]
+                                      pkgutil.iter_modules(
+                                          multiview_classifiers.__path__)
+                                      if not isPackage]
         else:
             benchmark["multiview"] = multiview_algos
     return benchmark
 
 
 def init_argument_dictionaries(benchmark, views_dictionary,
-                                nb_class, init_kwargs):
+                               nb_class, init_kwargs):
     argument_dictionaries = {"monoview": [], "multiview": []}
     if benchmark["monoview"]:
         argument_dictionaries["monoview"] = init_monoview_exps(
-                                                   benchmark["monoview"],
-                                                   views_dictionary,
-                                                   nb_class,
-                                                   init_kwargs["monoview"])
+            benchmark["monoview"],
+            views_dictionary,
+            nb_class,
+            init_kwargs["monoview"])
     if benchmark["multiview"]:
-        argument_dictionaries["multiview"] = init_multiview_exps(benchmark["multiview"],
-                                                   views_dictionary,
-                                                   nb_class,
-                                                   init_kwargs["multiview"])
+        argument_dictionaries["multiview"] = init_multiview_exps(
+            benchmark["multiview"],
+            views_dictionary,
+            nb_class,
+            init_kwargs["multiview"])
     return argument_dictionaries
 
 
-def init_multiview_exps(classifier_names, views_dictionary, nb_class, kwargs_init):
+def init_multiview_exps(classifier_names, views_dictionary, nb_class,
+                        kwargs_init):
     multiview_arguments = []
     for classifier_name in classifier_names:
         if multiple_args(get_path_dict(kwargs_init[classifier_name])):
             multiview_arguments += gen_multiple_args_dictionnaries(
-                                                                  nb_class,
-                                                                  kwargs_init,
-                                                                  classifier_name,
-                                                                  views_dictionary=views_dictionary,
-                                                                  framework="multiview")
+                nb_class,
+                kwargs_init,
+                classifier_name,
+                views_dictionary=views_dictionary,
+                framework="multiview")
         else:
             arguments = get_path_dict(kwargs_init[classifier_name])
-            multiview_arguments += [gen_single_multiview_arg_dictionary(classifier_name,
-                                                                        arguments,
-                                                                        nb_class,
-                                                                        views_dictionary=views_dictionary)]
+            multiview_arguments += [
+                gen_single_multiview_arg_dictionary(classifier_name,
+                                                    arguments,
+                                                    nb_class,
+                                                    views_dictionary=views_dictionary)]
     return multiview_arguments
 
 
@@ -162,7 +162,7 @@ def gen_single_monoview_arg_dictionary(classifier_name, arguments, nb_class,
                                        view_index, view_name):
     if classifier_name in arguments:
         classifier_config = dict((key, value[0]) for key, value in arguments[
-                            classifier_name].items())
+            classifier_name].items())
     else:
         classifier_config = {}
     return {classifier_name: classifier_config,
@@ -172,7 +172,7 @@ def gen_single_monoview_arg_dictionary(classifier_name, arguments, nb_class,
             "nb_class": nb_class}
 
 
-def gen_single_multiview_arg_dictionary(classifier_name,arguments,nb_class,
+def gen_single_multiview_arg_dictionary(classifier_name, arguments, nb_class,
                                         views_dictionary=None):
     return {"classifier_name": classifier_name,
             "view_names": list(views_dictionary.keys()),
@@ -210,11 +210,11 @@ def set_element(dictionary, path, value):
 
 def multiple_args(classifier_configuration):
     """Checks if multiple values were provided for at least one arg"""
-    listed_args = [type(value) == list and len(value)>1 for key, value in
+    listed_args = [type(value) == list and len(value) > 1 for key, value in
                    classifier_configuration.items()]
     if True in listed_args:
         return True
-    else: 
+    else:
         return False
 
 
@@ -223,7 +223,8 @@ def get_path_dict(multiview_classifier_args):
     the path to the value.
     If given {"key1":{"key1_1":value1}, "key2":value2}, it will return
     {"key1.key1_1":value1, "key2":value2}"""
-    path_dict = dict((key, value) for key, value in multiview_classifier_args.items())
+    path_dict = dict(
+        (key, value) for key, value in multiview_classifier_args.items())
     paths = is_dict_in(path_dict)
     while paths:
         for path in paths:
@@ -283,9 +284,11 @@ def gen_multiple_kwargs_combinations(cl_kwrags):
     reduced_listed_values = [
         [_ if type(_) not in reduce_dict else reduce_dict[type(_)] for _ in
          list_] for list_ in listed_values]
-    reduced_values_cartesian_prod = [_ for _ in itertools.product(*reduced_listed_values)]
-    reduced_kwargs_combination = [dict((key, value) for key, value in zip(keys, values))
-                          for values in reduced_values_cartesian_prod]
+    reduced_values_cartesian_prod = [_ for _ in
+                                     itertools.product(*reduced_listed_values)]
+    reduced_kwargs_combination = [
+        dict((key, value) for key, value in zip(keys, values))
+        for values in reduced_values_cartesian_prod]
     return kwargs_combination, reduced_kwargs_combination
 
 
@@ -326,26 +329,29 @@ def gen_multiple_args_dictionnaries(nb_class, kwargs_init, classifier,
         The list of all the possible combination of asked arguments
 
     """
-    if framework=="multiview":
+    if framework == "multiview":
         classifier_config = get_path_dict(kwargs_init[classifier])
     else:
         classifier_config = kwargs_init[classifier]
-    multiple_kwargs_list, reduced_multiple_kwargs_list = gen_multiple_kwargs_combinations(classifier_config)
+    multiple_kwargs_list, reduced_multiple_kwargs_list = gen_multiple_kwargs_combinations(
+        classifier_config)
     multiple_kwargs_dict = dict(
-        (classifier+"_"+"_".join(map(str,list(reduced_dictionary.values()))), dictionary)
-        for reduced_dictionary, dictionary in zip(reduced_multiple_kwargs_list, multiple_kwargs_list ))
+        (classifier + "_" + "_".join(
+            map(str, list(reduced_dictionary.values()))), dictionary)
+        for reduced_dictionary, dictionary in
+        zip(reduced_multiple_kwargs_list, multiple_kwargs_list))
     args_dictionnaries = [gen_single_monoview_arg_dictionary(classifier_name,
                                                              arguments,
                                                              nb_class,
                                                              view_index=view_index,
                                                              view_name=view_name)
-                           if framework=="monoview" else
-                           gen_single_multiview_arg_dictionary(classifier_name,
-                                                            arguments,
-                                                            nb_class,
-                                                            views_dictionary=views_dictionary)
-                           for classifier_name, arguments
-                           in multiple_kwargs_dict.items()]
+                          if framework == "monoview" else
+                          gen_single_multiview_arg_dictionary(classifier_name,
+                                                              arguments,
+                                                              nb_class,
+                                                              views_dictionary=views_dictionary)
+                          for classifier_name, arguments
+                          in multiple_kwargs_dict.items()]
     return args_dictionnaries
 
 
@@ -370,14 +376,14 @@ def init_kwargs(args, classifiers_names, framework="monoview"):
     kwargs = {}
     for classifiers_name in classifiers_names:
         try:
-            if framework=="monoview":
+            if framework == "monoview":
                 getattr(monoview_classifiers, classifiers_name)
             else:
                 getattr(multiview_classifiers, classifiers_name)
         except AttributeError:
             raise AttributeError(
                 classifiers_name + " is not implemented in monoview_classifiers, "
-                                  "please specify the name of the file in monoview_classifiers")
+                                   "please specify the name of the file in monoview_classifiers")
         if classifiers_name in args:
             kwargs[classifiers_name] = args[classifiers_name]
         else:
@@ -406,9 +412,11 @@ def init_kwargs_func(args, benchmark):
     kwargs : dict
         The arguments for each mono- and multiview algorithms
     """
-    monoview_kwargs = init_kwargs(args, benchmark["monoview"], framework="monoview")
-    multiview_kwargs = init_kwargs(args, benchmark["multiview"], framework="multiview")
-    kwargs = {"monoview":monoview_kwargs, "multiview":multiview_kwargs}
+    monoview_kwargs = init_kwargs(args, benchmark["monoview"],
+                                  framework="monoview")
+    multiview_kwargs = init_kwargs(args, benchmark["multiview"],
+                                   framework="multiview")
+    kwargs = {"monoview": monoview_kwargs, "multiview": multiview_kwargs}
     return kwargs
 
 
@@ -505,16 +513,20 @@ def benchmark_init(directory, classification_indices, labels, labels_dictionary,
 
     """
     logging.debug("Start:\t Benchmark initialization")
-    if not os.path.exists(os.path.dirname(os.path.join(directory, "train_labels.csv"))):
+    if not os.path.exists(
+            os.path.dirname(os.path.join(directory, "train_labels.csv"))):
         try:
-            os.makedirs(os.path.dirname(os.path.join(directory, "train_labels.csv")))
+            os.makedirs(
+                os.path.dirname(os.path.join(directory, "train_labels.csv")))
         except OSError as exc:
             if exc.errno != errno.EEXIST:
                 raise
     train_indices = classification_indices[0]
     train_labels = dataset_var.get_labels(example_indices=train_indices)
-    np.savetxt(os.path.join(directory, "train_labels.csv"), train_labels, delimiter=",")
-    np.savetxt(os.path.join(directory, "train_indices.csv"), classification_indices[0],
+    np.savetxt(os.path.join(directory, "train_labels.csv"), train_labels,
+               delimiter=",")
+    np.savetxt(os.path.join(directory, "train_indices.csv"),
+               classification_indices[0],
                delimiter=",")
     results_monoview = []
     folds = k_folds.split(np.arange(len(train_labels)), train_labels)
@@ -652,10 +664,13 @@ def exec_one_benchmark_mono_core(dataset_var=None, labels_dictionary=None,
                                  hyper_param_search=None, metrics=None,
                                  argument_dictionaries=None,
                                  benchmark=None, views=None, views_indices=None,
-                                 flag=None, labels=None, track_tracebacks=False):
+                                 flag=None, labels=None,
+                                 track_tracebacks=False):
     results_monoview, labels_names = benchmark_init(directory,
-                                                 classification_indices, labels,
-                                                 labels_dictionary, k_folds, dataset_var)
+                                                    classification_indices,
+                                                    labels,
+                                                    labels_dictionary, k_folds,
+                                                    dataset_var)
     logging.getLogger('matplotlib.font_manager').disabled = True
     logging.debug("Start:\t monoview benchmark")
     traceback_outputs = {}
@@ -667,11 +682,14 @@ def exec_one_benchmark_mono_core(dataset_var=None, labels_dictionary=None,
                 exec_monoview(directory, X, Y, args["name"], labels_names,
                               classification_indices, k_folds,
                               1, args["file_type"], args["pathf"], random_state,
-                              hyper_param_search=hyper_param_search, metrics=metrics,
+                              hyper_param_search=hyper_param_search,
+                              metrics=metrics,
                               n_iter=args["hps_iter"], **arguments)]
         except:
             if track_tracebacks:
-                traceback_outputs[arguments["classifier_name"]+"-"+arguments["view_name"]] = traceback.format_exc()
+                traceback_outputs[
+                    arguments["classifier_name"] + "-" +
+                    arguments["view_name"]] = traceback.format_exc()
             else:
                 raise
 
@@ -692,14 +710,18 @@ def exec_one_benchmark_mono_core(dataset_var=None, labels_dictionary=None,
     for arguments in argument_dictionaries["multiview"]:
         try:
             results_multiview += [
-                exec_multiview(directory, dataset_var, args["name"], classification_indices,
-                              k_folds, 1, args["file_type"],
-                              args["pathf"], labels_dictionary, random_state, labels,
-                              hyper_param_search=hyper_param_search,
-                              metrics=metrics, n_iter=args["hps_iter"], **arguments)]
+                exec_multiview(directory, dataset_var, args["name"],
+                               classification_indices,
+                               k_folds, 1, args["file_type"],
+                               args["pathf"], labels_dictionary, random_state,
+                               labels,
+                               hyper_param_search=hyper_param_search,
+                               metrics=metrics, n_iter=args["hps_iter"],
+                               **arguments)]
         except:
             if track_tracebacks:
-                traceback_outputs[arguments["classifier_name"]] = traceback.format_exc()
+                traceback_outputs[
+                    arguments["classifier_name"]] = traceback.format_exc()
             else:
                 raise
     logging.debug("Done:\t multiview benchmark")
@@ -709,7 +731,7 @@ def exec_one_benchmark_mono_core(dataset_var=None, labels_dictionary=None,
 
 def exec_benchmark(nb_cores, stats_iter,
                    benchmark_arguments_dictionaries,
-                   directory,  metrics, dataset_var, track_tracebacks,
+                   directory, metrics, dataset_var, track_tracebacks,
                    exec_one_benchmark_mono_core=exec_one_benchmark_mono_core,
                    get_results=get_results, delete=delete_HDF5,
                    analyze_iterations=analyze_iterations):
@@ -772,10 +794,14 @@ def exec_benchmark(nb_cores, stats_iter,
     #         benchmark_arguments_dictionaries[0])]
     # else:
     for arguments in benchmark_arguments_dictionaries:
-        benchmark_results = exec_one_benchmark_mono_core(dataset_var=dataset_var,
-                                                         track_tracebacks=track_tracebacks,
-                                                         **arguments)
-        analyze_iterations([benchmark_results], benchmark_arguments_dictionaries, stats_iter, metrics, example_ids=dataset_var.example_ids, labels=dataset_var.get_labels())
+        benchmark_results = exec_one_benchmark_mono_core(
+            dataset_var=dataset_var,
+            track_tracebacks=track_tracebacks,
+            **arguments)
+        analyze_iterations([benchmark_results],
+                           benchmark_arguments_dictionaries, stats_iter,
+                           metrics, example_ids=dataset_var.example_ids,
+                           labels=dataset_var.get_labels())
         results += [benchmark_results]
     logging.debug("Done:\t Executing all the needed biclass benchmarks")
 
@@ -823,44 +849,56 @@ def exec_classif(arguments):
                                                 args["file_type"],
                                                 args["name"])
     if not args["add_noise"]:
-        args["noise_std"]=[0.0]
+        args["noise_std"] = [0.0]
     for dataset_name in dataset_list:
         noise_results = []
         for noise_std in args["noise_std"]:
 
-            directory = execution.init_log_file(dataset_name, args["views"], args["file_type"],
-                                              args["log"], args["debug"], args["label"],
-                                              args["res_dir"], args["add_noise"], noise_std, args)
-
-            random_state = execution.init_random_state(args["random_state"], directory)
-            stats_iter_random_states = execution.init_stats_iter_random_states(stats_iter,
-                                                                        random_state)
-
-            get_database = execution.get_database_function(dataset_name, args["file_type"])
-
-            dataset_var, labels_dictionary, datasetname = get_database(args["views"],
-                                                                  args["pathf"], dataset_name,
-                                                                  args["nb_class"],
-                                                                  args["classes"],
-                                                                  random_state,
-                                                                  args["full"],
-                                                                  args["add_noise"],
-                                                                  noise_std)
+            directory = execution.init_log_file(dataset_name, args["views"],
+                                                args["file_type"],
+                                                args["log"], args["debug"],
+                                                args["label"],
+                                                args["res_dir"],
+                                                args["add_noise"], noise_std,
+                                                args)
+
+            random_state = execution.init_random_state(args["random_state"],
+                                                       directory)
+            stats_iter_random_states = execution.init_stats_iter_random_states(
+                stats_iter,
+                random_state)
+
+            get_database = execution.get_database_function(dataset_name,
+                                                           args["file_type"])
+
+            dataset_var, labels_dictionary, datasetname = get_database(
+                args["views"],
+                args["pathf"], dataset_name,
+                args["nb_class"],
+                args["classes"],
+                random_state,
+                args["full"],
+                args["add_noise"],
+                noise_std)
             args["name"] = datasetname
 
-            splits = execution.gen_splits(dataset_var.get_labels(), args["split"],
-                                         stats_iter_random_states)
+            splits = execution.gen_splits(dataset_var.get_labels(),
+                                          args["split"],
+                                          stats_iter_random_states)
 
             # multiclass_labels, labels_combinations, indices_multiclass = multiclass.gen_multiclass_labels(
             #     dataset_var.get_labels(), multiclass_method, splits)
 
             k_folds = execution.gen_k_folds(stats_iter, args["nb_folds"],
-                                         stats_iter_random_states)
-
-            dataset_files = dataset.init_multiple_datasets(args["pathf"], args["name"], nb_cores)
+                                            stats_iter_random_states)
 
+            dataset_files = dataset.init_multiple_datasets(args["pathf"],
+                                                           args["name"],
+                                                           nb_cores)
 
-            views, views_indices, all_views = execution.init_views(dataset_var, args["views"])
+            views, views_indices, all_views = execution.init_views(
+                dataset_var,
+                args["views"])
             views_dictionary = dataset_var.get_view_dict()
             nb_views = len(views)
             nb_class = dataset_var.get_nb_class()
@@ -868,19 +906,23 @@ def exec_classif(arguments):
             metrics = [metric.split(":") for metric in args["metrics"]]
             if metrics == [["all"]]:
                 metrics_names = [name for _, name, isPackage
-                                in pkgutil.iter_modules(
-                        [os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'metrics')]) if
-                                not isPackage and name not in ["framework", "log_loss",
-                                                               "matthews_corrcoef",
-                                                               "roc_auc_score"]]
+                                 in pkgutil.iter_modules([os.path.join(
+                                     os.path.dirname(os.path.dirname(
+                                         os.path.realpath(__file__))),
+                                     'metrics')])
+                                 if not isPackage and name not in [
+                                     "framework", "log_loss",
+                                     "matthews_corrcoef",
+                                     "roc_auc_score"]]
                 metrics = [[metricName] for metricName in metrics_names]
             metrics = arange_metrics(metrics, args["metric_princ"])
             for metricIndex, metric in enumerate(metrics):
                 if len(metric) == 1:
                     metrics[metricIndex] = [metric[0], None]
 
-            benchmark = init_benchmark(cl_type, monoview_algos, multiview_algos, args)
-            init_kwargs= init_kwargs_func(args, benchmark)
+            benchmark = init_benchmark(cl_type, monoview_algos, multiview_algos,
+                                       args)
+            init_kwargs = init_kwargs_func(args, benchmark)
             data_base_time = time.time() - start
             argument_dictionaries = init_argument_dictionaries(
                 benchmark, views_dictionary,
@@ -894,12 +936,12 @@ def exec_classif(arguments):
                 hyper_param_search, args, k_folds,
                 stats_iter_random_states, metrics,
                 argument_dictionaries, benchmark,
-                views, views_indices,)
+                views, views_indices)
             results_mean_stds = exec_benchmark(
                 nb_cores, stats_iter,
-                benchmark_argument_dictionaries, directory, metrics, dataset_var,
+                benchmark_argument_dictionaries, directory, metrics,
+                dataset_var,
                 args["track_tracebacks"])
             noise_results.append([noise_std, results_mean_stds])
-            plot_results_noise(directory, noise_results, metrics[0][0], dataset_name)
-
-
+            plot_results_noise(directory, noise_results, metrics[0][0],
+                               dataset_name)
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/__init__.py b/multiview_platform/mono_multi_view_classifiers/metrics/__init__.py
index 3d328b81687df16af2ea71f2155c030e344a7bde..4a7ca0b0f318e8483b6bc7cb464621ea27257f05 100644
--- a/multiview_platform/mono_multi_view_classifiers/metrics/__init__.py
+++ b/multiview_platform/mono_multi_view_classifiers/metrics/__init__.py
@@ -31,5 +31,3 @@ for module in os.listdir(os.path.dirname(os.path.realpath(__file__))):
     __import__(module[:-3], locals(), globals(), [], 1)
     pass
 del os
-
-
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/accuracy_score.py b/multiview_platform/mono_multi_view_classifiers/metrics/accuracy_score.py
index f84d4eea8939cd4fe4ab4064074b889ede4f57ed..e9faae69ed7dd7c8a33dabbd43da6f78a80b7ab7 100644
--- a/multiview_platform/mono_multi_view_classifiers/metrics/accuracy_score.py
+++ b/multiview_platform/mono_multi_view_classifiers/metrics/accuracy_score.py
@@ -3,9 +3,10 @@
  get_scorer: returns a sklearn scorer for grid search
 """
 
+import warnings
+
 from sklearn.metrics import accuracy_score as metric
 from sklearn.metrics import make_scorer
-import warnings
 
 warnings.warn("the accuracy_score module  is deprecated", DeprecationWarning,
               stacklevel=2)
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/f1_score.py b/multiview_platform/mono_multi_view_classifiers/metrics/f1_score.py
index d037fd7ade36385417b77cc614ac670c5a54c7c6..6b9b89df0e5556ea89617f558d309e113fbf47d0 100644
--- a/multiview_platform/mono_multi_view_classifiers/metrics/f1_score.py
+++ b/multiview_platform/mono_multi_view_classifiers/metrics/f1_score.py
@@ -3,9 +3,10 @@
  get_scorer: returns a sklearn scorer for grid search
 """
 
+import warnings
+
 from sklearn.metrics import f1_score as metric
 from sklearn.metrics import make_scorer
-import warnings
 
 warnings.warn("the f1_score module  is deprecated", DeprecationWarning,
               stacklevel=2)
@@ -14,15 +15,17 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
 
-def score(y_true, y_pred, multiclass=True, average='micro',  **kwargs):
+def score(y_true, y_pred, multiclass=True, average='micro', **kwargs):
     score = metric(y_true, y_pred, average=average, **kwargs)
     return score
 
 
 def get_scorer(average="micro", **kwargs):
-    return make_scorer(metric, greater_is_better=True, average=average, **kwargs)
+    return make_scorer(metric, greater_is_better=True, average=average,
+                       **kwargs)
 
 
 def get_config(average="micro", **kwargs, ):
-    config_string = "F1 score using average: {}, {} (higher is better)".format(average, kwargs)
+    config_string = "F1 score using average: {}, {} (higher is better)".format(
+        average, kwargs)
     return config_string
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/fbeta_score.py b/multiview_platform/mono_multi_view_classifiers/metrics/fbeta_score.py
index abf31e134b8e49f4da6cec163c8dd5716e5e2dfc..60a5141aa538ad4d204a705c18085de876066173 100644
--- a/multiview_platform/mono_multi_view_classifiers/metrics/fbeta_score.py
+++ b/multiview_platform/mono_multi_view_classifiers/metrics/fbeta_score.py
@@ -1,6 +1,7 @@
+import warnings
+
 from sklearn.metrics import fbeta_score as metric
 from sklearn.metrics import make_scorer
-import warnings
 
 warnings.warn("the fbeta_score module is deprecated", DeprecationWarning,
               stacklevel=2)
@@ -21,5 +22,6 @@ def get_scorer(beta=2.0, average="micro", **kwargs):
 
 
 def get_config(beta=2.0, average="micro", **kwargs):
-    config_string = "F-beta score using beta: {}, average: {}, {} (higher is better)".format(beta, average, kwargs)
+    config_string = "F-beta score using beta: {}, average: {}, {} (higher is better)".format(
+        beta, average, kwargs)
     return config_string
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/generic_score.py b/multiview_platform/mono_multi_view_classifiers/metrics/generic_score.py
index 9a004452b1737234ca1a775a150405dbb2e3c0ed..81d896d88039725a66adc7ab13a5d044106233b1 100644
--- a/multiview_platform/mono_multi_view_classifiers/metrics/generic_score.py
+++ b/multiview_platform/mono_multi_view_classifiers/metrics/generic_score.py
@@ -12,25 +12,27 @@ def score(y_true, y_pred, multiclass=False, type='f1_score', **kwargs):
     Returns:
     Weighted accuracy score for y_true, y_pred"""
     _type_names = ['accuracy_score', 'f1_score', 'fbeta_score', 'hamming_loss',
-                  'jaccard_similarity_score', 'precision_score', 'recall_score',
-                  'roc_auc_score', 'zero_one_loss', 'zero_one_loss', 'framework']
+                   'jaccard_similarity_score', 'precision_score',
+                   'recall_score',
+                   'roc_auc_score', 'zero_one_loss', 'zero_one_loss',
+                   'framework']
     if type not in _type_names:
         raise NameError('type  must be in :' + _type_names)
     version = -1
     try:
-       kwargs0  = kwargs["0"]
+        kwargs0 = kwargs["0"]
     except Exception:
-       kwargs0  = None
+        kwargs0 = None
     if type.startswith('matthews_corrcoef'):
         from sklearn.metrics import matthews_corrcoef
         score = matthews_corrcoef(y_true, y_pred)
     elif type.startswith('accuracy_score'):
         version = 0
         from sklearn.metrics import accuracy_score
-        score = accuracy_score (y_true, y_pred, sample_weight=kwargs0)
+        score = accuracy_score(y_true, y_pred, sample_weight=kwargs0)
     elif type.startswith('zero_one_loss'):
-         from sklearn.metrics import zero_one_loss
-         score = zero_one_loss(y_true, y_pred, sample_weight=kwargs0)
+        from sklearn.metrics import zero_one_loss
+        score = zero_one_loss(y_true, y_pred, sample_weight=kwargs0)
     elif type.startswith('hamming_loss'):
         from sklearn.metrics import hamming_loss
         classes = kwargs0
@@ -55,7 +57,7 @@ def score(y_true, y_pred, multiclass=False, type='f1_score', **kwargs):
             else:
                 average = "binary"
         score = f1_score(y_true, y_pred, sample_weight=kwargs0, labels=labels,
-                   pos_label=pos_label, average=average)
+                         pos_label=pos_label, average=average)
     elif type.startswith('fbeta_score'):
         from sklearn.metrics import fbeta_score
         try:
@@ -78,11 +80,11 @@ def score(y_true, y_pred, multiclass=False, type='f1_score', **kwargs):
             else:
                 average = "binary"
         score = fbeta_score(y_true, y_pred, beta, sample_weight=kwargs0,
-                       labels=labels, pos_label=pos_label,
-                       average=average)
+                            labels=labels, pos_label=pos_label,
+                            average=average)
     elif type.startswith('jaccard_similarity_score'):
-       from sklearn.metrics import jaccard_similarity_score
-       score = jaccard_similarity_score(y_true, y_pred, sample_weight=kwargs0)
+        from sklearn.metrics import jaccard_similarity_score
+        score = jaccard_similarity_score(y_true, y_pred, sample_weight=kwargs0)
     elif type.startswith('log_loss'):
         from sklearn.metrics import log_loss
         try:
@@ -91,68 +93,66 @@ def score(y_true, y_pred, multiclass=False, type='f1_score', **kwargs):
             eps = 1e-15
         score = log_loss(y_true, y_pred, sample_weight=kwargs0, eps=eps)
     elif type.startswith('precision_score'):
-         from sklearn.metrics import precision_score
-         try:
-             labels = kwargs["1"]
-         except Exception:
-             labels = None
-         try:
-             pos_label = kwargs["2"]
-         except Exception:
-             pos_label = 1
-         try:
-             average = kwargs["3"]
-         except Exception:
-             if multiclass:
-                 average = "micro"
-             else:
-                 average = "binary"
-         score = precision_score(y_true, y_pred,
+        from sklearn.metrics import precision_score
+        try:
+            labels = kwargs["1"]
+        except Exception:
+            labels = None
+        try:
+            pos_label = kwargs["2"]
+        except Exception:
+            pos_label = 1
+        try:
+            average = kwargs["3"]
+        except Exception:
+            if multiclass:
+                average = "micro"
+            else:
+                average = "binary"
+        score = precision_score(y_true, y_pred,
                                 sample_weight=kwargs0, labels=labels,
                                 pos_label=pos_label, average=average)
     elif type.startswith('recall_score'):
-         from sklearn.metrics import recall_score
-         try:
-             labels = kwargs["1"]
-         except Exception:
-             labels = None
-         try:
-             pos_label = kwargs["2"]
-         except Exception:
-             pos_label = 1
-         try:
-             average = kwargs["3"]
-         except Exception:
-             if multiclass:
-                 average = "micro"
-             else:
-                 average = "binary"
-         score = recall_score(y_true, y_pred, sample_weight=kwargs0,
-                              labels=labels,
-                              pos_label=pos_label, average=average)
+        from sklearn.metrics import recall_score
+        try:
+            labels = kwargs["1"]
+        except Exception:
+            labels = None
+        try:
+            pos_label = kwargs["2"]
+        except Exception:
+            pos_label = 1
+        try:
+            average = kwargs["3"]
+        except Exception:
+            if multiclass:
+                average = "micro"
+            else:
+                average = "binary"
+        score = recall_score(y_true, y_pred, sample_weight=kwargs0,
+                             labels=labels,
+                             pos_label=pos_label, average=average)
     elif type.startswith('roc_auc_score'):
-         from sklearn.metrics import roc_auc_score
-         from sklearn.preprocessing import MultiLabelBinarizer
-         try:
-             average = kwargs["1"]
-         except Exception:
-             if multiclass:
-                 average = "micro"
-             else:
-                 average = None
-         if multiclass:
-             mlb = MultiLabelBinarizer()
-             y_true = mlb.fit_transform([(label) for label in y_true])
-             y_pred = mlb.fit_transform([(label) for label in y_pred])
-         score = roc_auc_score(y_true, y_pred,
-                               sample_weight=kwargs0, average=average)
+        from sklearn.metrics import roc_auc_score
+        from sklearn.preprocessing import MultiLabelBinarizer
+        try:
+            average = kwargs["1"]
+        except Exception:
+            if multiclass:
+                average = "micro"
+            else:
+                average = None
+        if multiclass:
+            mlb = MultiLabelBinarizer()
+            y_true = mlb.fit_transform([(label) for label in y_true])
+            y_pred = mlb.fit_transform([(label) for label in y_pred])
+        score = roc_auc_score(y_true, y_pred,
+                              sample_weight=kwargs0, average=average)
     else:
         score = 0.0
         return score
 
 
-
-
 def get_scorer(type='f1_score', **kwargs):
     """Keyword Arguments:
     "0": weights to compute accuracy
@@ -160,15 +160,17 @@ def get_scorer(type='f1_score', **kwargs):
     Returns:
     A weighted sklearn scorer for accuracy"""
     _type_names = ['accuracy_score', 'f1_score', 'fbeta_score', 'hamming_loss',
-                  'jaccard_similarity_score', 'precision_score', 'recall_score',
-                  'roc_auc_score', 'zero_one_loss', 'zero_one_loss', 'framework']
+                   'jaccard_similarity_score', 'precision_score',
+                   'recall_score',
+                   'roc_auc_score', 'zero_one_loss', 'zero_one_loss',
+                   'framework']
     if type not in _type_names:
         raise NameError('type  must be in :' + _type_names)
     try:
         sample_weight = kwargs["0"]
     except Exception:
         sample_weight = None
-    if  type.startswith('accuracy_score'):
+    if type.startswith('accuracy_score'):
         version = 0
         from sklearn.metrics import accuracy_score as metric
         return make_scorer(metric, greater_is_better=True,
@@ -188,8 +190,8 @@ def get_scorer(type='f1_score', **kwargs):
             average = "binary"
         from sklearn.metrics import f1_score as metric
         return make_scorer(metric, greater_is_better=True,
-                       sample_weight=sample_weight, labels=labels,
-                       pos_label=pos_label, average=average)
+                           sample_weight=sample_weight, labels=labels,
+                           pos_label=pos_label, average=average)
     elif type.startswith('fbeta_score'):
         try:
             beta = kwargs["1"]
@@ -209,8 +211,8 @@ def get_scorer(type='f1_score', **kwargs):
             average = "binary"
         from sklearn.metrics import fbeta_score as metric
         return make_scorer(metric, greater_is_better=True, beta=beta,
-                       sample_weight=sample_weight, labels=labels,
-                       pos_label=pos_label, average=average)
+                           sample_weight=sample_weight, labels=labels,
+                           pos_label=pos_label, average=average)
     elif type.startswith('hamming_loss'):
         try:
             classes = kwargs["0"]
@@ -231,7 +233,7 @@ def get_scorer(type='f1_score', **kwargs):
         except Exception:
             eps = 1e-15
         return make_scorer(metric, greater_is_better=False,
-                   sample_weight=sample_weight, eps=eps)
+                           sample_weight=sample_weight, eps=eps)
     elif type.startswith('matthews_corrcoef'):
         from sklearn.metrics import matthews_corrcoef as metric
         return make_scorer(metric, greater_is_better=True)
@@ -251,9 +253,9 @@ def get_scorer(type='f1_score', **kwargs):
         except Exception:
             average = "binary"
         return make_scorer(metric, greater_is_better=True,
-                       sample_weight=sample_weight, labels=labels,
-                       pos_label=pos_label,
-                       average=average)
+                           sample_weight=sample_weight, labels=labels,
+                           pos_label=pos_label,
+                           average=average)
     elif type.startswith('recall_score'):
         try:
             sample_weight = kwargs["0"]
@@ -287,7 +289,7 @@ def get_scorer(type='f1_score', **kwargs):
     elif type.startswith('zero_one_loss'):
         from sklearn.metrics import zero_one_loss as metric
         return make_scorer(metric, greater_is_better=False,
-                       sample_weight=sample_weight)
+                           sample_weight=sample_weight)
     else:
         scorer = None
         return scorer
@@ -295,15 +297,17 @@ def get_scorer(type='f1_score', **kwargs):
 
 def get_config(type='f1_score', **kwargs):
     _type_names = ['accuracy_score', 'f1_score', 'fbeta_score', 'hamming_loss',
-                  'jaccard_similarity_score', 'precision_score', 'recall_score',
-                  'roc_auc_score', 'zero_one_loss', 'zero_one_loss', 'framework']
+                   'jaccard_similarity_score', 'precision_score',
+                   'recall_score',
+                   'roc_auc_score', 'zero_one_loss', 'zero_one_loss',
+                   'framework']
     if type not in _type_names:
         raise NameError('type  must be in :' + _type_names)
     try:
         sample_weight = kwargs["0"]
     except Exception:
         sample_weight = None
-    if  type.startswith('accuracy_score'):
+    if type.startswith('accuracy_score'):
         config_string = "Accuracy score using " + str(
             sample_weight) + " as sample_weights (higher is better)"
     elif type.startswith('f1_score'):
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/hamming_loss.py b/multiview_platform/mono_multi_view_classifiers/metrics/hamming_loss.py
index fafb1a8b7f179d5c94ae24e95fccf55d267fa303..665dd243721d3d93e121b7d010f21c44dc3c528c 100644
--- a/multiview_platform/mono_multi_view_classifiers/metrics/hamming_loss.py
+++ b/multiview_platform/mono_multi_view_classifiers/metrics/hamming_loss.py
@@ -1,6 +1,7 @@
+import warnings
+
 from sklearn.metrics import hamming_loss as metric
 from sklearn.metrics import make_scorer
-import warnings
 
 warnings.warn("the hamming_loss module  is deprecated", DeprecationWarning,
               stacklevel=2)
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/jaccard_similarity_score.py b/multiview_platform/mono_multi_view_classifiers/metrics/jaccard_similarity_score.py
index c9feb61f071c5932d06e2bb948f841a7fa68df13..2d7b639a0b7baa4bc707ec3eb985b165f32014de 100644
--- a/multiview_platform/mono_multi_view_classifiers/metrics/jaccard_similarity_score.py
+++ b/multiview_platform/mono_multi_view_classifiers/metrics/jaccard_similarity_score.py
@@ -1,8 +1,10 @@
+import warnings
+
 from sklearn.metrics import jaccard_similarity_score as metric
 from sklearn.metrics import make_scorer
-import warnings
 
-warnings.warn("the jaccard_similarity_score module  is deprecated", DeprecationWarning,
+warnings.warn("the jaccard_similarity_score module  is deprecated",
+              DeprecationWarning,
               stacklevel=2)
 # Author-Info
 __author__ = "Baptiste Bauvin"
@@ -20,5 +22,6 @@ def get_scorer(**kwargs):
 
 
 def get_config(**kwargs):
-    config_string = "Jaccard_similarity score using {} (higher is better)".format(kwargs)
+    config_string = "Jaccard_similarity score using {} (higher is better)".format(
+        kwargs)
     return config_string
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/log_loss.py b/multiview_platform/mono_multi_view_classifiers/metrics/log_loss.py
index 93ecd7d3133d3c5d3367323b549ad77b157dbc03..2b5ab917d973e9a1e62437ea497c0a40d75b81e3 100644
--- a/multiview_platform/mono_multi_view_classifiers/metrics/log_loss.py
+++ b/multiview_platform/mono_multi_view_classifiers/metrics/log_loss.py
@@ -1,6 +1,7 @@
+import warnings
+
 from sklearn.metrics import log_loss as metric
 from sklearn.metrics import make_scorer
-import warnings
 
 warnings.warn("the log_loss module  is deprecated", DeprecationWarning,
               stacklevel=2)
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/matthews_corrcoef.py b/multiview_platform/mono_multi_view_classifiers/metrics/matthews_corrcoef.py
index 80307efb356f7564ee9e065f8f31e4397c6ae302..b3b8ec6c125a867cf3a1c4a1f9b41b51ed4129c8 100644
--- a/multiview_platform/mono_multi_view_classifiers/metrics/matthews_corrcoef.py
+++ b/multiview_platform/mono_multi_view_classifiers/metrics/matthews_corrcoef.py
@@ -1,6 +1,7 @@
+import warnings
+
 from sklearn.metrics import make_scorer
 from sklearn.metrics import matthews_corrcoef as metric
-import warnings
 
 warnings.warn("the matthews_corrcoef module  is deprecated", DeprecationWarning,
               stacklevel=2)
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/precision_score.py b/multiview_platform/mono_multi_view_classifiers/metrics/precision_score.py
index e72cb4451cd0aff9e43d0eede78b0cf3eaef5d05..d1c861f91a39441a961ff2ff2ef3e79aafbe060e 100644
--- a/multiview_platform/mono_multi_view_classifiers/metrics/precision_score.py
+++ b/multiview_platform/mono_multi_view_classifiers/metrics/precision_score.py
@@ -1,12 +1,15 @@
+import warnings
+
 from sklearn.metrics import make_scorer
 from sklearn.metrics import precision_score as metric
-import warnings
+
 warnings.warn("the precision_score module  is deprecated", DeprecationWarning,
               stacklevel=2)
 # Author-Info
 __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
+
 def score(y_true, y_pred, average='micro', multiclass=False, **kwargs):
     score = metric(y_true, y_pred, average=average, **kwargs)
     return score
@@ -18,5 +21,6 @@ def get_scorer(average='micro', **kwargs):
 
 
 def get_config(average='micro', **kwargs):
-    config_string = "Precision score using average: {}, {} (higher is better)".format(average, kwargs)
+    config_string = "Precision score using average: {}, {} (higher is better)".format(
+        average, kwargs)
     return config_string
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/recall_score.py b/multiview_platform/mono_multi_view_classifiers/metrics/recall_score.py
index 847783e5ce207a98be397d27c3a5bd0b82656aa6..261261990b060b3b759e6013647f3285fd9c9e2c 100644
--- a/multiview_platform/mono_multi_view_classifiers/metrics/recall_score.py
+++ b/multiview_platform/mono_multi_view_classifiers/metrics/recall_score.py
@@ -1,6 +1,7 @@
+import warnings
+
 from sklearn.metrics import make_scorer
 from sklearn.metrics import recall_score as metric
-import warnings
 
 warnings.warn("the recall_score module  is deprecated", DeprecationWarning,
               stacklevel=2)
@@ -20,5 +21,6 @@ def get_scorer(average='micro', **kwargs):
 
 
 def get_config(average="micro", **kwargs):
-    configString = "Recall score using average: {}, {} (higher is better)".format(average, kwargs)
+    configString = "Recall score using average: {}, {} (higher is better)".format(
+        average, kwargs)
     return configString
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/roc_auc_score.py b/multiview_platform/mono_multi_view_classifiers/metrics/roc_auc_score.py
index 58b25dca7b2aac1a04f63d8c2f33edd29ddddcb8..927de41240d89e0ec71f04459e22dbd228c7b6ac 100644
--- a/multiview_platform/mono_multi_view_classifiers/metrics/roc_auc_score.py
+++ b/multiview_platform/mono_multi_view_classifiers/metrics/roc_auc_score.py
@@ -1,7 +1,8 @@
+import warnings
+
 from sklearn.metrics import make_scorer
 from sklearn.metrics import roc_auc_score as metric
 from sklearn.preprocessing import MultiLabelBinarizer
-import warnings
 
 warnings.warn("the roc_auc_score module  is deprecated", DeprecationWarning,
               stacklevel=2)
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/zero_one_loss.py b/multiview_platform/mono_multi_view_classifiers/metrics/zero_one_loss.py
index 85d949ff23863d3b32259023be59c4e1980b71c5..e3a3449247edf934251ddbc4dbb8283bbf632746 100644
--- a/multiview_platform/mono_multi_view_classifiers/metrics/zero_one_loss.py
+++ b/multiview_platform/mono_multi_view_classifiers/metrics/zero_one_loss.py
@@ -1,6 +1,7 @@
+import warnings
+
 from sklearn.metrics import make_scorer
 from sklearn.metrics import zero_one_loss as metric
-import warnings
 
 warnings.warn("the zero_one_loss module  is deprecated", DeprecationWarning,
               stacklevel=2)
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview/analyze_result.py b/multiview_platform/mono_multi_view_classifiers/monoview/analyze_result.py
index 0f7c93d785e931aca98b0fc4e09c4a3b56caaecf..45f13a5ceca6765bb1748d502684decd16aec587 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview/analyze_result.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview/analyze_result.py
@@ -3,8 +3,8 @@ from datetime import timedelta as hms
 from .. import metrics
 
 
-def getDBConfigString(name, feat, classification_indices, shape,
-                      class_labels_names, KFolds):
+def get_db_config_string(name, feat, classification_indices, shape,
+                         class_labels_names, k_folds):
     """
     
     Parameters
@@ -14,93 +14,96 @@ def getDBConfigString(name, feat, classification_indices, shape,
     classification_indices
     shape
     class_labels_names
-    KFolds
+    k_folds
 
     Returns
     -------
 
     """
-    learningRate = float(len(classification_indices[0])) / (
+    learning_rate = float(len(classification_indices[0])) / (
             len(classification_indices[0]) + len(classification_indices[1]))
-    dbConfigString = "Database configuration : \n"
-    dbConfigString += "\t- Database name : " + name + "\n"
-    dbConfigString += "\t- View name : " + feat + "\t View shape : " + str(
+    db_config_string = "Database configuration : \n"
+    db_config_string += "\t- Database name : " + name + "\n"
+    db_config_string += "\t- View name : " + feat + "\t View shape : " + str(
         shape) + "\n"
-    dbConfigString += "\t- Learning Rate : " + str(learningRate) + "\n"
-    dbConfigString += "\t- Labels used : " + ", ".join(
+    db_config_string += "\t- Learning Rate : " + str(learning_rate) + "\n"
+    db_config_string += "\t- Labels used : " + ", ".join(
         class_labels_names) + "\n"
-    dbConfigString += "\t- Number of cross validation folds : " + str(
-        KFolds.n_splits) + "\n\n"
-    return dbConfigString
+    db_config_string += "\t- Number of cross validation folds : " + str(
+        k_folds.n_splits) + "\n\n"
+    return db_config_string
 
 
-def getClassifierConfigString(gridSearch, nbCores, nIter, clKWARGS, classifier,
-                              output_file_name, y_test):
-    classifierConfigString = "Classifier configuration : \n"
-    classifierConfigString += "\t- " + classifier.get_config()[5:] + "\n"
-    classifierConfigString += "\t- Executed on " + str(nbCores) + " core(s) \n"
-    if gridSearch:
-        classifierConfigString += "\t- Got configuration using randomized search with " + str(
-            nIter) + " iterations \n"
-    classifierConfigString += "\n\n"
-    classifierInterpretString = classifier.get_interpretation(output_file_name,
-                                                              y_test)
-    return classifierConfigString, classifierInterpretString
+def get_classifier_config_string(grid_search, nb_cores, n_iter, cl_kwargs,
+                                 classifier,
+                                 output_file_name, y_test):
+    classifier_config_string = "Classifier configuration : \n"
+    classifier_config_string += "\t- " + classifier.get_config()[5:] + "\n"
+    classifier_config_string += "\t- Executed on " + str(
+        nb_cores) + " core(s) \n"
+    if grid_search:
+        classifier_config_string += "\t- Got configuration using randomized search with " + str(
+            n_iter) + " iterations \n"
+    classifier_config_string += "\n\n"
+    classifier_interpret_string = classifier.get_interpretation(
+        output_file_name,
+        y_test)
+    return classifier_config_string, classifier_interpret_string
 
 
-def getMetricScore(metric, y_train, y_train_pred, y_test, y_test_pred):
-    metricModule = getattr(metrics, metric[0])
+def get_metric_score(metric, y_train, y_train_pred, y_test, y_test_pred):
+    metric_module = getattr(metrics, metric[0])
     if metric[1] is not None:
-        metricKWARGS = dict((index, metricConfig) for index, metricConfig in
-                            enumerate(metric[1]))
+        metric_kwargs = dict((index, metricConfig) for index, metricConfig in
+                             enumerate(metric[1]))
     else:
-        metricKWARGS = {}
-    metricScoreTrain = metricModule.score(y_train, y_train_pred)
-    metricScoreTest = metricModule.score(y_test, y_test_pred)
-    metricScoreString = "\tFor " + metricModule.get_config(
-        **metricKWARGS) + " : "
-    metricScoreString += "\n\t\t- Score on train : " + str(metricScoreTrain)
-    metricScoreString += "\n\t\t- Score on test : " + str(metricScoreTest)
-    metricScoreString += "\n"
-    return metricScoreString, [metricScoreTrain, metricScoreTest]
+        metric_kwargs = {}
+    metric_score_train = metric_module.score(y_train, y_train_pred)
+    metric_score_test = metric_module.score(y_test, y_test_pred)
+    metric_score_string = "\tFor " + metric_module.get_config(
+        **metric_kwargs) + " : "
+    metric_score_string += "\n\t\t- Score on train : " + str(metric_score_train)
+    metric_score_string += "\n\t\t- Score on test : " + str(metric_score_test)
+    metric_score_string += "\n"
+    return metric_score_string, [metric_score_train, metric_score_test]
 
 
-def execute(name, learningRate, KFolds, nbCores, gridSearch, metrics_list,
-            nIter,
-            feat, CL_type, clKWARGS, classLabelsNames,
+def execute(name, learning_rate, k_folds, nb_cores, grid_search, metrics_list,
+            n_iter,
+            feat, cl_type, cl_kwargs, class_labels_names,
             shape, y_train, y_train_pred, y_test, y_test_pred, time,
             random_state, classifier, output_file_name):
-    metricsScores = {}
-    metricModule = getattr(metrics, metrics_list[0][0])
-    trainScore = metricModule.score(y_train, y_train_pred)
-    testScore = metricModule.score(y_test, y_test_pred)
-    stringAnalysis = "Classification on " + name + " database for " + feat + " with " + CL_type + ".\n\n"
-    stringAnalysis += metrics_list[0][0] + " on train : " + str(
-        trainScore) + "\n" + \
-                      metrics_list[0][0] + " on test : " + str(
-        testScore) + "\n\n"
-    stringAnalysis += getDBConfigString(name, feat, learningRate, shape,
-                                        classLabelsNames, KFolds)
-    classifierConfigString, classifierIntepretString = getClassifierConfigString(
-        gridSearch, nbCores, nIter, clKWARGS, classifier, output_file_name,
+    metrics_scores = {}
+    metric_module = getattr(metrics, metrics_list[0][0])
+    train_score = metric_module.score(y_train, y_train_pred)
+    test_score = metric_module.score(y_test, y_test_pred)
+    string_analysis = "Classification on " + name + " database for " + feat + " with " + cl_type + ".\n\n"
+    string_analysis += metrics_list[0][0] + " on train : " + str(
+        train_score) + "\n" + \
+                       metrics_list[0][0] + " on test : " + str(
+        test_score) + "\n\n"
+    string_analysis += get_db_config_string(name, feat, learning_rate, shape,
+                                            class_labels_names, k_folds)
+    classifier_config_string, classifier_interpret_string = get_classifier_config_string(
+        grid_search, nb_cores, n_iter, cl_kwargs, classifier, output_file_name,
         y_test)
-    stringAnalysis += classifierConfigString
+    string_analysis += classifier_config_string
     for metric in metrics_list:
-        metricString, metricScore = getMetricScore(metric, y_train,
-                                                   y_train_pred, y_test,
-                                                   y_test_pred)
-        stringAnalysis += metricString
-        metricsScores[metric[0]] = metricScore
-        # stringAnalysis += getMetricScore(metric, y_train, y_train_pred, y_test, y_test_pred)
+        metric_string, metric_score = get_metric_score(metric, y_train,
+                                                       y_train_pred, y_test,
+                                                       y_test_pred)
+        string_analysis += metric_string
+        metrics_scores[metric[0]] = metric_score
+        # string_analysis += get_metric_score(metric, y_train, y_train_pred, y_test, y_test_pred)
         # if metric[1] is not None:
         #     metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
         # else:
         #     metricKWARGS = {}
-        # metricsScores[metric[0]] = [getattr(metrics, metric[0]).score(y_train, y_train_pred),
+        # metrics_scores[metric[0]] = [getattr(metrics, metric[0]).score(y_train, y_train_pred),
         #                             getattr(metrics, metric[0]).score(y_test, y_test_pred)]
-    stringAnalysis += "\n\n Classification took " + str(hms(seconds=int(time)))
-    stringAnalysis += "\n\n Classifier Interpretation : \n"
-    stringAnalysis += classifierIntepretString
+    string_analysis += "\n\n Classification took " + str(hms(seconds=int(time)))
+    string_analysis += "\n\n Classifier Interpretation : \n"
+    string_analysis += classifier_interpret_string
 
-    imageAnalysis = {}
-    return stringAnalysis, imageAnalysis, metricsScores
+    image_analysis = {}
+    return string_analysis, image_analysis, metrics_scores
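
For reference, get_metric_score only looks up a scoring module by name and reports the train and test scores. A minimal stand-alone sketch of the same idea, with sklearn's accuracy_score standing in for the platform's metrics module and made-up predictions:

    import numpy as np
    from sklearn.metrics import accuracy_score

    # Toy predictions; mirrors what get_metric_score reports for one metric entry.
    y_train, y_train_pred = np.array([0, 1, 1, 0]), np.array([0, 1, 0, 0])
    y_test, y_test_pred = np.array([1, 0, 1]), np.array([1, 0, 0])

    train_score = accuracy_score(y_train, y_train_pred)
    test_score = accuracy_score(y_test, y_test_pred)
    report = ("\tFor accuracy_score : "
              "\n\t\t- Score on train : " + str(train_score) +
              "\n\t\t- Score on test : " + str(test_score) + "\n")
    print(report)  # [train_score, test_score] is what ends up in metrics_scores
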
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py b/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py
index cc8f791b46e523e07be6b693964fe06bc9133f22..41f6cb13bcf515930f069e2ec2ec5ac922c91436 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py
@@ -16,8 +16,8 @@ from . import monoview_utils
 from .analyze_result import execute
 # Import own modules
 from .. import monoview_classifiers
-from ..utils.dataset import extract_subset, HDF5Dataset
 from ..utils import hyper_parameter_search
+from ..utils.dataset import extract_subset, HDF5Dataset
 from ..utils.multiclass import get_mc_estim
 
 # Author-Info
@@ -28,28 +28,34 @@ __status__ = "Prototype"  # Production, Development, Prototype
 # __date__ = 2016 - 03 - 25
 
 
-def exec_monoview_multicore(directory, name, labels_names, classification_indices,
-                           k_folds, dataset_file_index, database_type,
-                           path, random_state, labels,
-                           hyper_param_search="randomized_search",
-                           metrics=[["accuracy_score", None]], n_iter=30,
-                           **args):
-    dataset_var = HDF5Dataset(hdf5_file=h5py.File(path + name + str(dataset_file_index) + ".hdf5", "r"))
+def exec_monoview_multicore(directory, name, labels_names,
+                            classification_indices,
+                            k_folds, dataset_file_index, database_type,
+                            path, random_state, labels,
+                            hyper_param_search="randomized_search",
+                            metrics=[["accuracy_score", None]], n_iter=30,
+                            **args):
+    dataset_var = HDF5Dataset(
+        hdf5_file=h5py.File(path + name + str(dataset_file_index) + ".hdf5",
+                            "r"))
     neededViewIndex = args["view_index"]
     X = dataset_var.get_v(neededViewIndex)
     Y = labels
     return exec_monoview(directory, X, Y, name, labels_names,
-                         classification_indices, k_folds, 1, database_type, path,
+                         classification_indices, k_folds, 1, database_type,
+                         path,
                          random_state, hyper_param_search=hyper_param_search,
                          metrics=metrics, n_iter=n_iter,
-                         view_name=dataset_var.get_view_name(args["view_index"]),
+                         view_name=dataset_var.get_view_name(
+                             args["view_index"]),
                          **args)
 
 
 def exec_monoview(directory, X, Y, name, labels_names, classification_indices,
                   KFolds, nbCores, databaseType, path,
                   random_state, hyper_param_search="randomized_search",
-                  metrics=[["accuracy_score", None]], n_iter=30, view_name="", **args):
+                  metrics=[["accuracy_score", None]], n_iter=30, view_name="",
+                  **args):
     logging.debug("Start:\t Loading data")
     kwargs, \
     t_start, \
@@ -58,7 +64,8 @@ def exec_monoview(directory, X, Y, name, labels_names, classification_indices,
     X, \
     learningRate, \
     labelsString, \
-    outputFileName = initConstants(args, X, classification_indices, labels_names,
+    outputFileName = initConstants(args, X, classification_indices,
+                                   labels_names,
                                    name, directory, view_name)
     logging.debug("Done:\t Loading data")
 
@@ -70,7 +77,8 @@ def exec_monoview(directory, X, Y, name, labels_names, classification_indices,
         + str(nbCores) + ", algorithm : " + classifier_name)
 
     logging.debug("Start:\t Determine Train/Test split")
-    X_train, y_train, X_test, y_test = init_train_test(X, Y, classification_indices)
+    X_train, y_train, X_test, y_test = init_train_test(X, Y,
+                                                       classification_indices)
 
     logging.debug("Info:\t Shape X_train:" + str(
         X_train.shape) + ", Length of y_train:" + str(len(y_train)))
@@ -81,11 +89,12 @@ def exec_monoview(directory, X, Y, name, labels_names, classification_indices,
     logging.debug("Start:\t Generate classifier args")
     classifier_module = getattr(monoview_classifiers, classifier_name)
     classifier_class_name = classifier_module.classifier_class_name
-    cl_kwargs, testFoldsPreds = getHPs(classifier_module, hyper_param_search,
-                                       n_iter, classifier_name, classifier_class_name,
-                                       X_train, y_train,
-                                       random_state, outputFileName,
-                                       KFolds, nbCores, metrics, kwargs)
+    cl_kwargs, test_folds_preds = getHPs(classifier_module, hyper_param_search,
+                                         n_iter, classifier_name,
+                                         classifier_class_name,
+                                         X_train, y_train,
+                                         random_state, outputFileName,
+                                         KFolds, nbCores, metrics, kwargs)
     logging.debug("Done:\t Generate classifier args")
 
     logging.debug("Start:\t Training")
@@ -110,7 +119,6 @@ def exec_monoview(directory, X, Y, name, labels_names, classification_indices,
     for testIndex, index in enumerate(classification_indices[1]):
         full_pred[index] = y_test_pred[testIndex]
 
-
     logging.debug("Done:\t Predicting")
 
     t_end = time.time() - t_start
@@ -118,26 +126,29 @@ def exec_monoview(directory, X, Y, name, labels_names, classification_indices,
         "Info:\t Time for training and predicting: " + str(t_end) + "[s]")
 
     logging.debug("Start:\t Getting results")
-    stringAnalysis, \
-    imagesAnalysis, \
-    metricsScores = execute(name, classification_indices, KFolds, nbCores,
-                            hyper_parameter_search, metrics, n_iter, view_name, classifier_name,
-                            cl_kwargs, labels_names, X.shape,
-                            y_train, y_train_pred, y_test, y_test_pred, t_end,
-                            random_state, classifier, outputFileName)
+    string_analysis, \
+    images_analysis, \
+    metrics_scores = execute(name, classification_indices, KFolds, nbCores,
+                             hyper_param_search, metrics, n_iter, view_name,
+                             classifier_name,
+                             cl_kwargs, labels_names, X.shape,
+                             y_train, y_train_pred, y_test, y_test_pred, t_end,
+                             random_state, classifier, outputFileName)
     logging.debug("Done:\t Getting results")
 
     logging.debug("Start:\t Saving preds")
-    saveResults(stringAnalysis, outputFileName, full_pred, y_train_pred,
-                y_train, imagesAnalysis, y_test)
+    save_results(string_analysis, outputFileName, full_pred, y_train_pred,
+                 y_train, images_analysis, y_test)
     logging.info("Done:\t Saving results")
 
-    viewIndex = args["view_index"]
-    if testFoldsPreds is None:
-        testFoldsPreds = y_train_pred
-    return monoview_utils.MonoviewResult(viewIndex, classifier_name, view_name, metricsScores,
+    view_index = args["view_index"]
+    if test_folds_preds is None:
+        test_folds_preds = y_train_pred
+    return monoview_utils.MonoviewResult(view_index, classifier_name, view_name,
+                                         metrics_scores,
                                          full_pred, cl_kwargs,
-                                         testFoldsPreds, classifier, X_train.shape[1])
+                                         test_folds_preds, classifier,
+                                         X_train.shape[1])
 
 
 def initConstants(args, X, classification_indices, labels_names,
@@ -153,8 +164,8 @@ def initConstants(args, X, classification_indices, labels_names,
     labels_string = "-".join(labels_names)
     cl_type_string = cl_type
     output_file_name = os.path.join(directory, cl_type_string, view_name,
-                                  cl_type_string + '-' + name + "-" +
-                                  view_name + "-")
+                                    cl_type_string + '-' + name + "-" +
+                                    view_name + "-")
     if not os.path.exists(os.path.dirname(output_file_name)):
         try:
             os.makedirs(os.path.dirname(output_file_name))
@@ -164,196 +175,72 @@ def initConstants(args, X, classification_indices, labels_names,
     return kwargs, t_start, view_name, cl_type, X, learning_rate, labels_string, output_file_name
 
 
-def init_train_test(X, Y, classificationIndices):
-    trainIndices, testIndices = classificationIndices
-    X_train = extract_subset(X, trainIndices)
-    X_test = extract_subset(X, testIndices)
-    y_train = Y[trainIndices]
-    y_test = Y[testIndices]
+def init_train_test(X, Y, classification_indices):
+    train_indices, test_indices = classification_indices
+    X_train = extract_subset(X, train_indices)
+    X_test = extract_subset(X, test_indices)
+    y_train = Y[train_indices]
+    y_test = Y[test_indices]
     return X_train, y_train, X_test, y_test
 
 
-def getHPs(classifierModule, hyper_param_search, nIter, classifier_module_name,
+def getHPs(classifier_module, hyper_param_search, nIter, classifier_module_name,
            classifier_class_name, X_train, y_train,
            random_state,
-           outputFileName, KFolds, nbCores, metrics, kwargs):
+           output_file_name, k_folds, nb_cores, metrics, kwargs):
     if hyper_param_search != "None":
         logging.debug(
             "Start:\t " + hyper_param_search + " best settings with " + str(
                 nIter) + " iterations for " + classifier_module_name)
-        classifierHPSearch = getattr(hyper_parameter_search, hyper_param_search.split("-")[0])
-        clKWARGS, testFoldsPreds = classifierHPSearch(X_train, y_train, "monoview",
-                                                      random_state,
-                                                      outputFileName,
-                                                      classifierModule,
-                                                      classifier_class_name,
-                                                      folds=KFolds,
-                                                      nb_cores=nbCores,
-                                                      metric=metrics[0],
-                                                      n_iter=nIter,
-                                                      classifier_kwargs=kwargs[
-                                                          classifier_module_name])
+        classifier_hp_search = getattr(hyper_parameter_search,
+                                       hyper_param_search.split("-")[0])
+        cl_kwargs, test_folds_preds = classifier_hp_search(X_train, y_train,
+                                                           "monoview",
+                                                           random_state,
+                                                           output_file_name,
+                                                           classifier_module,
+                                                           classifier_class_name,
+                                                           folds=k_folds,
+                                                           nb_cores=nb_cores,
+                                                           metric=metrics[0],
+                                                           n_iter=nIter,
+                                                           classifier_kwargs=
+                                                           kwargs[
+                                                               classifier_module_name])
         logging.debug("Done:\t " + hyper_param_search + " best settings")
     else:
-        clKWARGS = kwargs[classifier_module_name]
-        testFoldsPreds = None
-    return clKWARGS, testFoldsPreds
-
-
-def saveResults(stringAnalysis, outputFileName, full_labels_pred, y_train_pred,
-                y_train, imagesAnalysis, y_test):
-    logging.info(stringAnalysis)
-    outputTextFile = open(outputFileName + 'summary.txt', 'w')
-    outputTextFile.write(stringAnalysis)
-    outputTextFile.close()
-    np.savetxt(outputFileName + "full_pred.csv",
+        cl_kwargs = kwargs[classifier_module_name]
+        test_folds_preds = None
+    return cl_kwargs, test_folds_preds
+
+
+def save_results(string_analysis, output_file_name, full_labels_pred,
+                 y_train_pred,
+                 y_train, images_analysis, y_test):
+    logging.info(string_analysis)
+    output_text_file = open(output_file_name + 'summary.txt', 'w')
+    output_text_file.write(string_analysis)
+    output_text_file.close()
+    np.savetxt(output_file_name + "full_pred.csv",
                full_labels_pred.astype(np.int16), delimiter=",")
-    np.savetxt(outputFileName + "train_pred.csv", y_train_pred.astype(np.int16),
+    np.savetxt(output_file_name + "train_pred.csv",
+               y_train_pred.astype(np.int16),
                delimiter=",")
-    np.savetxt(outputFileName + "train_labels.csv", y_train.astype(np.int16),
+    np.savetxt(output_file_name + "train_labels.csv", y_train.astype(np.int16),
                delimiter=",")
-    np.savetxt(outputFileName + "test_labels.csv", y_test.astype(np.int16),
+    np.savetxt(output_file_name + "test_labels.csv", y_test.astype(np.int16),
                delimiter=",")
 
-    if imagesAnalysis is not None:
-        for imageName in imagesAnalysis:
-            if os.path.isfile(outputFileName + imageName + ".png"):
+    if images_analysis is not None:
+        for image_name in images_analysis:
+            if os.path.isfile(output_file_name + image_name + ".png"):
                 for i in range(1, 20):
-                    testFileName = outputFileName + imageName + "-" + str(
+                    test_file_name = output_file_name + image_name + "-" + str(
                         i) + ".png"
-                    if not os.path.isfile(testFileName):
-                        imagesAnalysis[imageName].savefig(testFileName, transparent=True)
+                    if not os.path.isfile(test_file_name):
+                        images_analysis[image_name].savefig(test_file_name,
+                                                            transparent=True)
                         break
 
-            imagesAnalysis[imageName].savefig(
-                outputFileName + imageName + '.png', transparent=True)
-
-
-# if __name__ == '__main__':
-#     """The goal of this part of the module is to be able to execute a monoview experimentation
-#      on a node of a cluster independently.
-#      So one need to fill in all the ExecMonoview function arguments with the parse arg function
-#      It could be a good idea to use pickle to store all the 'simple' args in order to reload them easily"""
-#     import argparse
-#     import pickle
-#
-#     from ..utils import dataset
-#
-#     parser = argparse.ArgumentParser(
-#         description='This methods is used to execute a multiclass classification with one single view. ',
-#         formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-#
-#     groupStandard = parser.add_argument_group('Standard arguments')
-#     groupStandard.add_argument('-log', action='store_true',
-#                                help='Use option to activate Logging to Console')
-#     groupStandard.add_argument('--name', metavar='STRING', action='store',
-#                                help='Name of Database', default='Plausible')
-#     groupStandard.add_argument('--cl_name', metavar='STRING', action='store',
-#                                help='THe name of the monoview classifier to use',
-#                                default='DecisionTree')
-#     groupStandard.add_argument('--view', metavar='STRING', action='store',
-#                                help='Name of the view used', default='View0')
-#     groupStandard.add_argument('--pathF', metavar='STRING', action='store',
-#                                help='Path to the database hdf5 file',
-#                                default='../../../data/Plausible')
-#     groupStandard.add_argument('--directory', metavar='STRING', action='store',
-#                                help='Path of the output directory', default='')
-#     groupStandard.add_argument('--labels_names', metavar='STRING',
-#                                action='store', nargs='+',
-#                                help='Name of the labels used for classification',
-#                                default=['Yes', 'No'])
-#     groupStandard.add_argument('--classificationIndices', metavar='STRING',
-#                                action='store',
-#                                help='Path to the classificationIndices pickle file',
-#                                default='')
-#     groupStandard.add_argument('--KFolds', metavar='STRING', action='store',
-#                                help='Path to the k_folds pickle file',
-#                                default='')
-#     groupStandard.add_argument('--nbCores', metavar='INT', action='store',
-#                                help='Number of cores, -1 for all',
-#                                type=int, default=1)
-#     groupStandard.add_argument('--randomState', metavar='INT', action='store',
-#                                help='Seed for the random state or pickable randomstate file',
-#                                default=42)
-#     groupStandard.add_argument('--hyper_param_search', metavar='STRING',
-#                                action='store',
-#                                help='The type of method used to search the best set of hyper parameters',
-#                                default='randomizedSearch')
-#     groupStandard.add_argument('--metrics', metavar='STRING', action='store',
-#                                help='Path to the pickle file describing the metricsused to analyze the performance',
-#                                default='')
-#     groupStandard.add_argument('--kwargs', metavar='STRING', action='store',
-#                                help='Path to the pickle file containing the key-words arguments used for classification',
-#                                default='')
-#     groupStandard.add_argument('--nIter', metavar='INT', action='store',
-#                                help='Number of itetarion in hyper parameter search',
-#                                type=int,
-#                                default=10)
-#
-#     args = parser.parse_args()
-#
-#     directory = args.directory
-#     name = args.name
-#     classifierName = args.cl_name
-#     labels_names = args.labels_names
-#     viewName = args.view
-#     with open(args.classificationIndices, 'rb') as handle:
-#         classificationIndices = pickle.load(handle)
-#     with open(args.KFolds, 'rb') as handle:
-#         KFolds = pickle.load(handle)
-#     nbCores = args.nbCores
-#     path = args.pathF
-#     with open(args.randomState, 'rb') as handle:
-#         randomState = pickle.load(handle)
-#     hyper_param_search = args.hyper_param_search
-#     with open(args.metrics, 'rb') as handle:
-#         metrics = pickle.load(handle)
-#     nIter = args.nIter
-#     with open(args.kwargs, 'rb') as handle:
-#         kwargs = pickle.load(handle)
-#
-#     databaseType = None
-#
-#     # Extract the data using MPI
-#     X, Y = dataset.get_monoview_shared(path, name, viewName)
-#
-#     # Init log
-#     logFileName = time.strftime(
-#         "%Y_%m_%d-%H_%M_%S") + "-" + name + "-" + viewName + "-" + classifierName + '-LOG'
-#     if not os.path.exists(os.path.dirname(directory + logFileName)):
-#         try:
-#             os.makedirs(os.path.dirname(directory + logFileName))
-#         except OSError as exc:
-#             if exc.errno != errno.EEXIST:
-#                 raise
-#     logFile = directory + logFileName
-#     if os.path.isfile(logFile + ".log"):
-#         for i in range(1, 20):
-#             testFileName = logFileName + "-" + str(i) + ".log"
-#             if not (os.path.isfile(directory + testFileName)):
-#                 logFile = directory + testFileName
-#                 break
-#     else:
-#         logFile += ".log"
-#     logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
-#                         filename=logFile, level=logging.DEBUG,
-#                         filemode='w')
-#     if args.log:
-#         logging.getLogger().addHandler(logging.StreamHandler())
-#
-#     # Computing on multiple cores
-#     res = ExecMonoview(directory, X, Y, name, labels_names,
-#                        classificationIndices, KFolds, nbCores, databaseType,
-#                        path,
-#                        randomState, hyper_param_search=hyper_param_search,
-#                        metrics=metrics, nIter=nIter, **kwargs)
-#
-#     with open(directory + "res.pickle", "wb") as handle:
-#         pickle.dump(res, handle)
-
-    # Pickle the res in a file to be reused.
-    # Go put a token in the token files without breaking everything.
-
-    # Need to write a function to be  able to know the timeu sed
-    # for a monoview experimentation approximately and the ressource it uses to write automatically the file in the shell
-    # it will have to be a not-too close approx as the taskswont be long and Ram-o-phage
+            images_analysis[image_name].savefig(
+                output_file_name + image_name + '.png', transparent=True)
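
The train/test handling in exec_monoview is plain index-based slicing: classification_indices carries a pair of index arrays and init_train_test pulls out the matching rows. The same idea in bare numpy (extract_subset replaced by fancy indexing, data invented for illustration):

    import numpy as np

    rng = np.random.RandomState(42)
    X = rng.rand(10, 4)                       # one view's feature matrix
    Y = rng.randint(0, 2, 10)                 # labels
    classification_indices = (np.array([0, 2, 4, 6, 8]),   # train indices
                              np.array([1, 3, 5, 7, 9]))   # test indices

    train_indices, test_indices = classification_indices
    X_train, X_test = X[train_indices], X[test_indices]
    y_train, y_test = Y[train_indices], Y[test_indices]
    print(X_train.shape, X_test.shape)        # (5, 4) (5, 4)
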
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview/monoview_utils.py b/multiview_platform/mono_multi_view_classifiers/monoview/monoview_utils.py
index 8e7381c4ccd741a90028c8ec05210ffe819591b8..4a0a3b8e0f345d1dd807991461c5bcdaebacaf17 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview/monoview_utils.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview/monoview_utils.py
@@ -1,14 +1,10 @@
 import pickle
 
 import matplotlib.pyplot as plt
-from abc import abstractmethod
 import numpy as np
 from matplotlib.ticker import FuncFormatter
 from scipy.stats import uniform, randint
-from sklearn.base import BaseEstimator, ClassifierMixin
-from sklearn.model_selection import RandomizedSearchCV
 
-from .. import metrics
 from ..utils.base import BaseClassifier
 
 # Author-Info
@@ -63,19 +59,20 @@ def compute_possible_combinations(params_dict):
     return n_possibs
 
 
-def genTestFoldsPreds(X_train, y_train, KFolds, estimator):
-    testFoldsPreds = []
-    trainIndex = np.arange(len(y_train))
-    folds = KFolds.split(trainIndex, y_train)
-    foldLengths = np.zeros(KFolds.n_splits, dtype=int)
-    for foldIndex, (trainIndices, testIndices) in enumerate(folds):
-        foldLengths[foldIndex] = len(testIndices)
-        estimator.fit(X_train[trainIndices], y_train[trainIndices])
-        testFoldsPreds.append(estimator.predict(X_train[trainIndices]))
-    minFoldLength = foldLengths.min()
-    testFoldsPreds = np.array(
-        [testFoldPreds[:minFoldLength] for testFoldPreds in testFoldsPreds])
-    return testFoldsPreds
+def gen_test_folds_preds(X_train, y_train, KFolds, estimator):
+    test_folds_preds = []
+    train_index = np.arange(len(y_train))
+    folds = KFolds.split(train_index, y_train)
+    fold_lengths = np.zeros(KFolds.n_splits, dtype=int)
+    for fold_index, (train_indices, test_indices) in enumerate(folds):
+        fold_lengths[fold_index] = len(test_indices)
+        estimator.fit(X_train[train_indices], y_train[train_indices])
+        test_folds_preds.append(estimator.predict(X_train[test_indices]))
+    min_fold_length = fold_lengths.min()
+    test_folds_preds = np.array(
+        [test_fold_preds[:min_fold_length] for test_fold_preds in
+         test_folds_preds])
+    return test_folds_preds
 
 
 class CustomRandint:
@@ -83,7 +80,7 @@ class CustomRandint:
     It can be used with a multiplier agrument to be able to perform more complex generation
     for example 10 e -(randint)"""
 
-    def __init__(self,low=0, high=0, multiplier=""):
+    def __init__(self, low=0, high=0, multiplier=""):
         self.randint = randint(low, high)
         self.multiplier = multiplier
 
@@ -115,7 +112,7 @@ class CustomUniform:
             return unif
 
 
-class BaseMonoviewClassifier(BaseClassifier):#ClassifierMixin):
+class BaseMonoviewClassifier(BaseClassifier):
 
     def get_config(self):
         if self.param_names:
@@ -125,33 +122,33 @@ class BaseMonoviewClassifier(BaseClassifier):#ClassifierMixin):
 
     def get_feature_importance(self, directory, nb_considered_feats=50):
         """Used to generate a graph and a pickle dictionary representing feature importances"""
-        featureImportances = self.feature_importances_
-        sortedArgs = np.argsort(-featureImportances)
-        featureImportancesSorted = featureImportances[sortedArgs][
-                                   :nb_considered_feats]
-        featureIndicesSorted = sortedArgs[:nb_considered_feats]
+        feature_importances = self.feature_importances_
+        sorted_args = np.argsort(-feature_importances)
+        feature_importances_sorted = feature_importances[sorted_args][
+                                     :nb_considered_feats]
+        feature_indices_sorted = sorted_args[:nb_considered_feats]
         fig, ax = plt.subplots()
-        x = np.arange(len(featureIndicesSorted))
+        x = np.arange(len(feature_indices_sorted))
         formatter = FuncFormatter(percent)
         ax.yaxis.set_major_formatter(formatter)
-        plt.bar(x, featureImportancesSorted)
+        plt.bar(x, feature_importances_sorted)
         plt.title("Importance depending on feature")
         fig.savefig(directory + "feature_importances.png", transparent=True)
         plt.close()
-        featuresImportancesDict = dict((featureIndex, featureImportance)
-                                       for featureIndex, featureImportance in
-                                       enumerate(featureImportances)
-                                       if featureImportance != 0)
+        features_importances_dict = dict((featureIndex, featureImportance)
+                                         for featureIndex, featureImportance in
+                                         enumerate(feature_importances)
+                                         if featureImportance != 0)
         with open(directory + 'feature_importances.pickle', 'wb') as handle:
-            pickle.dump(featuresImportancesDict, handle)
-        interpretString = "Feature importances : \n"
-        for featureIndex, featureImportance in zip(featureIndicesSorted,
-                                                   featureImportancesSorted):
+            pickle.dump(features_importances_dict, handle)
+        interpret_string = "Feature importances : \n"
+        for featureIndex, featureImportance in zip(feature_indices_sorted,
+                                                   feature_importances_sorted):
             if featureImportance > 0:
-                interpretString += "- Feature index : " + str(featureIndex) + \
-                                   ", feature importance : " + str(
+                interpret_string += "- Feature index : " + str(featureIndex) + \
+                                    ", feature importance : " + str(
                     featureImportance) + "\n"
-        return interpretString
+        return interpret_string
 
     def get_name_for_fusion(self):
         return self.__class__.__name__[:4]
@@ -164,7 +161,8 @@ def percent(x, pos):
 
 class MonoviewResult(object):
     def __init__(self, view_index, classifier_name, view_name, metrics_scores,
-                 full_labels_pred, classifier_config, test_folds_preds, classifier, n_features):
+                 full_labels_pred, classifier_config, test_folds_preds,
+                 classifier, n_features):
         self.view_index = view_index
         self.classifier_name = classifier_name
         self.view_name = view_name
@@ -178,6 +176,7 @@ class MonoviewResult(object):
     def get_classifier_name(self):
         return self.classifier_name + "-" + self.view_name
 
+
 def get_accuracy_graph(plotted_data, classifier_name, file_name,
                        name="Accuracies", bounds=None, bound_name=None,
                        boosting_bound=None, set="train", zero_to_one=True):
@@ -204,270 +203,3 @@ def get_accuracy_graph(plotted_data, classifier_name, file_name,
         ax.legend((scat,), (name,))
     f.savefig(file_name, transparent=True)
     plt.close()
-
-
-
-# def isUseful(labelSupports, index, CLASS_LABELS, labelDict):
-#     if labelSupports[labelDict[CLASS_LABELS[index]]] != 0:
-#         labelSupports[labelDict[CLASS_LABELS[index]]] -= 1
-#         return True, labelSupports
-#     else:
-#         return False, labelSupports
-#
-#
-# def getLabelSupports(CLASS_LABELS):
-#     labels = set(CLASS_LABELS)
-#     supports = [CLASS_LABELS.tolist().count(label) for label in labels]
-#     return supports, dict((label, index) for label, index in zip(labels, range(len(labels))))
-#
-#
-# def splitDataset(LABELS, NB_CLASS, LEARNING_RATE, DATASET_LENGTH, random_state):
-#     validationIndices = extractRandomTrainingSet(LABELS, 1 - LEARNING_RATE, DATASET_LENGTH, NB_CLASS, random_state)
-#     validationIndices.sort()
-#     return validationIndices
-#
-#
-# def extractRandomTrainingSet(CLASS_LABELS, LEARNING_RATE, DATASET_LENGTH, NB_CLASS, random_state):
-#     labelSupports, labelDict = getLabelSupports(np.array(CLASS_LABELS))
-#     nbTrainingExamples = [int(support * LEARNING_RATE) for support in labelSupports]
-#     trainingExamplesIndices = []
-#     usedIndices = []
-#     while nbTrainingExamples != [0 for i in range(NB_CLASS)]:
-#         isUseFull = False
-#         index = int(random_state.randint(0, DATASET_LENGTH - 1))
-#         if index not in usedIndices:
-#             isUseFull, nbTrainingExamples = isUseful(nbTrainingExamples, index, CLASS_LABELS, labelDict)
-#         if isUseFull:
-#             trainingExamplesIndices.append(index)
-#             usedIndices.append(index)
-#     return trainingExamplesIndices
-
-
-##### Generating Test and Train data
-# def calcTrainTestOwn(X,y,split):
-#
-#     classLabels = pd.Series(y)
-#
-#
-#     data_train = []
-#     data_test = []
-#     label_train = []
-#     label_test = []
-#
-#     # Reminder to store position in array
-#     reminder = 0
-#
-#     for i in classLabels.unique():
-#         # Calculate the number of samples per class
-#         count = (len(classLabels[classLabels==i]))
-#
-#         # Min/Max: To determine the range to read from array
-#         min_train = reminder
-#         max_train = int(round(count * split)) +1 +reminder
-#         min_test = max_train
-#         max_test = count + reminder
-#
-#         #Extend the respective list with ClassLabels(y)/Features(X)
-#         label_train.extend(classLabels[min_train:max_train])
-#         label_test.extend(classLabels[min_test:max_test])
-#         data_train.extend(X[min_train:max_train])
-#         data_test.extend(X[min_test:max_test])
-#
-#         reminder = reminder + count
-#
-#     return np.array(data_train), np.array(data_test), np.array(label_train).astype(int), np.array(label_test).astype(int)
-
-# def calcTrainTest(X,y,split):
-#     X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=split)
-#
-#     return (X_train, X_test, y_train, y_test)
-
-# Classifiers
-
-# ### Random Forest
-#
-# What are they?
-# - Machine learning algorithm built for prediction tasks
-#
-# #### Pros:
-# - Automatically model non-linear relations and interactions between variables. Perfect collinearity doesn't matter.
-# - Easy to tune
-# - Relatively easy to understand everything about them
-# - Flexible enough to handle regression and classification tasks
-# - Is useful as a step in exploratory data analysis
-# - Can handle high dimensional data
-# - Have a built in method of checking to see model accuracy
-# - In general, beats most models at most prediction tasks
-#
-# #### Cons:
-# - ?
-#
-#
-# #### RF Algo
-#
-# The big idea: Combine a bunch of terrible decision trees into one awesome model.
-#
-# For each tree in the forest:
-# 1. Take a bootstrap sample of the data
-# 2. Randomly select some variables.
-# 3. For each variable selected, find the split point which minimizes MSE (or Gini Impurity or Information Gain if classification).
-# 4. Split the data using the variable with the lowest MSE (or other stat).
-# 5. Repeat step 2 through 4 (randomly selecting new sets of variables at each split) until some stopping condition is satisfied or all the data is exhausted.
-#
-# Repeat this process to build several trees.
-#
-# To make a prediction, run an observation down several trees and average the predicted values from all the trees (for regression) or find the most popular class predicted (if classification)
-#
-# #### Most important parameters (and what they mean)
-#
-# - **Parameters that make the model better**
-#     - **n_estimators:** Number of Trees. Choose a number as high as your computer can handle
-#     - **max_features:** Number of features to consider for the best split: Here all!
-#     - **min_samples_leaf:** Minimum number of samples in newly created leaves: Try [1,2,3]. If 3 is best: try higher numbers
-# - **Parameters that will make it easier to train your model**
-#     - **n_jobs:** Number of used CPU's. -1==all. Use %timeit to see speed improvement
-#         - **Problem:** Nikolas PC -> error with multiple CPU...
-#     - **random_state:** Set to 42 if you want others to replicate your results
-#     - **oob_score:** Random Forest Validation method: out-of-bag predictions
-#
-# #### OOB Predictions
-# About a third of observations don't show up in a bootstrap sample.
-#
-# Because an individual tree in the forest is made from a bootstrap sample, it means that about a third of the data was not used to build that tree. We can track which observations were used to build which trees.
-#
-# **Here is the magic.**
-#
-# After the forest is built, we take each observation in the dataset and identify which trees used the observation and which trees did not use the observation (based on the bootstrap sample). We use the trees the observation was not used to build to predict the true value of the observation. About a third of the trees in the forest will not use any specific observation from the dataset.
-#
-# OOB predictions are similar to following awesome, but computationally expensive method:
-#
-# 1. Train a model with n_estimators trees, but exclude one observation from the dataset.
-# 2. Use the trained model to predict the excluded observation. Record the prediction.
-# 3. Repeat this process for every single observation in the dataset.
-# 4. Collect all your final predictions. These will be similar to your oob prediction errors.
-#
-# The leave-one-out method will take n_estimators*time_to_train_one_model*n_observations to run.
-#
-# The oob method will take n_estimators x(times) time_to_train_one_model x(times) 3 to run (the x(times)3 is because if you want to get an accuracy estimate of a 100 tree forest, you will need to train 300 trees. Why? Because with 300 trees each observation will have about 100 trees it was not used to build that can be used for the oob_predictions).
-#
-# This means the oob method is n_observations/3 times faster to train then the leave-one-out method.
-#
-
-# X_test: Test data
-# y_test: Test Labels
-# num_estimators: number of trees
-# def MonoviewClassifRandomForest(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
-#     num_estimators = kwargs["classifier__n_estimators"]
-#     # PipeLine with RandomForest classifier
-#     pipeline_rf = Pipeline([('classifier', RandomForestClassifier())])
-#
-#     # Parameters for GridSearch: Number of Trees
-#     # can be extended with: oob_score, min_samples_leaf, max_features
-#     param_rf = kwargs
-#
-#     # pipeline: Gridsearch avec le pipeline comme estimator
-#     # param: pour obtenir le meilleur model il va essayer tous les possiblites
-#     # refit: pour utiliser le meilleur model apres girdsearch
-#     # n_jobs: Nombre de CPU (Mon ordi a des problemes avec -1 (Bug Python 2.7 sur Windows))
-#     # scoring: scoring...
-#     # cv: Nombre de K-Folds pour CV
-#     grid_rf = GridSearchCV(
-#         pipeline_rf,
-#         param_grid=param_rf,
-#         refit=True,
-#         n_jobs=nbCores,
-#         scoring='accuracy',
-#         cv=nbFolds,
-#     )
-#
-#     rf_detector = grid_rf.fit(X_train, y_train)
-#
-#     desc_estimators = [rf_detector.best_params_["classifier__n_estimators"]]
-#     description = "Classif_" + "RF" + "-" + "CV_" + str(nbFolds) + "-" + "Trees_" + str(map(str, desc_estimators))
-#
-#     return description, rf_detector
-#
-#
-# def MonoviewClassifSVMLinear(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
-#     pipeline_SVMLinear = Pipeline([('classifier', sklearn.svm.SVC())])
-#     param_SVMLinear = kwargs
-#
-#     grid_SVMLinear = GridSearchCV(pipeline_SVMLinear, param_grid=param_SVMLinear, refit=True, n_jobs=nbCores,
-#                                   scoring='accuracy',
-#                                   cv=nbFolds)
-#     SVMLinear_detector = grid_SVMLinear.fit(X_train, y_train)
-#     desc_params = [SVMLinear_detector.best_params_["classifier__C"]]
-#     description = "Classif_" + "SVC" + "-" + "CV_" + str(nbFolds) + "-" + "-".join(map(str, desc_params))
-#     return description, SVMLinear_detector
-#
-#
-# def MonoviewClassifSVMRBF(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
-#     pipeline_SVMRBF = Pipeline([('classifier', sklearn.svm.SVC())])
-#     param_SVMRBF = kwargs
-#
-#     grid_SVMRBF = GridSearchCV(pipeline_SVMRBF, param_grid=param_SVMRBF, refit=True, n_jobs=nbCores, scoring='accuracy',
-#                                cv=nbFolds)
-#     SVMRBF_detector = grid_SVMRBF.fit(X_train, y_train)
-#     desc_params = [SVMRBF_detector.best_params_["classifier__C"]]
-#     description = "Classif_" + "SVC" + "-" + "CV_" + str(nbFolds) + "-" + "-".join(map(str, desc_params))
-#     return description, SVMRBF_detector
-#
-#
-# def MonoviewClassifDecisionTree(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
-#     pipeline_DT = Pipeline([('classifier', sklearn.tree.DecisionTreeClassifier())])
-#     param_DT = kwargs
-#
-#     grid_DT = GridSearchCV(pipeline_DT, param_grid=param_DT, refit=True, n_jobs=nbCores, scoring='accuracy',
-#                            cv=nbFolds)
-#     DT_detector = grid_DT.fit(X_train, y_train)
-#     desc_params = [DT_detector.best_params_["classifier__max_depth"]]
-#     description = "Classif_" + "DT" + "-" + "CV_" + str(nbFolds) + "-" + "-".join(map(str, desc_params))
-#     return description, DT_detector
-#
-#
-# def MonoviewClassifSGD(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
-#     pipeline_SGD = Pipeline([('classifier', sklearn.linear_model.SGDClassifier())])
-#     param_SGD = kwargs
-#     grid_SGD = GridSearchCV(pipeline_SGD, param_grid=param_SGD, refit=True, n_jobs=nbCores, scoring='accuracy',
-#                             cv=nbFolds)
-#     SGD_detector = grid_SGD.fit(X_train, y_train)
-#     desc_params = [SGD_detector.best_params_["classifier__loss"], SGD_detector.best_params_["classifier__penalty"],
-#                    SGD_detector.best_params_["classifier__alpha"]]
-#     description = "Classif_" + "Lasso" + "-" + "CV_" + str(nbFolds) + "-" + "-".join(map(str, desc_params))
-#     return description, SGD_detector
-#
-#
-# def MonoviewClassifKNN(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
-#     pipeline_KNN = Pipeline([('classifier', sklearn.neighbors.KNeighborsClassifier())])
-#     param_KNN = kwargs
-#     grid_KNN = GridSearchCV(pipeline_KNN, param_grid=param_KNN, refit=True, n_jobs=nbCores, scoring='accuracy',
-#                             cv=nbFolds)
-#     KNN_detector = grid_KNN.fit(X_train, y_train)
-#     desc_params = [KNN_detector.best_params_["classifier__n_neighbors"]]
-#     description = "Classif_" + "Lasso" + "-" + "CV_" + str(nbFolds) + "-" + "-".join(map(str, desc_params))
-#     return description, KNN_detector
-
-
-# def calcClassifRandomForest(X_train, X_test, y_test, y_train, num_estimators):
-#    from sklearn.grid_search import ParameterGrid
-#    param_rf = { 'classifier__n_estimators': num_estimators}
-#    forest = RandomForestClassifier()
-#
-#    bestgrid=0;
-#    for g in ParameterGrid(grid):
-#        forest.set_params(**g)
-#        forest.fit(X_train,y_train)
-#        score = forest.score(X_test, y_test)
-#
-#        if score > best_score:
-#            best_score = score
-#            best_grid = g
-#
-#    rf_detector = RandomForestClassifier()
-#    rf_detector.set_params(**best_grid)
-#    rf_detector.fit(X_train,y_train)
-
-#    #desc_estimators = best_grid
-#    description = "Classif_" + "RF" + "-" + "CV_" +  "NO" + "-" + "Trees_" + str(best_grid)
-
-#    return (description, rf_detector)
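
gen_test_folds_preds fits the estimator once per cross-validation fold and stacks the per-fold predictions, truncated to the smallest fold. A usage sketch on toy data, assuming the package is importable:

    import numpy as np
    from sklearn.model_selection import StratifiedKFold
    from sklearn.tree import DecisionTreeClassifier

    from multiview_platform.mono_multi_view_classifiers.monoview.monoview_utils import \
        gen_test_folds_preds

    rng = np.random.RandomState(42)
    X_train = rng.rand(30, 4)
    y_train = rng.randint(0, 2, 30)

    preds = gen_test_folds_preds(X_train, y_train,
                                 StratifiedKFold(n_splits=3),
                                 DecisionTreeClassifier(max_depth=1))
    print(preds.shape)   # (n_splits, length of the smallest fold)
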
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost.py
index 0227e0e159817abe1d2166546ec4bb47295042f3..e1f23cc46dffc9c5aeac01b8b83066b3198651ca 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost.py
@@ -5,7 +5,8 @@ from sklearn.ensemble import AdaBoostClassifier
 from sklearn.tree import DecisionTreeClassifier
 
 from .. import metrics
-from ..monoview.monoview_utils import CustomRandint, BaseMonoviewClassifier, get_accuracy_graph
+from ..monoview.monoview_utils import CustomRandint, BaseMonoviewClassifier, \
+    get_accuracy_graph
 
 # Author-Info
 __author__ = "Baptiste Bauvin"
@@ -58,11 +59,11 @@ class Adaboost(AdaBoostClassifier, BaseMonoviewClassifier):
             if base_estimator == "DecisionTreeClassifier":
                 base_estimator = DecisionTreeClassifier()
         AdaBoostClassifier.__init__(self,
-            random_state=random_state,
-            n_estimators=n_estimators,
-            base_estimator=base_estimator,
-            algorithm="SAMME"
-        )
+                                    random_state=random_state,
+                                    n_estimators=n_estimators,
+                                    base_estimator=base_estimator,
+                                    algorithm="SAMME"
+                                    )
         self.param_names = ["n_estimators", "base_estimator"]
         self.classed_params = ["base_estimator"]
         self.distribs = [CustomRandint(low=1, high=500),
@@ -147,4 +148,3 @@ class Adaboost(AdaBoostClassifier, BaseMonoviewClassifier):
         np.savetxt(directory + "times.csv",
                    np.array([self.train_time, self.pred_time]), delimiter=',')
         return interpretString
-
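
The Adaboost wrapper stays a regular scikit-learn estimator; the platform-specific part is the param_names/distribs pair consumed by the hyper-parameter search. A toy-data sketch, assuming the package is importable and that fit/predict keep the scikit-learn signature:

    import numpy as np
    from sklearn.tree import DecisionTreeClassifier

    from multiview_platform.mono_multi_view_classifiers.monoview_classifiers.adaboost import \
        Adaboost

    rng = np.random.RandomState(42)
    X, y = rng.rand(20, 3), rng.randint(0, 2, 20)

    # SAMME AdaBoost over a depth-1 tree, wrapped for the platform.
    clf = Adaboost(random_state=rng, n_estimators=10,
                   base_estimator=DecisionTreeClassifier(max_depth=1))
    clf.fit(X, y)
    print(clf.predict(X[:5]))

    # The two lists the randomized search pairs up when sampling configurations:
    print(clf.param_names)
    print(clf.distribs)
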
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/decision_tree.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/decision_tree.py
index fabf2a72ad74467f00114c298fdb86fdf72476f7..7510971311b0c3162f4888e718552962984d0434 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/decision_tree.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/decision_tree.py
@@ -14,11 +14,11 @@ class DecisionTree(DecisionTreeClassifier, BaseMonoviewClassifier):
     def __init__(self, random_state=None, max_depth=None,
                  criterion='gini', splitter='best', **kwargs):
         DecisionTreeClassifier.__init__(self,
-            max_depth=max_depth,
-            criterion=criterion,
-            splitter=splitter,
-            random_state=random_state
-        )
+                                        max_depth=max_depth,
+                                        criterion=criterion,
+                                        splitter=splitter,
+                                        random_state=random_state
+                                        )
         self.param_names = ["max_depth", "criterion", "splitter",
                             'random_state']
         self.classed_params = []
@@ -28,7 +28,8 @@ class DecisionTree(DecisionTreeClassifier, BaseMonoviewClassifier):
         self.weird_strings = {}
 
     def get_interpretation(self, directory, y_test):
-        interpretString = "First featrue : \n\t{} <= {}\n".format(self.tree_.feature[0],
-                                                               self.tree_.threshold[0])
+        interpretString = "First feature : \n\t{} <= {}\n".format(
+            self.tree_.feature[0],
+            self.tree_.threshold[0])
         interpretString += self.get_feature_importance(directory)
         return interpretString
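
get_interpretation reports the root split of the fitted tree; both values come straight from scikit-learn's tree_ arrays, for example:

    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier

    X, y = load_iris(return_X_y=True)
    tree = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)
    # Index of the feature used at the root node and its split threshold:
    print(tree.tree_.feature[0], tree.tree_.threshold[0])
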
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/gradient_boosting.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/gradient_boosting.py
index 4b06adee49b8e0a7bc7b067562e16b113c103dc3..bf8cccb2f63c5a3372fe642cd9cc508e84efea23 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/gradient_boosting.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/gradient_boosting.py
@@ -5,19 +5,20 @@ from sklearn.ensemble import GradientBoostingClassifier
 from sklearn.tree import DecisionTreeClassifier
 
 from .. import metrics
-from ..monoview.monoview_utils import CustomRandint, BaseMonoviewClassifier, get_accuracy_graph
+from ..monoview.monoview_utils import CustomRandint, BaseMonoviewClassifier, \
+    get_accuracy_graph
 
 # Author-Info
 __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
-
 classifier_class_name = "GradientBoosting"
 
+
 class CustomDecisionTree(DecisionTreeClassifier):
     def predict(self, X, check_input=True):
         y_pred = DecisionTreeClassifier.predict(self, X,
-                                                         check_input=check_input)
+                                                check_input=check_input)
         return y_pred.reshape((y_pred.shape[0], 1)).astype(float)
 
 
@@ -28,12 +29,12 @@ class GradientBoosting(GradientBoostingClassifier, BaseMonoviewClassifier):
                  init=CustomDecisionTree(max_depth=1),
                  **kwargs):
         GradientBoostingClassifier.__init__(self,
-            loss=loss,
-            max_depth=max_depth,
-            n_estimators=n_estimators,
-            init=init,
-            random_state=random_state
-        )
+                                            loss=loss,
+                                            max_depth=max_depth,
+                                            n_estimators=n_estimators,
+                                            init=init,
+                                            random_state=random_state
+                                            )
         self.param_names = ["n_estimators", ]
         self.classed_params = []
         self.distribs = [CustomRandint(low=50, high=500), ]
@@ -81,10 +82,13 @@ class GradientBoosting(GradientBoostingClassifier, BaseMonoviewClassifier):
                                directory + "test_metrics.png",
                                self.plotted_metric_name, set="test")
             get_accuracy_graph(self.metrics, "AdaboostClassic",
-                               directory + "metrics.png", self.plotted_metric_name)
+                               directory + "metrics.png",
+                               self.plotted_metric_name)
             np.savetxt(directory + "test_metrics.csv", step_test_metrics,
                        delimiter=',')
-            np.savetxt(directory + "train_metrics.csv", self.metrics, delimiter=',')
+            np.savetxt(directory + "train_metrics.csv", self.metrics,
+                       delimiter=',')
             np.savetxt(directory + "times.csv",
-                       np.array([self.train_time, self.pred_time]), delimiter=',')
+                       np.array([self.train_time, self.pred_time]),
+                       delimiter=',')
             return interpretString
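
The only thing CustomDecisionTree changes is the layout of predict's output: a plain DecisionTreeClassifier returns a 1-D integer array, whereas the init estimator of GradientBoostingClassifier is expected to provide a column of float raw predictions in the scikit-learn versions this code targets. A stand-alone sketch of that reshape:

    import numpy as np
    from sklearn.tree import DecisionTreeClassifier

    rng = np.random.RandomState(0)
    X, y = rng.rand(10, 3), rng.randint(0, 2, 10)

    stump = DecisionTreeClassifier(max_depth=1).fit(X, y)
    raw = stump.predict(X)                                   # shape (10,), ints
    as_init = raw.reshape((raw.shape[0], 1)).astype(float)   # shape (10, 1), floats
    print(raw.shape, as_init.shape)
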
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/knn.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/knn.py
index 9b164f995ce5e00f64749f80129578dcf6fc16e6..01dfb1bb379a6850f0e2751cd8e2dab6ebc874f3 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/knn.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/knn.py
@@ -6,7 +6,6 @@ from ..monoview.monoview_utils import CustomRandint, BaseMonoviewClassifier
 __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
-
 classifier_class_name = "KNN"
 
 
@@ -24,15 +23,15 @@ class KNN(KNeighborsClassifier, BaseMonoviewClassifier):
     p
     kwargs
     """
+
     def __init__(self, random_state=None, n_neighbors=5,
                  weights='uniform', algorithm='auto', p=2, **kwargs):
-
         KNeighborsClassifier.__init__(self,
-            n_neighbors=n_neighbors,
-            weights=weights,
-            algorithm=algorithm,
-            p=p
-        )
+                                      n_neighbors=n_neighbors,
+                                      weights=weights,
+                                      algorithm=algorithm,
+                                      p=p
+                                      )
         self.param_names = ["n_neighbors", "weights", "algorithm", "p",
                             "random_state", ]
         self.classed_params = []
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/lasso.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/lasso.py
index 1c6de6a16c002a921f586d527b00e5e4a9dc5552..1b93ec2c7185b2ab60bf4400e568b0b8253c5397 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/lasso.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/lasso.py
@@ -8,9 +8,9 @@ from ..monoview.monoview_utils import CustomRandint, CustomUniform, \
 __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
-
 classifier_class_name = "Lasso"
 
+
 class Lasso(LassoSK, BaseMonoviewClassifier):
     """
 
@@ -45,14 +45,15 @@ class Lasso(LassoSK, BaseMonoviewClassifier):
     weird_strings :
 
     """
+
     def __init__(self, random_state=None, alpha=1.0,
                  max_iter=10, warm_start=False, **kwargs):
         LassoSK.__init__(self,
-            alpha=alpha,
-            max_iter=max_iter,
-            warm_start=warm_start,
-            random_state=random_state
-        )
+                         alpha=alpha,
+                         max_iter=max_iter,
+                         warm_start=warm_start,
+                         random_state=random_state
+                         )
         self.param_names = ["max_iter", "alpha", "random_state"]
         self.classed_params = []
         self.distribs = [CustomRandint(low=1, high=300),
@@ -87,4 +88,4 @@ class Lasso(LassoSK, BaseMonoviewClassifier):
         interpreted string, str interpret_string
         """
         interpret_string = ""
-        return interpret_string
\ No newline at end of file
+        return interpret_string
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/random_forest.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/random_forest.py
index ec7fd6ba631eaf28279801c529a068746dba7871..82a442d99c42ac96604ac36e4469fd1288eb0b6f 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/random_forest.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/random_forest.py
@@ -6,9 +6,9 @@ from ..monoview.monoview_utils import CustomRandint, BaseMonoviewClassifier
 __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
-
 classifier_class_name = "RandomForest"
 
+
 class RandomForest(RandomForestClassifier, BaseMonoviewClassifier):
     """RandomForest Classifier Class
 
@@ -38,6 +38,7 @@ class RandomForest(RandomForestClassifier, BaseMonoviewClassifier):
     weird_strings :
 
     """
+
     def __init__(self, random_state=None, n_estimators=10,
                  max_depth=None, criterion='gini', **kwargs):
         """
@@ -51,11 +52,11 @@ class RandomForest(RandomForestClassifier, BaseMonoviewClassifier):
         kwargs
         """
         RandomForestClassifier.__init__(self,
-            n_estimators=n_estimators,
-            max_depth=max_depth,
-            criterion=criterion,
-            random_state=random_state
-        )
+                                        n_estimators=n_estimators,
+                                        max_depth=max_depth,
+                                        criterion=criterion,
+                                        random_state=random_state
+                                        )
         self.param_names = ["n_estimators", "max_depth", "criterion",
                             "random_state"]
         self.classed_params = []
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py
index 34baf66412a2c6505e06a164be63e291eb65f288..7318f9677e63ceaa266b7f18188a119693b25f89 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py
@@ -8,6 +8,7 @@ __status__ = "Prototype"  # Production, Development, Prototype
 
 classifier_class_name = "SGD"
 
+
 class SGD(SGDClassifier, BaseMonoviewClassifier):
     """
 
@@ -36,17 +37,17 @@ class SGD(SGDClassifier, BaseMonoviewClassifier):
     weird_strings :
 
     """
+
     def __init__(self, random_state=None, loss='hinge',
                  penalty='l2', alpha=0.0001, max_iter=5, tol=None, **kwargs):
-
         SGDClassifier.__init__(self,
-            loss=loss,
-            penalty=penalty,
-            alpha=alpha,
-            max_iter=5,
-            tol=None,
-            random_state=random_state
-        )
+                               loss=loss,
+                               penalty=penalty,
+                               alpha=alpha,
+                               max_iter=max_iter,
+                               tol=tol,
+                               random_state=random_state
+                               )
         self.param_names = ["loss", "penalty", "alpha", "random_state"]
         self.classed_params = []
         self.distribs = [['log', 'modified_huber'],
@@ -68,6 +69,5 @@ class SGD(SGDClassifier, BaseMonoviewClassifier):
-        interpret_string str to interpreted
+        interpret_string : str, the interpretation string
         """
         interpret_string = ""
-        import numpy as np
         # self.feature_importances_ = (self.coef_/np.sum(self.coef_)).reshape(self.coef_.shape[1])
         return interpret_string
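
# Illustrative sketch, not part of the patch: why the wrapper's __init__ should
# forward `max_iter` / `tol` to the parent instead of hard-coding them (note
# the literal `max_iter=5` in the removed lines above).  sklearn's
# get_params()/clone() only see what was actually stored, so a hard-coded
# literal silently discards the caller's or the hyper-parameter search's
# setting.  Pure sklearn API.
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier

clf = SGDClassifier(loss='hinge', alpha=1e-3, max_iter=50, tol=1e-3,
                    random_state=0)
print(clone(clf).get_params()["max_iter"])  # 50: the forwarded value survives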
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_linear.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_linear.py
index d29b92f4b474b5c621765590e948700293a01fbb..1c188b2bd60323de56f558501e77d6100e60903a 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_linear.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_linear.py
@@ -1,15 +1,14 @@
-import numpy as np
-
-from multiview_platform.mono_multi_view_classifiers.monoview_classifiers.additions.SVCClassifier import SVCClassifier
+from multiview_platform.mono_multi_view_classifiers.monoview_classifiers.additions.SVCClassifier import \
+    SVCClassifier
 from ..monoview.monoview_utils import CustomUniform, BaseMonoviewClassifier
 
 # Author-Info
 __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
-
 classifier_class_name = "SVMLinear"
 
+
 class SVMLinear(SVCClassifier, BaseMonoviewClassifier):
     """SVMLinear
 
@@ -26,13 +25,13 @@ class SVMLinear(SVCClassifier, BaseMonoviewClassifier):
     kwargs : others arguments
 
     """
-    def __init__(self, random_state=None, C=1.0, **kwargs):
 
+    def __init__(self, random_state=None, C=1.0, **kwargs):
         SVCClassifier.__init__(self,
-            C=C,
-            kernel='linear',
-            random_state=random_state
-        )
+                               C=C,
+                               kernel='linear',
+                               random_state=random_state
+                               )
         self.param_names = ["C", "random_state"]
         self.distribs = [CustomUniform(loc=0, state=1), [random_state]]
 
@@ -40,4 +39,3 @@ class SVMLinear(SVCClassifier, BaseMonoviewClassifier):
         interpret_string = ""
         # self.feature_importances_ = (self.coef_/np.sum(self.coef_)).reshape((self.coef_.shape[1],))
         return interpret_string
-
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_poly.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_poly.py
index 351fc05ad970940730bf1b2b326d55371d95636a..2d8ef210346450da4200c3ae287e9612698876bd 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_poly.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_poly.py
@@ -1,4 +1,5 @@
-from multiview_platform.mono_multi_view_classifiers.monoview_classifiers.additions.SVCClassifier import SVCClassifier
+from multiview_platform.mono_multi_view_classifiers.monoview_classifiers.additions.SVCClassifier import \
+    SVCClassifier
 from ..monoview.monoview_utils import CustomUniform, CustomRandint, \
     BaseMonoviewClassifier
 
@@ -8,6 +9,7 @@ __status__ = "Prototype"  # Production, Development, Prototype
 
 classifier_class_name = "SVMPoly"
 
+
 class SVMPoly(SVCClassifier, BaseMonoviewClassifier):
     """
     Class of SVMPoly for SVC Classifier
@@ -35,14 +37,14 @@ class SVMPoly(SVCClassifier, BaseMonoviewClassifier):
 
     distribs :  list of random_state distribution
     """
-    def __init__(self, random_state=None, C=1.0, degree=3, **kwargs):
 
+    def __init__(self, random_state=None, C=1.0, degree=3, **kwargs):
         SVCClassifier.__init__(self,
-            C=C,
-            kernel='poly',
-            degree=degree,
-            random_state=random_state
-        )
+                               C=C,
+                               kernel='poly',
+                               degree=degree,
+                               random_state=random_state
+                               )
         self.param_names = ["C", "degree", "random_state"]
         self.distribs = [CustomUniform(loc=0, state=1),
                          CustomRandint(low=2, high=30), [random_state]]
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_rbf.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_rbf.py
index 9dbea0e0573726d29bd1fd148906ff1cf49334bf..325afa102560d1e17e9665a39e82b09045f8fe74 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_rbf.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_rbf.py
@@ -1,11 +1,11 @@
-from multiview_platform.mono_multi_view_classifiers.monoview_classifiers.additions.SVCClassifier import SVCClassifier
+from multiview_platform.mono_multi_view_classifiers.monoview_classifiers.additions.SVCClassifier import \
+    SVCClassifier
 from ..monoview.monoview_utils import CustomUniform, BaseMonoviewClassifier
 
 # Author-Info
 __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
-
 classifier_class_name = "SVMRBF"
 
 
@@ -30,13 +30,12 @@ class SVMRBF(SVCClassifier, BaseMonoviewClassifier):
 
     distribs :  list of random_state distribution
     """
-    def __init__(self, random_state=None, C=1.0, **kwargs):
 
+    def __init__(self, random_state=None, C=1.0, **kwargs):
         SVCClassifier.__init__(self,
-            C=C,
-            kernel='rbf',
-            random_state=random_state
-        )
+                               C=C,
+                               kernel='rbf',
+                               random_state=random_state
+                               )
         self.param_names = ["C", "random_state"]
         self.distribs = [CustomUniform(loc=0, state=1), [random_state]]
-
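
# Illustrative sketch, not part of the patch: drawing the SVM's C from a
# continuous distribution, the way the `distribs` entries above are meant to be
# sampled.  CustomUniform(loc=0, state=1) is assumed to behave roughly like
# scipy.stats.uniform(loc=0, scale=1); the platform's class may differ.
import numpy as np
from scipy.stats import uniform
from sklearn.svm import SVC

rng = np.random.RandomState(42)
C = float(uniform(loc=0, scale=1).rvs(random_state=rng))
clf = SVC(C=max(C, 1e-3), kernel='rbf', random_state=rng)  # guard against C == 0
print(clf.get_params()["C"])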
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview/analyze_results.py b/multiview_platform/mono_multi_view_classifiers/multiview/analyze_results.py
index c0040cf7081fdbdfa3c0c9d640fe8e4e3b5e43f3..61a6996930f6751ab4795670a88e831a3649b70c 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview/analyze_results.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview/analyze_results.py
@@ -5,13 +5,13 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
 
-def printMetricScore(metricScores, metric_list):
+def print_metric_score(metric_scores, metric_list):
     """
-    this function print the metrics scores
+    Builds a printable string of the metric scores on train and test
 
     Parameters
     ----------
-    metricScores : the score of metrics
+    metric_scores : dict mapping each metric name to [train_score, test_score]
 
     metric_list : list of metrics
 
@@ -23,22 +23,24 @@ def printMetricScore(metricScores, metric_list):
     for metric in metric_list:
         metric_module = getattr(metrics, metric[0])
         if metric[1] is not None:
-            metric_kwargs = dict((index, metricConfig) for index, metricConfig in
-                                enumerate(metric[1]))
+            metric_kwargs = dict(
+                (index, metricConfig) for index, metricConfig in
+                enumerate(metric[1]))
         else:
             metric_kwargs = {}
         metric_score_string += "\tFor " + metric_module.get_config(
             **metric_kwargs) + " : "
         metric_score_string += "\n\t\t- Score on train : " + str(
-            metricScores[metric[0]][0])
+            metric_scores[metric[0]][0])
         metric_score_string += "\n\t\t- Score on test : " + str(
-            metricScores[metric[0]][1])
+            metric_scores[metric[0]][1])
         metric_score_string += "\n\n"
     return metric_score_string
 
 
-def getTotalMetricScores(metric, trainLabels, testLabels, validationIndices,
-                         learningIndices, labels):
+def get_total_metric_scores(metric, train_labels, test_labels,
+                            validation_indices,
+                            learning_indices, labels):
     """
 
     Parameters
@@ -46,51 +48,53 @@ def getTotalMetricScores(metric, trainLabels, testLabels, validationIndices,
 
     metric :
 
-    trainLabels : labels of train
+    train_labels : labels of train
 
-    testLabels :  labels of test
+    test_labels :  labels of test
 
-    validationIndices :
+    validation_indices :
 
-    learningIndices :
+    learning_indices :
 
     labels :
 
     Returns
     -------
-    list of [trainScore, testScore]
+    list of [train_score, test_score]
     """
-    metricModule = getattr(metrics, metric[0])
+    metric_module = getattr(metrics, metric[0])
     if metric[1] is not None:
-        metricKWARGS = dict((index, metricConfig) for index, metricConfig in
-                            enumerate(metric[1]))
+        metric_kwargs = dict((index, metricConfig) for index, metricConfig in
+                             enumerate(metric[1]))
     else:
-        metricKWARGS = {}
-    trainScore = metricModule.score(labels[learningIndices], trainLabels,
-                                        **metricKWARGS)
-    testScore = metricModule.score(labels[validationIndices], testLabels,
-                                   **metricKWARGS)
-    return [trainScore, testScore]
+        metric_kwargs = {}
+    train_score = metric_module.score(labels[learning_indices], train_labels,
+                                      **metric_kwargs)
+    test_score = metric_module.score(labels[validation_indices], test_labels,
+                                     **metric_kwargs)
+    return [train_score, test_score]
 
 
-def getMetricsScores(metrics, trainLabels, testLabels,
-                     validationIndices, learningIndices, labels):
-    metricsScores = {}
+def get_metrics_scores(metrics, train_labels, test_labels,
+                       validation_indices, learning_indices, labels):
+    metrics_scores = {}
     for metric in metrics:
-        metricsScores[metric[0]] = getTotalMetricScores(metric, trainLabels,
-                                                        testLabels,
-                                                        validationIndices,
-                                                        learningIndices, labels)
-    return metricsScores
+        metrics_scores[metric[0]] = get_total_metric_scores(metric,
+                                                            train_labels,
+                                                            test_labels,
+                                                            validation_indices,
+                                                            learning_indices,
+                                                            labels)
+    return metrics_scores
 
 
 def execute(classifier, pred_train_labels,
             pred_test_labels, DATASET,
-            classificationKWARGS, classificationIndices,
-            labels_dictionary, views, nbCores, times,
-            name, KFolds,
-            hyper_param_search, nIter, metric_list,
-            views_indices, random_state, labels, classifierModule,
+            classification_kwargs, classification_indices,
+            labels_dictionary, views, nb_cores, times,
+            name, k_folds,
+            hyper_param_search, n_iter, metric_list,
+            views_indices, random_state, labels, classifier_module,
             directory):
     """
 
@@ -104,25 +108,25 @@ def execute(classifier, pred_train_labels,
 
     DATASET :
 
-    classificationKWARGS
+    classification_kwargs
 
-    classificationIndices
+    classification_indices
 
     labels_dictionary
 
     views
 
-    nbCores
+    nb_cores
 
     times
 
     name
 
-    KFolds
+    k_folds
 
     hyper_param_search
 
-    nIter
+    n_iter
 
     metric_list
 
@@ -132,40 +136,43 @@ def execute(classifier, pred_train_labels,
 
     labels
 
-    classifierModule
+    classifier_module
 
     Returns
     -------
-    retuern tuple of (stringAnalysis, imagesAnalysis, metricsScore)
+    tuple of (string_analysis, images_analysis, metrics_scores)
     """
     classifier_name = classifier.short_name
-    learning_indices, validation_indices = classificationIndices
-    metricModule = getattr(metrics, metric_list[0][0])
+    learning_indices, validation_indices = classification_indices
+    metric_module = getattr(metrics, metric_list[0][0])
     if metric_list[0][1] is not None:
-        metricKWARGS = dict((index, metricConfig) for index, metricConfig in
-                            enumerate(metric_list[0][1]))
+        metric_kwargs = dict((index, metricConfig) for index, metricConfig in
+                             enumerate(metric_list[0][1]))
     else:
-        metricKWARGS = {}
-    scoreOnTrain = metricModule.score(labels[learning_indices],
-                                      pred_train_labels,
-                                      **metricKWARGS)
-    scoreOnTest = metricModule.score(labels[validation_indices],
-                                     pred_test_labels, **metricKWARGS)
-
-    stringAnalysis = "\t\tResult for multiview classification with " + classifier_name + \
-                     "\n\n" + metric_list[0][0] + " :\n\t-On Train : " + str(
-        scoreOnTrain) + "\n\t-On Test : " + str(
-        scoreOnTest) + \
-                     "\n\nDataset info :\n\t-Database name : " + name + "\n\t-Labels : " + \
-                     ', '.join(
-                         labels_dictionary.values()) + "\n\t-Views : " + ', '.join(
+        metric_kwargs = {}
+    score_on_train = metric_module.score(labels[learning_indices],
+                                         pred_train_labels,
+                                         **metric_kwargs)
+    score_on_test = metric_module.score(labels[validation_indices],
+                                        pred_test_labels, **metric_kwargs)
+
+    string_analysis = "\t\tResult for multiview classification with " + classifier_name + \
+                      "\n\n" + metric_list[0][0] + " :\n\t-On Train : " + str(
+        score_on_train) + "\n\t-On Test : " + str(
+        score_on_test) + \
+                      "\n\nDataset info :\n\t-Database name : " + name + "\n\t-Labels : " + \
+                      ', '.join(
+                          labels_dictionary.values()) + "\n\t-Views : " + ', '.join(
         views) + "\n\t-" + str(
-        KFolds.n_splits) + \
-                     " folds\n\nClassification configuration : \n\t-Algorithm used : " + classifier_name + " with : " + classifier.get_config()
-
-    metricsScores = getMetricsScores(metric_list, pred_train_labels, pred_test_labels,
-                                     validation_indices, learning_indices, labels)
-    stringAnalysis += printMetricScore(metricsScores, metric_list)
-    stringAnalysis += "\n\n Interpretation : \n\n" + classifier.get_interpretation(directory, labels)
-    imagesAnalysis = {}
-    return stringAnalysis, imagesAnalysis, metricsScores
+        k_folds.n_splits) + \
+                      " folds\n\nClassification configuration : \n\t-Algorithm used : " + classifier_name + " with : " + classifier.get_config()
+
+    metrics_scores = get_metrics_scores(metric_list, pred_train_labels,
+                                        pred_test_labels,
+                                        validation_indices, learning_indices,
+                                        labels)
+    string_analysis += print_metric_score(metrics_scores, metric_list)
+    string_analysis += "\n\n Interpretation : \n\n" + classifier.get_interpretation(
+        directory, labels)
+    images_analysis = {}
+    return string_analysis, images_analysis, metrics_scores
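
# Illustrative sketch, not part of the patch: the scoring logic implemented by
# get_total_metric_scores / get_metrics_scores above, with plain sklearn
# metrics in place of the platform's `metrics` modules.
import numpy as np
from sklearn.metrics import accuracy_score, f1_score

labels = np.array([0, 1, 0, 1, 1, 0, 1, 0])
learning_indices, validation_indices = np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7])
pred_train, pred_test = np.array([0, 1, 0, 0]), np.array([1, 0, 1, 0])

metric_funcs = {"accuracy_score": accuracy_score, "f1_score": f1_score}
metrics_scores = {name: [func(labels[learning_indices], pred_train),
                         func(labels[validation_indices], pred_test)]
                  for name, func in metric_funcs.items()}
print(metrics_scores)  # {"accuracy_score": [train, test], "f1_score": [...]}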
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview/exec_multiview.py b/multiview_platform/mono_multi_view_classifiers/multiview/exec_multiview.py
index 192bd3d63ab098338463602dfe56e162c235f23e..f5c5a880a55448e37c57572c122f119f7f4bcf0c 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview/exec_multiview.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview/exec_multiview.py
@@ -7,8 +7,8 @@ import time
 import h5py
 import numpy as np
 
-from .multiview_utils import MultiviewResult
 from . import analyze_results
+from .multiview_utils import MultiviewResult
 from .. import multiview_classifiers
 from ..utils import hyper_parameter_search
 from ..utils.multiclass import get_mc_estim
@@ -64,11 +64,12 @@ def init_constants(kwargs, classification_indices, metrics,
         logging.info("Info:\t Shape of " + str(view_name) + " :" + str(
             dataset_var.get_shape()))
     labels = dataset_var.get_labels()
-    return classifier_name, t_start, views_indices,\
-           classifier_config, views, learning_rate,labels
+    return classifier_name, t_start, views_indices, \
+           classifier_config, views, learning_rate, labels
 
 
-def save_results(classifier, labels_dictionary, string_analysis, views, classifier_module,
+def save_results(classifier, labels_dictionary, string_analysis, views,
+                 classifier_module,
                  classification_kargs, directory, learning_rate, name,
                  images_analysis):
     """
@@ -103,8 +104,8 @@ def save_results(classifier, labels_dictionary, string_analysis, views, classifi
     # views_string = "-".join(views)
     views_string = "mv"
     cl_type_string = classifier.short_name
-    output_file_name = os.path.join(directory,  cl_type_string,
-                                    cl_type_string+"-"+views_string+'-'+name)
+    output_file_name = os.path.join(directory, cl_type_string,
+                                    cl_type_string + "-" + views_string + '-' + name)
     if not os.path.exists(os.path.dirname(output_file_name)):
         try:
             os.makedirs(os.path.dirname(output_file_name))
@@ -122,14 +123,16 @@ def save_results(classifier, labels_dictionary, string_analysis, views, classifi
                     test_file_name = output_file_name + image_name + "-" + str(
                         i) + ".png"
-                    if not os.path.isfile(testFileName):
+                    if not os.path.isfile(test_file_name):
-                        images_analysis[image_name].savefig(test_file_name, transparent=True)
+                        images_analysis[image_name].savefig(test_file_name,
+                                                            transparent=True)
                         break
 
             images_analysis[image_name].savefig(
                 output_file_name + image_name + '.png', transparent=True)
 
 
-def exec_multiview_multicore(directory, core_index, name, learning_rate, nb_folds,
+def exec_multiview_multicore(directory, core_index, name, learning_rate,
+                             nb_folds,
                              database_type, path, labels_dictionary,
                              random_state, labels,
                              hyper_param_search=False, nb_cores=1, metrics=None,
@@ -182,14 +185,17 @@ def exec_multiview_multicore(directory, core_index, name, learning_rate, nb_fold
     """
     """Used to load an HDF5 dataset_var for each parallel job and execute multiview classification"""
     dataset_var = h5py.File(path + name + str(core_index) + ".hdf5", "r")
-    return exec_multiview(directory, dataset_var, name, learning_rate, nb_folds, 1,
+    return exec_multiview(directory, dataset_var, name, learning_rate, nb_folds,
+                          1,
                           database_type, path, labels_dictionary,
                           random_state, labels,
-                          hyper_param_search=hyper_param_search, metrics=metrics,
+                          hyper_param_search=hyper_param_search,
+                          metrics=metrics,
                           n_iter=n_iter, **arguments)
 
 
-def exec_multiview(directory, dataset_var, name, classification_indices, k_folds,
+def exec_multiview(directory, dataset_var, name, classification_indices,
+                   k_folds,
                    nb_cores, database_type, path,
                    labels_dictionary, random_state, labels,
                    hyper_param_search=False, metrics=None, n_iter=30, **kwargs):
@@ -243,9 +249,9 @@ def exec_multiview(directory, dataset_var, name, classification_indices, k_folds
     views_indices, \
     classifier_config, \
     views, \
-    learning_rate,\
+    learning_rate, \
     labels = init_constants(kwargs, classification_indices, metrics, name,
-                                   nb_cores, k_folds, dataset_var)
+                            nb_cores, k_folds, dataset_var)
     logging.debug("Done:\t Initialize constants")
 
     extraction_time = time.time() - t_start
@@ -266,33 +272,39 @@ def exec_multiview(directory, dataset_var, name, classification_indices, k_folds
     logging.debug("Start:\t Optimizing hyperparameters")
     if hyper_param_search != "None":
         classifier_config = hyper_parameter_search.search_best_settings(
-            dataset_var, dataset_var.get_labels(), classifier_module, classifier_name,
+            dataset_var, dataset_var.get_labels(), classifier_module,
+            classifier_name,
             metrics[0], learning_indices, k_folds, random_state,
             directory, nb_cores=nb_cores, views_indices=views_indices,
             searching_tool=hyper_param_search, n_iter=n_iter,
             classifier_config=classifier_config)
-    classifier = get_mc_estim(getattr(classifier_module, classifier_name)(random_state=random_state,
-                                                             **classifier_config),
-                              random_state, multiview=True,
-                              y=dataset_var.get_labels())
+    classifier = get_mc_estim(
+        getattr(classifier_module, classifier_name)(random_state=random_state,
+                                                    **classifier_config),
+        random_state, multiview=True,
+        y=dataset_var.get_labels())
     logging.debug("Done:\t Optimizing hyperparameters")
     logging.debug("Start:\t Fitting classifier")
-    classifier.fit(dataset_var, dataset_var.get_labels(), train_indices=learning_indices,
-                        view_indices=views_indices)
+    classifier.fit(dataset_var, dataset_var.get_labels(),
+                   train_indices=learning_indices,
+                   view_indices=views_indices)
     logging.debug("Done:\t Fitting classifier")
 
     logging.debug("Start:\t Predicting")
-    pred_train_labels = classifier.predict(dataset_var, example_indices=learning_indices,
-                                      view_indices=views_indices)
-    pred_test_labels = classifier.predict(dataset_var, example_indices=validation_indices,
-                                     view_indices=views_indices)
+    pred_train_labels = classifier.predict(dataset_var,
+                                           example_indices=learning_indices,
+                                           view_indices=views_indices)
+    pred_test_labels = classifier.predict(dataset_var,
+                                          example_indices=validation_indices,
+                                          view_indices=views_indices)
     full_labels = np.zeros(dataset_var.get_labels().shape, dtype=int) - 100
     full_labels[learning_indices] = pred_train_labels
     full_labels[validation_indices] = pred_test_labels
-    logging.info("Done:\t Pertidcting")
+    logging.info("Done:\t Predicting")
 
     classification_time = time.time() - t_start
-    logging.info("Info:\t Classification duration " + str(extraction_time) + "s")
+    logging.info(
+        "Info:\t Classification duration " + str(classification_time) + "s")
 
     # TODO: get better cltype
 
@@ -309,7 +321,8 @@ def exec_multiview(directory, dataset_var, name, classification_indices, k_folds
     logging.info("Done:\t Result Analysis for " + cl_type)
 
     logging.debug("Start:\t Saving preds")
-    save_results(classifier, labels_dictionary, string_analysis, views, classifier_module,
+    save_results(classifier, labels_dictionary, string_analysis, views,
+                 classifier_module,
                  classifier_config, directory,
                  learning_rate, name, images_analysis)
-    logging.debug("Start:\t Saving preds")
+    logging.debug("Done:\t Saving preds")
@@ -410,7 +423,8 @@ if __name__ == "__main__":
     if args.log:
         logging.getLogger().addHandler(logging.StreamHandler())
 
-    res = exec_multiview(directory, dataset_var, name, classification_indices, k_folds,
+    res = exec_multiview(directory, dataset_var, name, classification_indices,
+                         k_folds,
                          nb_cores, databaseType, path,
                          labels_dictionary, random_state, labels,
                          hyper_param_search=hyper_param_search, metrics=metrics,
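
# Illustrative sketch, not part of the patch: the fit / predict / relabel flow
# wired up in exec_multiview above, reduced to a single plain sklearn
# estimator.  -100 marks examples that belong to neither split.
import numpy as np
from sklearn.tree import DecisionTreeClassifier

X = np.random.RandomState(0).rand(20, 4)
y = np.random.RandomState(1).randint(0, 2, 20)
learning_indices, validation_indices = np.arange(0, 12), np.arange(12, 18)

clf = DecisionTreeClassifier(random_state=0)
clf.fit(X[learning_indices], y[learning_indices])
pred_train = clf.predict(X[learning_indices])
pred_test = clf.predict(X[validation_indices])

full_labels = np.zeros(y.shape, dtype=int) - 100
full_labels[learning_indices] = pred_train
full_labels[validation_indices] = pred_test
print(full_labels)  # the last two entries stay at -100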
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview/multiview_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview/multiview_utils.py
index 57062546fe274e34ead2ea579b8fc8673b44c576..4fd5fb851f55f49451c024ecab63d649d42818ca 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview/multiview_utils.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview/multiview_utils.py
@@ -1,12 +1,11 @@
-from sklearn.base import BaseEstimator, ClassifierMixin
 from abc import abstractmethod
+
 import numpy as np
 
+from .. import monoview_classifiers
 from ..utils.base import BaseClassifier
 from ..utils.dataset import RAMDataset
 
-from .. import monoview_classifiers
-
 
 class FakeEstimator():
 
@@ -42,35 +41,37 @@ class BaseMultiviewClassifier(BaseClassifier):
     def to_str(self, param_name):
         if param_name in self.weird_strings:
             string = ""
-            if "class_name" in self.weird_strings[param_name] :
-                string+=self.get_params()[param_name].__class__.__name__
+            if "class_name" in self.weird_strings[param_name]:
+                string += self.get_params()[param_name].__class__.__name__
             if "config" in self.weird_strings[param_name]:
-                string += "( with "+ self.get_params()[param_name].params_to_string()+")"
+                string += "( with " + self.get_params()[
+                    param_name].params_to_string() + ")"
             else:
-                string+=self.weird_strings[param_name](
+                string += self.weird_strings[param_name](
                     self.get_params()[param_name])
             return string
         else:
             return str(self.get_params()[param_name])
 
     def accepts_multi_class(self, random_state, n_samples=10, dim=2,
-                           n_classes=3, n_views=2):
+                            n_classes=3, n_views=2):
         if int(n_samples / n_classes) < 1:
             raise ValueError(
                 "n_samples ({}) / n_classe ({}) must be over 1".format(
                     n_samples,
                     n_classes))
-        fake_mc_X = RAMDataset(views= [random_state.random_integers(low=0, high=100,
-                                                 size=(n_samples, dim))
-                                    for i in range(n_views)],
-                            labels=[class_index
-                     for _ in range(int(n_samples / n_classes))
-                     for class_index in range(n_classes)],
-                            are_sparse=False,
-                            name="mc_dset",
-                            labels_names=[str(class_index) for class_index in range(n_classes)],
-                            view_names=["V0", "V1"],
-                            )
+        fake_mc_X = RAMDataset(
+            views=[random_state.random_integers(low=0, high=100,
+                                                size=(n_samples, dim))
+                   for i in range(n_views)],
+            labels=[class_index
+                    for _ in range(int(n_samples / n_classes))
+                    for class_index in range(n_classes)],
+            are_sparse=False,
+            name="mc_dset",
+            labels_names=[str(class_index) for class_index in range(n_classes)],
+            view_names=["V0", "V1"],
+            )
 
         fake_mc_y = [class_index
                      for _ in range(int(n_samples / n_classes))
@@ -92,10 +93,13 @@ class ConfigGenerator():
         for classifier_name in classifier_names:
             classifier_class = get_monoview_classifier(classifier_name)
             self.distribs[classifier_name] = dict((param_name, param_distrib)
-                                  for param_name, param_distrib in
-                                  zip(classifier_class().param_names,
-                                      classifier_class().distribs)
-                                if param_name!="random_state")
+                                                  for param_name, param_distrib
+                                                  in
+                                                  zip(
+                                                      classifier_class().param_names,
+                                                      classifier_class().distribs)
+                                                  if
+                                                  param_name != "random_state")
 
     def rvs(self, random_state=None):
         config_sample = {}
@@ -103,17 +107,21 @@ class ConfigGenerator():
             config_sample[classifier_name] = {}
             for param_name, param_distrib in classifier_config.items():
                 if hasattr(param_distrib, "rvs"):
-                    config_sample[classifier_name][param_name]=param_distrib.rvs(random_state=random_state)
+                    config_sample[classifier_name][
+                        param_name] = param_distrib.rvs(
+                        random_state=random_state)
                 else:
                     config_sample[classifier_name][
-                        param_name] = param_distrib[random_state.randint(len(param_distrib))]
+                        param_name] = param_distrib[
+                        random_state.randint(len(param_distrib))]
         return config_sample
 
 
 def get_available_monoview_classifiers(need_probas=False):
     available_classifiers = [module_name
-                         for module_name in dir(monoview_classifiers)
-                         if not (module_name.startswith("__") or module_name=="additions")]
+                             for module_name in dir(monoview_classifiers)
+                             if not (
+                    module_name.startswith("__") or module_name == "additions")]
     if need_probas:
         proba_classifiers = []
         for module_name in available_classifiers:
@@ -125,12 +133,17 @@ def get_available_monoview_classifiers(need_probas=False):
         available_classifiers = proba_classifiers
     return available_classifiers
 
+
 def get_monoview_classifier(classifier_name, multiclass=False):
     classifier_module = getattr(monoview_classifiers, classifier_name)
-    classifier_class = getattr(classifier_module, classifier_module.classifier_class_name)
+    classifier_class = getattr(classifier_module,
+                               classifier_module.classifier_class_name)
     return classifier_class
 
+
 from .. import multiview_classifiers
+
+
 class MultiviewResult(object):
     def __init__(self, classifier_name, classifier_config,
                  metrics_scores, full_labels):
@@ -142,9 +155,10 @@ class MultiviewResult(object):
     def get_classifier_name(self):
         try:
             multiview_classifier_module = getattr(multiview_classifiers,
-                                                self.classifier_name)
+                                                  self.classifier_name)
             multiview_classifier = getattr(multiview_classifier_module,
-                                           multiview_classifier_module.classifier_class_name)(42)
+                                           multiview_classifier_module.classifier_class_name)(
+                42)
             return multiview_classifier.short_name
         except:
-            return self.classifier_name
\ No newline at end of file
+            return self.classifier_name
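
# Illustrative sketch, not part of the patch: the sampling rule used by
# ConfigGenerator.rvs above -- objects exposing .rvs() are sampled, plain lists
# are indexed at random.  scipy distributions stand in for
# CustomRandint/CustomUniform.
import numpy as np
from scipy.stats import randint, uniform

distribs = {"decision_tree": {"max_depth": randint(1, 20),
                              "criterion": ["gini", "entropy"]},
            "svm_rbf": {"C": uniform(0, 1)}}
random_state = np.random.RandomState(42)
config_sample = {
    clf_name: {name: (dist.rvs(random_state=random_state)
                      if hasattr(dist, "rvs")
                      else dist[random_state.randint(len(dist))])
               for name, dist in clf_distribs.items()}
    for clf_name, clf_distribs in distribs.items()}
print(config_sample)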
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/__init__.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/__init__.py
index 1ea37fbbff987b6220a141dcc6dd5853b5a40482..6e242133fa45a01a2506f423a543c742390259be 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/__init__.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/__init__.py
@@ -1,7 +1,9 @@
 import os
 
 for module in os.listdir(os.path.dirname(os.path.realpath(__file__))):
-    if module == '__init__.py' or module[-4:] == '.pyc' or module == '__pycache__' or module[-3:] != '.py':
+    if (module == '__init__.py' or module[-4:] == '.pyc'
+            or module == '__pycache__'
+            or module[-3:] != '.py'):
         continue
     __import__(module[:-3], locals(), globals(), [], 1)
 del module
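
# Illustrative sketch, not part of the patch: the filename filter above keeps
# importable .py submodules; pkgutil.iter_modules does the same job without
# string slicing.  Demonstrated on the standard-library `json` package so the
# snippet runs anywhere.
import json
import pkgutil

submodules = [name for _, name, is_pkg in pkgutil.iter_modules(json.__path__)
              if not is_pkg]
print(submodules)  # e.g. ['decoder', 'encoder', 'scanner', 'tool']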
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/data_sample.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/data_sample.py
index f31537feaab8537d8e54385dc078618787db13f0..f584284b0b7389c0d733a2ee1068c51239b1ac50 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/data_sample.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/data_sample.py
@@ -46,6 +46,7 @@ class Metriclearn_array(ma.MaskedArray, np.ndarray):
     >>> data.data
 
     """
+
     def __new__(cls, data, view_ind=None):
 
         shapes_int = []
@@ -53,7 +54,7 @@ class Metriclearn_array(ma.MaskedArray, np.ndarray):
         new_data = np.ndarray([])
         n_views = len(data)
         thekeys = None
-        view_ind_self =  None
+        view_ind_self = None
         if isinstance(data, dict):
             n_views = len(data)
             for key, dat_values in data.items():
@@ -68,16 +69,17 @@ class Metriclearn_array(ma.MaskedArray, np.ndarray):
                 new_data = cls._populate_new_data(index, dat_values, new_data)
                 index += 1
         elif isinstance(data, np.ndarray) and data.ndim > 1:
-            if  view_ind is not None:
+            if view_ind is not None:
                 n_views = view_ind.shape[0]
-                shapes_int = [  in2-in1  for in1, in2 in  zip(view_ind, view_ind[1: ])]
+                shapes_int = [in2 - in1 for in1, in2 in
+                              zip(view_ind, view_ind[1:])]
             elif view_ind is None:
                 if data.shape[1] > 1:
-                    view_ind = np.array([0, data.shape[1]//2, data.shape[1]])
+                    view_ind = np.array([0, data.shape[1] // 2, data.shape[1]])
                 else:
                     view_ind = np.array([0, data.shape[1]])
                 view_ind, n_views = cls._validate_views_ind(view_ind,
-                                                              data.shape[1])
+                                                            data.shape[1])
             new_data = data
             view_ind_self = view_ind
 
@@ -99,16 +101,20 @@ class Metriclearn_array(ma.MaskedArray, np.ndarray):
     @staticmethod
     def _populate_new_data(index, dat_values, new_data):
         if index == 0:
-            if isinstance(dat_values, ma.MaskedArray)  or isinstance(dat_values, np.ndarray):
+            if isinstance(dat_values, ma.MaskedArray) or isinstance(dat_values,
+                                                                    np.ndarray):
                 new_data = dat_values
             else:
-                new_data = dat_values.view(ma.MaskedArray) #  ma.masked_array(dat_values, mask=ma.nomask) dat_values.view(ma.MaskedArray) #(
+                new_data = dat_values.view(
+                    ma.MaskedArray)  # ma.masked_array(dat_values, mask=ma.nomask) dat_values.view(ma.MaskedArray) #(
                 new_data.mask = ma.nomask
         else:
-            if isinstance(dat_values, ma.MaskedArray) or isinstance(dat_values, np.ndarray):
+            if isinstance(dat_values, ma.MaskedArray) or isinstance(dat_values,
+                                                                    np.ndarray):
                 new_data = ma.hstack((new_data, dat_values))
             else:
-                new_data = ma.hstack((new_data,  dat_values.view(ma.MaskedArray) ) ) #  ma.masked_array(dat_values, mask=ma.nomask
+                new_data = ma.hstack((new_data, dat_values.view(
+                    ma.MaskedArray)))  # ma.masked_array(dat_values, mask=ma.nomask
         return new_data
 
     def __array_finalize__(self, obj):
@@ -121,7 +127,7 @@ class Metriclearn_array(ma.MaskedArray, np.ndarray):
 
     def get_col(self, view, col):
         start = np.sum(np.asarray(self.shapes_int[0: view]))
-        return self.data[start+col, :]
+        return self.data[start + col, :]
 
     def get_view(self, view):
         start = int(np.sum(np.asarray(self.shapes_int[0: view])))
@@ -131,30 +137,32 @@ class Metriclearn_array(ma.MaskedArray, np.ndarray):
     def set_view(self, view, data):
         start = int(np.sum(np.asarray(self.shapes_int[0: view])))
         stop = int(start + self.shapes_int[view])
-        if stop-start == data.shape[0] and data.shape[1]== self.data.shape[1]:
-             self.data[:, start:stop] = data
+        if (stop - start == data.shape[0]
+                and data.shape[1] == self.data.shape[1]):
+            self.data[:, start:stop] = data
         else:
             raise ValueError(
-                "shape of data does not match (%d, %d)" %stop-start %self.data.shape[1])
+                "shape of data does not match (%d, %d)" % (
+                    stop - start, self.data.shape[1]))
 
     def get_raw(self, view, raw):
         start = np.sum(np.asarray(self.shapes_int[0: view]))
-        stop = np.sum(np.asarray(self.shapes_int[0: view+1]))
+        stop = np.sum(np.asarray(self.shapes_int[0: view + 1]))
         return self.data[start:stop, raw]
 
     def add_view(self, v, data):
         if len(self.shape) > 0:
             if data.shape[0] == self.data.shape[0]:
                 indice = self.shapes_int[v]
-                np.insert(self.data, data, indice+1, axis=0)
+                np.insert(self.data, data, indice + 1, axis=0)
                 self.shapes_int.append(data.shape[1])
-                self.n_views +=1
+                self.n_views += 1
         else:
-            raise ValueError("New view can't initialazed")
+            raise ValueError("New view can't be initialized")
-           # self.shapes_int= [data.shape[1]]
-           # self.data.reshape(data.shape[0],)
-           # np.insert(self.data, data, 0)
-           # self.n_views = 1
+        # self.shapes_int= [data.shape[1]]
+        # self.data.reshape(data.shape[0],)
+        # np.insert(self.data, data, 0)
+        # self.n_views = 1
 
     def _todict(self):
         dico = {}
@@ -172,10 +180,10 @@ class Metriclearn_array(ma.MaskedArray, np.ndarray):
                 raise ValueError("Values in views_ind are not in a correct "
                                  + "range for the provided data.")
             self.view_mode_ = "slices"
-            n_views = views_ind.shape[0]-1
+            n_views = views_ind.shape[0] - 1
         else:
             raise ValueError("The format of views_ind is not "
-                                     + "supported.")
+                             + "supported.")
 
         return (views_ind, n_views)
 
@@ -218,11 +226,10 @@ class DataSample(dict):
 
         # The dictionary that contains the sample
         super(DataSample, self).__init__(kwargs)
-        self._data = None # Metriclearn_array(np.zeros((0,0)))
+        self._data = None  # Metriclearn_array(np.zeros((0,0)))
         if data is not None:
             self._data = Metriclearn_array(data)
 
-
     @property
     def data(self):
         """Metriclearn_array"""
@@ -231,7 +238,8 @@ class DataSample(dict):
 
     @data.setter
     def data(self, data):
-        if isinstance(data, (Metriclearn_array, np.ndarray, ma.MaskedArray, np.generic)):
+        if isinstance(data, (Metriclearn_array, np.ndarray,
+                             ma.MaskedArray, np.generic)):
             self._data = data
         else:
             raise TypeError("sample should be a Metriclearn_array.")
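
# Illustrative sketch, not part of the patch: the view bookkeeping behind
# Metriclearn_array -- per-view widths (`shapes_int`) become column offsets, so
# get_view is a plain slice of the stacked array.  numpy only; the real class
# adds masking on top.
import numpy as np

view_a = np.arange(12).reshape(4, 3)  # 4 examples, 3 features
view_b = np.arange(8).reshape(4, 2)   # 4 examples, 2 features
data = np.hstack([view_a, view_b])
shapes_int = [view_a.shape[1], view_b.shape[1]]

def get_view(view):
    start = int(np.sum(shapes_int[:view]))
    return data[:, start:start + shapes_int[view]]

print(np.array_equal(get_view(1), view_b))  # True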
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py
index 09b863bcde87acf45062b81eb14fa87a57123515..05e4cd05af16e4202eb5f8db07af2891510ca900 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py
@@ -1,14 +1,12 @@
 import itertools
 import math
-import inspect
-import os
 
 import numpy as np
 
+from .fusion_utils import BaseFusionClassifier
 from ...multiview.multiview_utils import ConfigGenerator, \
     get_available_monoview_classifiers, \
     BaseMultiviewClassifier
-from .fusion_utils import BaseFusionClassifier
 from ...utils.dataset import get_examples_views_indices
 
 
@@ -34,17 +32,22 @@ class DiversityFusionClassifier(BaseMultiviewClassifier,
                                                                  view_indices)
         # TODO : Finer analysis, may support a bit of mutliclass
         if np.unique(y[train_indices]).shape[0] > 2:
-            raise ValueError("Multiclass not supported, classes used : {}".format(np.unique(y[train_indices])))
+            raise ValueError(
+                "Multiclass not supported, classes used : {}".format(
+                    np.unique(y[train_indices])))
         if self.monoview_estimators is None:
             self.monoview_estimators = []
-            for classifier_idx, classifier_name in enumerate(self.classifier_names):
+            for classifier_idx, classifier_name in enumerate(
+                    self.classifier_names):
                 self.monoview_estimators.append([])
                 for idx, view_idx in enumerate(view_indices):
-                    estimator = self.init_monoview_estimator(classifier_name, self.classifier_configs)
-                    estimator.fit(X.get_v(view_idx, train_indices), y[train_indices])
+                    estimator = self.init_monoview_estimator(classifier_name,
+                                                             self.classifier_configs)
+                    estimator.fit(X.get_v(view_idx, train_indices),
+                                  y[train_indices])
                     self.monoview_estimators[classifier_idx].append(estimator)
         else:
-            pass #TODO
+            pass  # TODO
         self.choose_combination(X, y, train_indices, view_indices)
         return self
 
@@ -54,14 +57,16 @@ class DiversityFusionClassifier(BaseMultiviewClassifier,
                                                                    example_indices,
                                                                    view_indices)
         nb_class = X.get_nb_class()
-        if nb_class>2:
-            nb_class=3
+        if nb_class > 2:
+            nb_class = 3
         votes = np.zeros((len(example_indices), nb_class), dtype=float)
-        monoview_predictions = [monoview_estimator.predict(X.get_v(view_idx, example_indices))
-                                for view_idx, monoview_estimator
-                                in zip(view_indices, self.monoview_estimators)]
+        monoview_predictions = [
+            monoview_estimator.predict(X.get_v(view_idx, example_indices))
+            for view_idx, monoview_estimator
+            in zip(view_indices, self.monoview_estimators)]
         for idx, example_index in enumerate(example_indices):
-            for monoview_estimator_index, monoview_prediciton in enumerate(monoview_predictions):
+            for monoview_estimator_index, monoview_prediciton in enumerate(
+                    monoview_predictions):
                 if int(monoview_prediciton[idx]) == -100:
                     votes[idx, 2] += 1
                 else:
@@ -71,8 +76,8 @@ class DiversityFusionClassifier(BaseMultiviewClassifier,
 
     def get_classifiers_decisions(self, X, view_indices, examples_indices):
         classifiers_decisions = np.zeros((len(self.monoview_estimators),
-                                              len(view_indices),
-                                              len(examples_indices)))
+                                          len(view_indices),
+                                          len(examples_indices)))
         for estimator_idx, estimator in enumerate(self.monoview_estimators):
             for idx, view_index in enumerate(view_indices):
                 classifiers_decisions[estimator_idx, idx, :] = estimator[
@@ -108,9 +113,10 @@ class GlobalDiversityFusionClassifier(DiversityFusionClassifier):
                 y[examples_indices])
         best_combi_index = np.argmax(div_measure)
         best_combination = combis[best_combi_index]
-        self.monoview_estimators = [self.monoview_estimators[classifier_index][view_index]
-                                    for view_index, classifier_index
-                                    in enumerate(best_combination)]
+        self.monoview_estimators = [
+            self.monoview_estimators[classifier_index][view_index]
+            for view_index, classifier_index
+            in enumerate(best_combination)]
 
 
 class CoupleDiversityFusionClassifier(DiversityFusionClassifier):
@@ -121,8 +127,8 @@ class CoupleDiversityFusionClassifier(DiversityFusionClassifier):
         for combinations_index, combination in enumerate(combinations):
             combis[combinations_index] = combination
             combi_with_view = [(viewIndex, combiIndex) for viewIndex, combiIndex
-                             in
-                             enumerate(combination)]
+                               in
+                               enumerate(combination)]
             binomes = itertools.combinations(combi_with_view, 2)
             nb_binomes = int(
                 math.factorial(nb_views) / 2 / math.factorial(nb_views - 2))
@@ -135,16 +141,15 @@ class CoupleDiversityFusionClassifier(DiversityFusionClassifier):
                         classifiers_decisions[classifier_index_1, view_index_1],
                         classifiers_decisions[classifier_index_2, view_index_2],
                         y[examples_indices])
-                    )
+                )
                 couple_diversities[binome_index] = couple_diversity
             div_measure[combinations_index] = np.mean(couple_diversities)
         best_combi_index = np.argmax(div_measure)
         best_combination = combis[best_combi_index]
-        self.monoview_estimators = [self.monoview_estimators[classifier_index][view_index]
-                                    for view_index, classifier_index
-                                    in enumerate(best_combination)]
-
-
+        self.monoview_estimators = [
+            self.monoview_estimators[classifier_index][view_index]
+            for view_index, classifier_index
+            in enumerate(best_combination)]
 
 #
 # def CQ_div_measure(classifiersNames, classifiersDecisions, measurement,
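
# Illustrative sketch, not part of the patch: the couple-diversity selection in
# CoupleDiversityFusionClassifier above, with the disagreement rate standing in
# for the configurable diversity measure and combinations_with_replacement as
# an assumed way of enumerating one classifier per view.
import itertools
import numpy as np

# decisions[classifier, view, example]
decisions = np.array([[[0, 1, 1, 0, 1], [0, 0, 1, 0, 1]],
                      [[1, 1, 0, 0, 1], [0, 1, 1, 1, 1]]])
n_classifiers, n_views, _ = decisions.shape

def disagreement(pred_a, pred_b):
    return np.mean(pred_a != pred_b)

combis = list(itertools.combinations_with_replacement(range(n_classifiers),
                                                      n_views))
div_measure = [np.mean([disagreement(decisions[c1, v1], decisions[c2, v2])
                        for (v1, c1), (v2, c2) in
                        itertools.combinations(enumerate(combi), 2)])
               for combi in combis]
best_combination = combis[int(np.argmax(div_measure))]
print(best_combination)  # one classifier index per view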
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/fusion_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/fusion_utils.py
index 4754b9a96d234f705a60754495be051b6b267f96..7ad8a76bb9678127919d2aa6b9210f7147f03675 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/fusion_utils.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/fusion_utils.py
@@ -1,6 +1,5 @@
 import inspect
 
-
 from ...multiview.multiview_utils import get_monoview_classifier
 from ...utils.multiclass import get_mc_estim
 
@@ -35,4 +34,3 @@ class BaseFusionClassifier():
 
         return get_mc_estim(estimator, random_state=self.random_state,
                             multiview=False, multiclass=multiclass)
-
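
# Illustrative sketch, not part of the patch: what init_monoview_estimator
# boils down to -- build a monoview estimator from a name and a config dict,
# then wrap it for multiclass problems.  OneVsRestClassifier stands in for
# whatever get_mc_estim actually returns; `monoview_classes` is a hypothetical
# registry.
from sklearn.multiclass import OneVsRestClassifier
from sklearn.tree import DecisionTreeClassifier

monoview_classes = {"decision_tree": DecisionTreeClassifier}

def init_monoview_estimator(name, config=None, multiclass=False,
                            random_state=None):
    estimator = monoview_classes[name](random_state=random_state,
                                       **(config or {}))
    return OneVsRestClassifier(estimator) if multiclass else estimator

print(init_monoview_estimator("decision_tree", {"max_depth": 3},
                              multiclass=True))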
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/jumbo_fusion_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/jumbo_fusion_utils.py
index 1c28f931033307328ef84882fcb386feee1021df..f657c6c2d4dd65e6453b28a23870e32923ad0b85 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/jumbo_fusion_utils.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/jumbo_fusion_utils.py
@@ -4,17 +4,19 @@ from .late_fusion_utils import LateFusionClassifier
 from ...monoview.monoview_utils import CustomRandint
 from ...utils.dataset import get_examples_views_indices
 
+
 class BaseJumboFusion(LateFusionClassifier):
 
     def __init__(self, random_state, classifiers_names=None,
                  classifier_configs=None,
                  nb_cores=1, weights=None, nb_monoview_per_view=1, rs=None):
-        LateFusionClassifier.__init__(self, random_state, classifiers_names=classifiers_names,
-                                             classifier_configs=classifier_configs,
-                                             nb_cores=nb_cores, weights=weights,
-                                              rs=rs)
+        LateFusionClassifier.__init__(self, random_state,
+                                      classifiers_names=classifiers_names,
+                                      classifier_configs=classifier_configs,
+                                      nb_cores=nb_cores, weights=weights,
+                                      rs=rs)
         self.param_names += ["nb_monoview_per_view", ]
-        self.distribs += [CustomRandint(1,10)]
+        self.distribs += [CustomRandint(1, 10)]
         self.nb_monoview_per_view = nb_monoview_per_view
 
     def set_params(self, nb_monoview_per_view=1, **params):
@@ -22,44 +24,58 @@ class BaseJumboFusion(LateFusionClassifier):
         LateFusionClassifier.set_params(self, **params)
 
     def predict(self, X, example_indices=None, view_indices=None):
-        example_indices, view_indices = get_examples_views_indices(X, example_indices, view_indices)
-        monoview_decisions = self.predict_monoview(X, example_indices=example_indices, view_indices=view_indices)
+        example_indices, view_indices = get_examples_views_indices(X,
+                                                                   example_indices,
+                                                                   view_indices)
+        monoview_decisions = self.predict_monoview(X,
+                                                   example_indices=example_indices,
+                                                   view_indices=view_indices)
         return self.aggregation_estimator.predict(monoview_decisions)
 
     def fit(self, X, y, train_indices=None, view_indices=None):
-        train_indices, view_indices = get_examples_views_indices(X, train_indices, view_indices)
-        self.init_classifiers(len(view_indices), nb_monoview_per_view=self.nb_monoview_per_view)
-        self.fit_monoview_estimators(X, y, train_indices=train_indices, view_indices=view_indices)
-        monoview_decisions = self.predict_monoview(X, example_indices=train_indices, view_indices=view_indices)
+        train_indices, view_indices = get_examples_views_indices(X,
+                                                                 train_indices,
+                                                                 view_indices)
+        self.init_classifiers(len(view_indices),
+                              nb_monoview_per_view=self.nb_monoview_per_view)
+        self.fit_monoview_estimators(X, y, train_indices=train_indices,
+                                     view_indices=view_indices)
+        monoview_decisions = self.predict_monoview(X,
+                                                   example_indices=train_indices,
+                                                   view_indices=view_indices)
         self.aggregation_estimator.fit(monoview_decisions, y[train_indices])
         return self
 
-    def fit_monoview_estimators(self, X, y, train_indices=None, view_indices=None):
-        if np.unique(y).shape[0]>2:
-            multiclass=True
+    def fit_monoview_estimators(self, X, y, train_indices=None,
+                                view_indices=None):
+        if np.unique(y).shape[0] > 2:
+            multiclass = True
         else:
-            multiclass=False
-        self.monoview_estimators = [[self.init_monoview_estimator(classifier_name,
-                                                                  self.classifier_configs[classifier_index],
-                                                                  multiclass=multiclass)
-                                     for classifier_index, classifier_name
-                                     in enumerate(self.classifiers_names)]
-                                    for _ in view_indices]
+            multiclass = False
+        self.monoview_estimators = [
+            [self.init_monoview_estimator(classifier_name,
+                                          self.classifier_configs[
+                                              classifier_index],
+                                          multiclass=multiclass)
+             for classifier_index, classifier_name
+             in enumerate(self.classifiers_names)]
+            for _ in view_indices]
 
-        self.monoview_estimators = [[estimator.fit(X.get_v(view_indices[idx], train_indices), y[train_indices])
+        self.monoview_estimators = [[estimator.fit(
+            X.get_v(view_indices[idx], train_indices), y[train_indices])
                                      for estimator in view_estimators]
-                                    for idx, view_estimators in enumerate(self.monoview_estimators)]
+                                    for idx, view_estimators in
+                                    enumerate(self.monoview_estimators)]
         return self
 
     def predict_monoview(self, X, example_indices=None, view_indices=None):
-        monoview_decisions = np.zeros((len(example_indices), len(view_indices)*len(self.classifiers_names)))
+        monoview_decisions = np.zeros((len(example_indices),
+                                       len(view_indices) * len(
+                                           self.classifiers_names)))
         for idx, view_estimators in enumerate(self.monoview_estimators):
             for estimator_index, estimator in enumerate(view_estimators):
-                monoview_decisions[:, len(self.classifiers_names)*idx+estimator_index] = estimator.predict(X.get_v(view_indices[idx],
-                                                                                                                   example_indices))
+                monoview_decisions[:, len(
+                    self.classifiers_names) * idx + estimator_index] = estimator.predict(
+                    X.get_v(view_indices[idx],
+                            example_indices))
         return monoview_decisions
-
-
-
-
-
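
# Illustrative sketch, not part of the patch: the jumbo-fusion flow above, with
# one monoview estimator per view for brevity -- per-view predictions become
# the columns of a decision matrix on which an aggregation estimator is
# trained.  Plain sklearn/numpy only.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier

rng = np.random.RandomState(0)
views = [rng.rand(30, 4), rng.rand(30, 6)]  # two views of the same 30 examples
y = rng.randint(0, 2, 30)
train, test = np.arange(20), np.arange(20, 30)

monoview_estimators = [DecisionTreeClassifier(random_state=0)
                       .fit(view[train], y[train]) for view in views]
monoview_decisions = np.column_stack(
    [est.predict(view[train]) for est, view in zip(monoview_estimators, views)])
aggregation_estimator = LogisticRegression().fit(monoview_decisions, y[train])

test_decisions = np.column_stack(
    [est.predict(view[test]) for est, view in zip(monoview_estimators, views)])
print(aggregation_estimator.predict(test_decisions))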
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/late_fusion_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/late_fusion_utils.py
index d8ebceff390ee48a1cd7cce5028f03eab3f2fe38..e2e8da5db99dd6a63fa35bd360c7b6fdff951c6a 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/late_fusion_utils.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/late_fusion_utils.py
@@ -1,10 +1,8 @@
 import numpy as np
-import warnings
-from scipy.stats import uniform
 
-
-from ...multiview.multiview_utils import BaseMultiviewClassifier, get_available_monoview_classifiers, get_monoview_classifier, ConfigGenerator
 from .fusion_utils import BaseFusionClassifier
+from ...multiview.multiview_utils import BaseMultiviewClassifier, \
+    get_available_monoview_classifiers, ConfigGenerator
 from ...utils.dataset import get_examples_views_indices
 
 
@@ -24,7 +22,8 @@ class ClassifierDistribution:
 class ClassifierCombinator:
 
     def __init__(self, need_probas=False):
-        self.available_classifiers = get_available_monoview_classifiers(need_probas)
+        self.available_classifiers = get_available_monoview_classifiers(
+            need_probas)
 
     def rvs(self, random_state=None):
         return ClassifierDistribution(seed=random_state.randint(1),
@@ -47,7 +46,7 @@ class ConfigDistribution:
 
 class MultipleConfigGenerator:
 
-    def __init__(self,):
+    def __init__(self):
         self.available_classifiers = get_available_monoview_classifiers()
 
     def rvs(self, random_state=None):
@@ -62,14 +61,14 @@ class WeightDistribution:
         self.distribution_type = distribution_type
 
     def draw(self, nb_view):
-        if self.distribution_type=="uniform":
+        if self.distribution_type == "uniform":
             return self.random_state.random_sample(nb_view)
 
 
 class WeightsGenerator:
 
     def __init__(self, distibution_type="uniform"):
-        self.distribution_type=distibution_type
+        self.distribution_type = distibution_type
 
     def rvs(self, random_state=None):
         return WeightDistribution(seed=random_state.randint(1),
@@ -86,29 +85,31 @@ class LateFusionClassifier(BaseMultiviewClassifier, BaseFusionClassifier):
         self.classifier_configs = classifier_configs
         self.nb_cores = nb_cores
         self.weights = weights
-        self.rs=rs
-        self.param_names = ["classifiers_names", "classifier_configs", "weights", "rs"]
-        self.distribs =[ClassifierCombinator(need_probas=self.need_probas),
-                        MultipleConfigGenerator(),
-                        WeightsGenerator(),
-                        np.arange(1000)]
+        self.rs = rs
+        self.param_names = ["classifiers_names", "classifier_configs",
+                            "weights", "rs"]
+        self.distribs = [ClassifierCombinator(need_probas=self.need_probas),
+                         MultipleConfigGenerator(),
+                         WeightsGenerator(),
+                         np.arange(1000)]
 
     def fit(self, X, y, train_indices=None, view_indices=None):
         train_indices, view_indices = get_examples_views_indices(X,
-                                                                  train_indices,
-                                                                  view_indices)
-        if np.unique(y).shape[0]>2:
-            multiclass=True
+                                                                 train_indices,
+                                                                 view_indices)
+        if np.unique(y).shape[0] > 2:
+            multiclass = True
         else:
-            multiclass=False
+            multiclass = False
         self.init_params(len(view_indices), multiclass)
         if np.unique(y[train_indices]).shape[0] > 2:
             raise ValueError("Multiclass not supported")
-        self.monoview_estimators = [monoview_estimator.fit(X.get_v(view_index, train_indices),
-                                                           y[train_indices])
-                                    for view_index, monoview_estimator
-                                    in zip(view_indices,
-                                           self.monoview_estimators)]
+        self.monoview_estimators = [
+            monoview_estimator.fit(X.get_v(view_index, train_indices),
+                                   y[train_indices])
+            for view_index, monoview_estimator
+            in zip(view_indices,
+                   self.monoview_estimators)]
         return self
 
     def init_params(self, nb_view, mutliclass=False):
@@ -117,13 +118,14 @@ class LateFusionClassifier(BaseMultiviewClassifier, BaseFusionClassifier):
         elif isinstance(self.weights, WeightDistribution):
             self.weights = self.weights.draw(nb_view)
         else:
-            self.weights = self.weights/np.sum(self.weights)
+            self.weights = self.weights / np.sum(self.weights)
 
         self.init_classifiers(nb_view)
 
         self.monoview_estimators = [
             self.init_monoview_estimator(classifier_name,
-                                         self.classifier_configs[classifier_index],
+                                         self.classifier_configs[
+                                             classifier_index],
                                          classifier_index=classifier_index,
                                          multiclass=mutliclass)
             for classifier_index, classifier_name
@@ -135,19 +137,22 @@ class LateFusionClassifier(BaseMultiviewClassifier, BaseFusionClassifier):
         else:
             nb_clfs = nb_view
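+        # classifiers_names and classifier_configs may be given explicitly, drawn
+        # from a distribution during hyper-parameter search, or left as None, in
+        # which case each estimator defaults to a decision tree with its default
+        # configuration.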
         if isinstance(self.classifiers_names, ClassifierDistribution):
-            self.classifiers_names = self.classifiers_names.draw(nb_clfs, self.rs)
+            self.classifiers_names = self.classifiers_names.draw(nb_clfs,
+                                                                 self.rs)
         elif self.classifiers_names is None:
             self.classifiers_names = ["decision_tree" for _ in range(nb_clfs)]
 
         if isinstance(self.classifier_configs, ConfigDistribution):
-            self.classifier_configs = self.classifier_configs.draw(nb_clfs, self.rs)
+            self.classifier_configs = self.classifier_configs.draw(nb_clfs,
+                                                                   self.rs)
         elif isinstance(self.classifier_configs, dict):
-            self.classifier_configs = [{classifier_name: self.classifier_configs[classifier_name]} for classifier_name in self.classifiers_names]
+            self.classifier_configs = [
+                {classifier_name: self.classifier_configs[classifier_name]} for
+                classifier_name in self.classifiers_names]
         elif self.classifier_configs is None:
             self.classifier_configs = [None for _ in range(nb_clfs)]
 
-
-# def verif_clf_views(self, classifier_names, nb_view):
+    # def verif_clf_views(self, classifier_names, nb_view):
     #     if classifier_names is None:
     #         if nb_view is None:
     #             raise AttributeError(self.__class__.__name__+" must have either classifier_names or nb_views provided.")
@@ -163,6 +168,6 @@ class LateFusionClassifier(BaseMultiviewClassifier, BaseFusionClassifier):
     #                 warnings.warn("nb_view and classifier_names not matching, choosing nb_view random classifiers in classifier_names.", UserWarning)
     #                 self.classifiers_names = self.get_classifiers(classifier_names, nb_view)
 
-
     def get_classifiers(self, classifiers_names, nb_choices):
-        return self.random_state.choice(classifiers_names, size=nb_choices, replace=True)
+        return self.random_state.choice(classifiers_names, size=nb_choices,
+                                        replace=True)
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/utils.py
index 8158ec7ff13339a48eab1faa7caa28afb6f9f262..6aa2a7bedbf7d21501983827d2463fb14f4a527b 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/utils.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/utils.py
@@ -1,16 +1,5 @@
-import logging
-import math
-import time
-from collections import defaultdict
-
 import numpy as np
-import numpy.ma as ma
-import scipy
 from sklearn.base import BaseEstimator, ClassifierMixin
-from sklearn.utils.validation import check_is_fitted
-
-from ... import metrics
-from ... import monoview_classifiers
 
 
 def get_names(classed_list):
@@ -66,13 +55,10 @@ class BaseMultiviewClassifier(BaseEstimator, ClassifierMixin):
         return "No detailed interpretation function"
 
 
-def get_train_views_indices(dataset, train_indices, view_indices,):
+def get_train_views_indices(dataset, train_indices, view_indices):
     """This function  is used to get all the examples indices and view indices if needed"""
     if view_indices is None:
         view_indices = np.arange(dataset.nb_view)
     if train_indices is None:
         train_indices = range(dataset.get_nb_examples())
     return train_indices, view_indices
-
-
-
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/bayesian_inference_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/bayesian_inference_fusion.py
index 2d9c903b227a985981e500a54d460367fb393c38..5c5ae1c2d29d9a696c56baa5fef0a713aaeecdbc 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/bayesian_inference_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/bayesian_inference_fusion.py
@@ -11,18 +11,18 @@ class BayesianInferenceClassifier(LateFusionClassifier):
     def __init__(self, random_state, classifiers_names=None,
                  classifier_configs=None, nb_cores=1, weights=None,
                  rs=None):
-        self.need_probas=True
+        self.need_probas = True
         LateFusionClassifier.__init__(self, random_state=random_state,
-                                             classifiers_names=classifiers_names,
-                                             classifier_configs=classifier_configs,
-                                             nb_cores=nb_cores,
-                                             weights=weights,
-                                                          rs=rs)
+                                      classifiers_names=classifiers_names,
+                                      classifier_configs=classifier_configs,
+                                      nb_cores=nb_cores,
+                                      weights=weights,
+                                      rs=rs)
 
     def predict(self, X, example_indices=None, view_indices=None):
         example_indices, view_indices = get_examples_views_indices(X,
-                                                                    example_indices,
-                                                                    view_indices)
+                                                                   example_indices,
+                                                                   view_indices)
 
         if sum(self.weights) != 1.0:
             self.weights = self.weights / sum(self.weights)
@@ -30,8 +30,9 @@ class BayesianInferenceClassifier(LateFusionClassifier):
         view_scores = []
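+        # Naive-Bayes-style late fusion: each view's class probabilities are raised
+        # to the power of the view's weight, multiplied across the views, and the
+        # class with the largest product is predicted.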
         for index, view_index in enumerate(view_indices):
             view_scores.append(np.power(
-                self.monoview_estimators[index].predict_proba(X.get_v(view_index,
-                                                                      example_indices)),
+                self.monoview_estimators[index].predict_proba(
+                    X.get_v(view_index,
+                            example_indices)),
                 self.weights[index]))
         view_scores = np.array(view_scores)
         predicted_labels = np.argmax(np.prod(view_scores, axis=0), axis=1)
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion.py
index b89fa1e4a397590c7eddf7e2dea8574f0095f681..0c66e5619ba5091576808f9919583ab165c47f2f 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion.py
@@ -1,7 +1,7 @@
 import numpy as np
 
-from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils import GlobalDiversityFusionClassifier
-
+from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils import \
+    GlobalDiversityFusionClassifier
 
 classifier_class_name = "DifficultyFusion"
 
@@ -13,20 +13,16 @@ class DifficultyFusion(GlobalDiversityFusionClassifier):
         scores = np.zeros((nb_view, nb_examples), dtype=int)
         for view_index, classifier_index in enumerate(combination):
             scores[view_index, :] = np.logical_not(
-                    np.logical_xor(classifiers_decisions[classifier_index,
-                                                         view_index],
-                                   y)
-                )
+                np.logical_xor(classifiers_decisions[classifier_index,
+                                                     view_index],
+                               y)
+            )
         # Table of the number of views that succeeded for each example:
         difficulty_scores = np.sum(scores, axis=0)
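+        # The difficulty score is the variance, over k = 0, ..., n_views, of the
+        # number of examples that exactly k views classified correctly.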
 
         difficulty_score = np.var(
-                np.array([
-                             np.sum((difficulty_scores == view_index))
-                             for view_index in range(len(combination)+1)])
-                )
+            np.array([
+                np.sum((difficulty_scores == view_index))
+                for view_index in range(len(combination) + 1)])
+        )
         return difficulty_score
-
-
-
-
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion.py
index 2dec7814a5f383471685aedcf75c1286e66b8273..cee032a878b8ba9e062654f685317f193607b014 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion.py
@@ -1,12 +1,14 @@
 import numpy as np
 
-from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils import CoupleDiversityFusionClassifier
-
+from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils import \
+    CoupleDiversityFusionClassifier
 
 classifier_class_name = "DisagreeFusion"
 
 
 class DisagreeFusion(CoupleDiversityFusionClassifier):
 
-    def diversity_measure(self, first_classifier_decision, second_classifier_decision, _):
-        return np.logical_xor(first_classifier_decision, second_classifier_decision)
+    def diversity_measure(self, first_classifier_decision,
+                          second_classifier_decision, _):
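+        # Pairwise diversity: 1 wherever the two classifiers' decisions differ (XOR).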
+        return np.logical_xor(first_classifier_decision,
+                              second_classifier_decision)
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion.py
index 6d5e846b43db9edfa5f7a3bdb06162a438e16c95..3c3d5aef7c6453540e06083b37bba0f1935ae62b 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion.py
@@ -1,7 +1,7 @@
 import numpy as np
 
-from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils import GlobalDiversityFusionClassifier
-
+from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils import \
+    GlobalDiversityFusionClassifier
 
 classifier_class_name = "EntropyFusion"
 
@@ -13,12 +13,14 @@ class EntropyFusion(GlobalDiversityFusionClassifier):
         scores = np.zeros((nb_view, nb_examples), dtype=int)
         for view_index, classifier_index in enumerate(combination):
             scores[view_index] = np.logical_not(
-                np.logical_xor(classifiers_decisions[classifier_index, view_index],
-                               y)
+                np.logical_xor(
+                    classifiers_decisions[classifier_index, view_index],
+                    y)
             )
         entropy_scores = np.sum(scores, axis=0)
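+        # entropy_scores counts, per example, the views whose classifier was right;
+        # the entropy measure averages min(#right, #wrong) / (nb_view - nb_view // 2)
+        # over all examples.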
         nb_view_matrix = np.zeros((nb_examples),
-                                dtype=int) + nb_view - entropy_scores
-        entropy_score = np.mean(np.minimum(entropy_scores, nb_view_matrix).astype(float) / (
-                        nb_view - int(nb_view / 2)))
+                                  dtype=int) + nb_view - entropy_scores
+        entropy_score = np.mean(
+            np.minimum(entropy_scores, nb_view_matrix).astype(float) / (
+                    nb_view - int(nb_view / 2)))
         return entropy_score
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/majority_voting_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/majority_voting_fusion.py
index 4047402a06aee541e9b85e887170181bf68e63e7..23c102b655297d0a68f8aed6309da6eda51206c0 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/majority_voting_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/majority_voting_fusion.py
@@ -1,17 +1,20 @@
 import numpy as np
 
-from ..multiview_classifiers.additions.late_fusion_utils import LateFusionClassifier
+from ..multiview_classifiers.additions.late_fusion_utils import \
+    LateFusionClassifier
 from ..utils.dataset import get_examples_views_indices
 
-classifier_class_name =  "MajorityVoting"
+classifier_class_name = "MajorityVoting"
+
 
 class VotingIndecision(Exception):
     pass
 
+
 class MajorityVoting(LateFusionClassifier):
     def __init__(self, random_state, classifiers_names=None,
                  classifier_configs=None, weights=None, nb_cores=1, rs=None):
-        self.need_probas=False
+        self.need_probas = False
         LateFusionClassifier.__init__(self, random_state=random_state,
                                       classifiers_names=classifiers_names,
                                       classifier_configs=classifier_configs,
@@ -25,17 +28,24 @@ class MajorityVoting(LateFusionClassifier):
                                                                      view_indices)
 
         n_examples = len(examples_indices)
-        votes = np.zeros((n_examples, X.get_nb_class(example_indices)), dtype=float)
-        monoview_decisions = np.zeros((len(examples_indices), X.nb_view), dtype=int)
+        votes = np.zeros((n_examples, X.get_nb_class(example_indices)),
+                         dtype=float)
+        monoview_decisions = np.zeros((len(examples_indices), X.nb_view),
+                                      dtype=int)
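+        # Each view's estimator casts a vote for its predicted class, weighted by
+        # the corresponding entry of self.weights; if as many classes tie for the
+        # maximum as there are views, no majority exists and VotingIndecision is
+        # raised.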
         for index, view_index in enumerate(views_indices):
-            monoview_decisions[:, index] = self.monoview_estimators[index].predict(
+            monoview_decisions[:, index] = self.monoview_estimators[
+                index].predict(
                 X.get_v(view_index, examples_indices))
         for example_index in range(n_examples):
-            for view_index, feature_classification in enumerate(monoview_decisions[example_index, :]):
-                votes[example_index, feature_classification] += self.weights[view_index]
-            nb_maximum = len(np.where(votes[example_index] == max(votes[example_index]))[0])
+            for view_index, feature_classification in enumerate(
+                    monoview_decisions[example_index, :]):
+                votes[example_index, feature_classification] += self.weights[
+                    view_index]
+            nb_maximum = len(
+                np.where(votes[example_index] == max(votes[example_index]))[0])
             if nb_maximum == X.nb_view:
-                raise VotingIndecision("Majority voting can't decide, each classifier has voted for a different class")
+                raise VotingIndecision(
+                    "Majority voting can't decide, each classifier has voted for a different class")
 
         predicted_labels = np.argmax(votes, axis=1)
         # Can be upgraded by restarting a new classification process if
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/svm_jumbo_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/svm_jumbo_fusion.py
index 76637c59c721ba391f77566ccb725b9b80f9aa84..d9a2e38d21a9be49690ca372616ebde60a438f55 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/svm_jumbo_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/svm_jumbo_fusion.py
@@ -5,19 +5,23 @@ from ..monoview.monoview_utils import CustomUniform, CustomRandint
 
 classifier_class_name = "SVMJumboFusion"
 
+
 class SVMJumboFusion(BaseJumboFusion):
 
     def __init__(self, random_state=None, classifiers_names=None,
                  classifier_configs=None, nb_cores=1, weights=None,
-                 nb_monoview_per_view=1,  C=1.0, kernel="rbf", degree=2, rs=None):
-        self.need_probas=False
-        BaseJumboFusion.__init__(self, random_state, classifiers_names=classifiers_names,
-                                             classifier_configs=classifier_configs,
-                                             nb_cores=nb_cores, weights=weights,
-                                             nb_monoview_per_view=nb_monoview_per_view,
-                                             rs=rs)
+                 nb_monoview_per_view=1, C=1.0, kernel="rbf", degree=2,
+                 rs=None):
+        self.need_probas = False
+        BaseJumboFusion.__init__(self, random_state,
+                                 classifiers_names=classifiers_names,
+                                 classifier_configs=classifier_configs,
+                                 nb_cores=nb_cores, weights=weights,
+                                 nb_monoview_per_view=nb_monoview_per_view,
+                                 rs=rs)
         self.param_names += ["C", "kernel", "degree"]
-        self.distribs += [CustomUniform(), ["rbf", "poly", "linear"], CustomRandint(2, 5)]
+        self.distribs += [CustomUniform(), ["rbf", "poly", "linear"],
+                          CustomRandint(2, 5)]
         self.aggregation_estimator = SVC(C=C, kernel=kernel, degree=degree)
         self.C = C
         self.kernel = kernel
@@ -30,7 +34,3 @@ class SVMJumboFusion(BaseJumboFusion):
         self.kernel = kernel
         self.aggregation_estimator.set_params(C=C, kernel=kernel, degree=degree)
         return self
-
-
-
-
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py
index de91840052caf79a10ca229810e78a22ec9d9f25..3da191fb0b3ab94aadb4cfb7fd0258568c120b72 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py
@@ -1,15 +1,13 @@
 import numpy as np
-import inspect
 
-# from ..utils.dataset import get_v
-
-from ..utils.dataset import get_examples_views_indices
+from multiview_platform.mono_multi_view_classifiers import monoview_classifiers
+from .additions.fusion_utils import BaseFusionClassifier
 from ..multiview.multiview_utils import get_available_monoview_classifiers, \
     BaseMultiviewClassifier, ConfigGenerator
-from .additions.fusion_utils import BaseFusionClassifier
+from ..utils.dataset import get_examples_views_indices
 from ..utils.multiclass import get_mc_estim, MultiClassWrapper
 
-from  multiview_platform.mono_multi_view_classifiers import monoview_classifiers
+# from ..utils.dataset import get_v
 
 classifier_class_name = "WeightedLinearEarlyFusion"
 
@@ -28,6 +26,7 @@ class WeightedLinearEarlyFusion(BaseMultiviewClassifier, BaseFusionClassifier):
     Attributes
     ----------
     """
+
     def __init__(self, random_state=None, view_weights=None,
                  monoview_classifier_name="decision_tree",
                  monoview_classifier_config={}):
@@ -36,25 +35,29 @@ class WeightedLinearEarlyFusion(BaseMultiviewClassifier, BaseFusionClassifier):
         self.monoview_classifier_name = monoview_classifier_name
         self.short_name = "early fusion " + self.monoview_classifier_name
         if monoview_classifier_name in monoview_classifier_config:
-            self.monoview_classifier_config = monoview_classifier_config[monoview_classifier_name]
+            self.monoview_classifier_config = monoview_classifier_config[
+                monoview_classifier_name]
         self.monoview_classifier_config = monoview_classifier_config
         monoview_classifier_module = getattr(monoview_classifiers,
-                                              self.monoview_classifier_name)
+                                             self.monoview_classifier_name)
         monoview_classifier_class = getattr(monoview_classifier_module,
-                                             monoview_classifier_module.classifier_class_name)
-        self.monoview_classifier = monoview_classifier_class(random_state=random_state,
-                                                             **self.monoview_classifier_config)
-        self.param_names = ["monoview_classifier_name", "monoview_classifier_config"]
+                                            monoview_classifier_module.classifier_class_name)
+        self.monoview_classifier = monoview_classifier_class(
+            random_state=random_state,
+            **self.monoview_classifier_config)
+        self.param_names = ["monoview_classifier_name",
+                            "monoview_classifier_config"]
         self.distribs = [get_available_monoview_classifiers(),
                          ConfigGenerator(get_available_monoview_classifiers())]
         self.classed_params = []
-        self.weird_strings={}
+        self.weird_strings = {}
 
     def set_params(self, monoview_classifier_name=None,
                    monoview_classifier_config=None, **params):
         self.monoview_classifier_name = monoview_classifier_name
-        self.monoview_classifier = self.init_monoview_estimator(monoview_classifier_name,
-                                       monoview_classifier_config)
+        self.monoview_classifier = self.init_monoview_estimator(
+            monoview_classifier_name,
+            monoview_classifier_config)
         self.monoview_classifier_config = self.monoview_classifier.get_params()
         self.short_name = "early fusion " + self.monoview_classifier_name
         return self
@@ -69,7 +72,7 @@ class WeightedLinearEarlyFusion(BaseMultiviewClassifier, BaseFusionClassifier):
         train_indices, X = self.transform_data_to_monoview(X, train_indices,
                                                            view_indices)
         if np.unique(y[train_indices]).shape[0] > 2 and \
-                not(isinstance(self.monoview_classifier, MultiClassWrapper)):
+                not (isinstance(self.monoview_classifier, MultiClassWrapper)):
             self.monoview_classifier = get_mc_estim(self.monoview_classifier,
                                                     self.random_state,
                                                     multiview=False,
@@ -82,7 +85,8 @@ class WeightedLinearEarlyFusion(BaseMultiviewClassifier, BaseFusionClassifier):
         predicted_labels = self.monoview_classifier.predict(X)
         return predicted_labels
 
-    def transform_data_to_monoview(self, dataset, example_indices, view_indices):
+    def transform_data_to_monoview(self, dataset, example_indices,
+                                   view_indices):
         """Here, we extract the data from the HDF5 dataset file and store all
         the concatenated views in one variable"""
         example_indices, self.view_indices = get_examples_views_indices(dataset,
@@ -111,9 +115,3 @@ class WeightedLinearEarlyFusion(BaseMultiviewClassifier, BaseFusionClassifier):
     #         self.monoview_classifier.set_params(**monoview_classifier_config[monoview_classifier_name])
     #     else:
     #         self.monoview_classifier.set_params(**monoview_classifier_config)
-
-
-
-
-
-
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_late_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_late_fusion.py
index 7e5c01aa980f591dfaa1d2a311dbcd34c46cf17e..32f4a71033d8d0c2e82804a45fc7c622e5b51598 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_late_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_late_fusion.py
@@ -1,6 +1,7 @@
 import numpy as np
 
-from ..multiview_classifiers.additions.late_fusion_utils import LateFusionClassifier
+from ..multiview_classifiers.additions.late_fusion_utils import \
+    LateFusionClassifier
 from ..utils.dataset import get_examples_views_indices
 
 classifier_class_name = "WeightedLinearLateFusion"
@@ -9,18 +10,21 @@ classifier_class_name = "WeightedLinearLateFusion"
 class WeightedLinearLateFusion(LateFusionClassifier):
     def __init__(self, random_state, classifiers_names=None,
                  classifier_configs=None, weights=None, nb_cores=1, rs=None):
-        self.need_probas=True
+        self.need_probas = True
         LateFusionClassifier.__init__(self, random_state=random_state,
                                       classifiers_names=classifiers_names,
                                       classifier_configs=classifier_configs,
-                                      nb_cores=nb_cores,weights=weights, rs=rs)
+                                      nb_cores=nb_cores, weights=weights, rs=rs)
 
     def predict(self, X, example_indices=None, view_indices=None):
-        example_indices, views_indices = get_examples_views_indices(X, example_indices, view_indices)
+        example_indices, views_indices = get_examples_views_indices(X,
+                                                                    example_indices,
+                                                                    view_indices)
         view_scores = []
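+        # Weighted-sum late fusion: each view's class probabilities are scaled by
+        # the view's weight, summed over the views, and argmax gives the label.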
         for index, viewIndex in enumerate(views_indices):
-            view_scores.append(np.array(self.monoview_estimators[index].predict_proba(
-                X.get_v(viewIndex, example_indices))) * self.weights[index])
+            view_scores.append(
+                np.array(self.monoview_estimators[index].predict_proba(
+                    X.get_v(viewIndex, example_indices))) * self.weights[index])
         view_scores = np.array(view_scores)
         predicted_labels = np.argmax(np.sum(view_scores, axis=0), axis=1)
         return predicted_labels
diff --git a/multiview_platform/mono_multi_view_classifiers/result_analysis.py b/multiview_platform/mono_multi_view_classifiers/result_analysis.py
index c5017e786f61912a9c4bc133a059b9021a11078a..a5a9a205effd2ba1b008d8745ebec35d6bd349f2 100644
--- a/multiview_platform/mono_multi_view_classifiers/result_analysis.py
+++ b/multiview_platform/mono_multi_view_classifiers/result_analysis.py
@@ -2,23 +2,17 @@
 import errno
 import logging
 import os
-import time
-import yaml
-import traceback
 
 import matplotlib as mpl
-from matplotlib.patches import Patch
-
 # Import third party modules
 import matplotlib.pyplot as plt
 import numpy as np
 import pandas as pd
 import plotly
+from matplotlib.patches import Patch
 
 # Import own Modules
 from .monoview.monoview_utils import MonoviewResult
-from . import metrics
-from .multiview.multiview_utils import MultiviewResult
 
 # Author-Info
 __author__ = "Baptiste Bauvin"
@@ -26,8 +20,9 @@ __status__ = "Prototype"  # Production, Development, Prototype
 
 
 def save_dict_to_text(dictionnary, output_file):
-    #TODO : smarter way must exist
-    output_file.write("Failed algorithms : \n\t"+ ",\n\t".join(dictionnary.keys())+".\n\n\n")
+    # TODO: a smarter way must exist
+    output_file.write("Failed algorithms : \n\t" + ",\n\t".join(
+        dictionnary.keys()) + ".\n\n\n")
     for key, value in dictionnary.items():
         output_file.write(key)
         output_file.write("\n\n")
@@ -36,17 +31,19 @@ def save_dict_to_text(dictionnary, output_file):
     return dictionnary.keys()
 
 
-def plot_results_noise(directory, noise_results, metric_to_plot, name, width=0.1):
+def plot_results_noise(directory, noise_results, metric_to_plot, name,
+                       width=0.1):
     avail_colors = ["tab:blue", "tab:orange", "tab:brown", "tab:gray",
                     "tab:olive", "tab:red", ]
-    colors ={}
+    colors = {}
     lengend_patches = []
     noise_levels = np.array([noise_level for noise_level, _ in noise_results])
-    df = pd.DataFrame(columns=['noise_level', 'classifier_name', 'mean_score', 'score_std'], )
-    if len(noise_results)>1:
+    df = pd.DataFrame(
+        columns=['noise_level', 'classifier_name', 'mean_score', 'score_std'], )
+    if len(noise_results) > 1:
         width = np.min(np.diff(noise_levels))
     for noise_level, noise_result in noise_results:
-        classifiers_names, meaned_metrics, metric_stds =  [], [], []
+        classifiers_names, meaned_metrics, metric_stds = [], [], []
         for noise_result in noise_result:
             classifier_name = noise_result[0].split("-")[0]
             if noise_result[1] is metric_to_plot:
@@ -58,29 +55,37 @@ def plot_results_noise(directory, noise_results, metric_to_plot, name, width=0.1
                         colors[classifier_name] = avail_colors.pop(0)
                     except IndexError:
                         colors[classifier_name] = "k"
-        classifiers_names, meaned_metrics, metric_stds = np.array(classifiers_names), np.array(meaned_metrics), np.array(metric_stds)
+        classifiers_names = np.array(classifiers_names)
+        meaned_metrics = np.array(meaned_metrics)
+        metric_stds = np.array(metric_stds)
         sorted_indices = np.argsort(-meaned_metrics)
         for index in sorted_indices:
             row = pd.DataFrame(
-                {'noise_level':noise_level, 'classifier_name':classifiers_names[index], 'mean_score':meaned_metrics[index],
-                         'score_std':metric_stds[index]}, index=[0])
+                {'noise_level': noise_level,
+                 'classifier_name': classifiers_names[index],
+                 'mean_score': meaned_metrics[index],
+                 'score_std': metric_stds[index]}, index=[0])
             df = pd.concat([df, row])
-            plt.bar(noise_level, meaned_metrics[index], yerr=metric_stds[index], width=0.5*width, label=classifiers_names[index], color=colors[classifiers_names[index]])
+            plt.bar(noise_level, meaned_metrics[index], yerr=metric_stds[index],
+                    width=0.5 * width, label=classifiers_names[index],
+                    color=colors[classifiers_names[index]])
     for classifier_name, color in colors.items():
         lengend_patches.append(Patch(facecolor=color, label=classifier_name))
-    plt.legend(handles=lengend_patches, loc='lower center', bbox_to_anchor=(0.5, 1.05), ncol=2)
+    plt.legend(handles=lengend_patches, loc='lower center',
+               bbox_to_anchor=(0.5, 1.05), ncol=2)
     plt.ylabel(metric_to_plot)
     plt.title(name)
     plt.xticks(noise_levels)
     plt.xlabel("Noise level")
-    plt.savefig(os.path.join(directory, name+"_noise_analysis.png"))
+    plt.savefig(os.path.join(directory, name + "_noise_analysis.png"))
     plt.close()
-    df.to_csv(os.path.join(directory, name+"_noise_analysis.csv"))
+    df.to_csv(os.path.join(directory, name + "_noise_analysis.csv"))
 
 
-def plot_metric_scores(train_scores, test_scores, names, nb_results, metric_name,
+def plot_metric_scores(train_scores, test_scores, names, nb_results,
+                       metric_name,
                        file_name,
-                       tag="", train_STDs=None, test_STDs=None, use_plotly=True):
+                       tag="", train_STDs=None, test_STDs=None,
+                       use_plotly=True):
     r"""Used to plot and save the score barplot for a specific metric.
 
     Parameters
@@ -125,7 +130,7 @@ def plot_metric_scores(train_scores, test_scores, names, nb_results, metric_name
     autolabel(rect2, ax, set=2, std=train_STDs)
     ax.legend((rects[0], rect2[0]), ('Test', 'Train'))
     ax.set_ylim(-0.1, 1.1)
-    ax.set_xticks(np.arange(nb_results) + barWidth/2)
+    ax.set_xticks(np.arange(nb_results) + barWidth / 2)
     ax.set_xticklabels(names, rotation="vertical")
 
     try:
@@ -163,9 +168,10 @@ def plot_metric_scores(train_scores, test_scores, names, nb_results, metric_name
             marker_color="black",
         ))
 
-        fig.update_layout(title=metric_name + "\n" + tag + " scores for each classifier")
-        fig.update_layout(paper_bgcolor = 'rgba(0,0,0,0)',
-                          plot_bgcolor = 'rgba(0,0,0,0)')
+        fig.update_layout(
+            title=metric_name + "\n" + tag + " scores for each classifier")
+        fig.update_layout(paper_bgcolor='rgba(0,0,0,0)',
+                          plot_bgcolor='rgba(0,0,0,0)')
         plotly.offline.plot(fig, filename=file_name + ".html", auto_open=False)
         del fig
 
@@ -203,7 +209,7 @@ def plot_2d(data, classifiers_names, nbClassifiers, nbExamples,
     Returns
     -------
     """
-    fig, ax = plt.subplots(nrows=1, ncols=1,)
+    fig, ax = plt.subplots(nrows=1, ncols=1, )
     cmap, norm = iterCmap(stats_iter)
     cax = plt.imshow(data, cmap=cmap, norm=norm,
                      aspect='auto')
@@ -214,14 +220,20 @@ def plot_2d(data, classifiers_names, nbClassifiers, nbExamples,
     cbar = fig.colorbar(cax, ticks=[-100 * stats_iter / 2, 0, stats_iter])
     cbar.ax.set_yticklabels(['Unseen', 'Always Wrong', 'Always Right'])
 
-    fig.savefig(file_name + "error_analysis_2D.png", bbox_inches="tight", transparent=True)
+    fig.savefig(file_name + "error_analysis_2D.png", bbox_inches="tight",
+                transparent=True)
     plt.close()
     ### The following part is used to generate an interactive graph.
     if use_plotly:
-        label_index_list = np.concatenate([np.where(labels==i)[0] for i in np.unique(labels)]) #[np.where(labels==i)[0] for i in np.unique(labels)]
-        hover_text = [[example_ids[example_index] + " failed "+ str(stats_iter-data[example_index,classifier_index])+" time(s), labelled "+str(labels[example_index])
+        label_index_list = np.concatenate([np.where(labels == i)[0]
+                                           for i in np.unique(labels)])
+        hover_text = [[example_ids[example_index] + " failed " +
+                       str(stats_iter - data[example_index, classifier_index]) +
+                       " time(s), labelled " + str(labels[example_index])
                        for classifier_index in range(data.shape[1])]
-                      for example_index in range(data.shape[0]) ]
+                      for example_index in range(data.shape[0])]
         fig = plotly.graph_objs.Figure()
         # for row_index, label_index in enumerate(label_index_list):
         fig.add_trace(plotly.graph_objs.Heatmap(
@@ -233,13 +245,14 @@ def plot_2d(data, classifiers_names, nbClassifiers, nbExamples,
             colorscale="Greys",
             colorbar=dict(tickvals=[0, stats_iter],
                           ticktext=["Always Wrong", "Always Right"]),
-            reversescale=True),)
+            reversescale=True), )
         fig.update_yaxes(title_text="Examples", showticklabels=False, ticks='')
-        fig.update_xaxes(showticklabels=False,)
-        fig.update_layout(paper_bgcolor = 'rgba(0,0,0,0)',
-                          plot_bgcolor = 'rgba(0,0,0,0)')
+        fig.update_xaxes(showticklabels=False, )
+        fig.update_layout(paper_bgcolor='rgba(0,0,0,0)',
+                          plot_bgcolor='rgba(0,0,0,0)')
         fig.update_xaxes(showticklabels=True, )
-        plotly.offline.plot(fig, filename=file_name + "error_analysis_2D.html", auto_open=False)
+        plotly.offline.plot(fig, filename=file_name + "error_analysis_2D.html",
+                            auto_open=False)
         del fig
 
 
@@ -332,6 +345,7 @@ def autolabel(rects, ax, set=1, std=None):
                     "%.2f" % height, weight=weight,
                     ha='center', va='bottom', size="small")
 
+
 def get_fig_size(nb_results, min_size=15, multiplier=1.0, bar_width=0.35):
     r"""Used to get the image size to save the figure and the bar width, depending on the number of scores to plot.
 
@@ -380,23 +394,26 @@ def get_metrics_scores_biclass(metrics, results):
         -`metricScores[metric_name]["train_scores"]` is a list of all the available classifiers scores on the train set,
         -`metricScores[metric_name]["test_scores"]` is a list of all the available classifiers scores on the test set.
     """
-    classifier_names=[]
+    classifier_names = []
     classifier_names = [classifierResult.get_classifier_name()
                         for classifierResult in results
                         if classifierResult.get_classifier_name()
-                            not in classifier_names ]
+                        not in classifier_names]
     metrics_scores = dict((metric[0], pd.DataFrame(data=np.zeros((2,
-                                                                  len(classifier_names))),
-                                                index=["train", "test"],
-                                                columns=classifier_names))
+                                                                  len(
+                                                                      classifier_names))),
+                                                   index=["train", "test"],
+                                                   columns=classifier_names))
                           for metric in metrics)
 
     for metric in metrics:
         for classifierResult in results:
-            metrics_scores[metric[0]].loc["train", classifierResult.get_classifier_name()] = classifierResult.metrics_scores[metric[0]][0]
+            metrics_scores[metric[0]].loc[
+                "train", classifierResult.get_classifier_name()] = \
+                classifierResult.metrics_scores[metric[0]][0]
             metrics_scores[metric[0]].loc[
                 "test", classifierResult.get_classifier_name()] = \
-            classifierResult.metrics_scores[metric[0]][1]
+                classifierResult.metrics_scores[metric[0]][1]
 
     return metrics_scores
 
@@ -422,10 +439,11 @@ def get_example_errors_biclass(groud_truth, results):
 
     for classifier_result in results:
         error_on_examples = np.equal(classifier_result.full_labels_pred,
-                                   groud_truth).astype(int)
+                                     groud_truth).astype(int)
         unseen_examples = np.where(groud_truth == -100)[0]
         error_on_examples[unseen_examples] = -100
-        example_errors[classifier_result.get_classifier_name()] = error_on_examples
+        example_errors[
+            classifier_result.get_classifier_name()] = error_on_examples
     return example_errors
 
 
@@ -474,7 +492,8 @@ def sort_by_test_score(train_scores, test_scores, names, train_STDs=None,
     return sorted_names, sorted_train_scores, sorted_test_scores, sorted_train_STDs, sorted_test_STDs
 
 
-def publish_metrics_graphs(metrics_scores, directory, database_name, labels_names):
+def publish_metrics_graphs(metrics_scores, directory, database_name,
+                           labels_names):
     r"""Used to sort the results (names and both scores) in descending test score order.
 
     Parameters
@@ -493,25 +512,25 @@ def publish_metrics_graphs(metrics_scores, directory, database_name, labels_name
     -------
     results
     """
-    results=[]
+    results = []
     for metric_name, metric_dataframe in metrics_scores.items():
         logging.debug(
             "Start:\t Biclass score graph generation for " + metric_name)
         train_scores, test_scores, classifier_names, \
-        file_name, nb_results,results = init_plot(results, metric_name,
-                                                  metric_dataframe, directory,
-                                                  database_name, labels_names)
+        file_name, nb_results, results = init_plot(results, metric_name,
+                                                   metric_dataframe, directory,
+                                                   database_name, labels_names)
 
         plot_metric_scores(train_scores, test_scores, classifier_names,
                            nb_results, metric_name, file_name,
-                           tag=" "+" vs ".join(labels_names))
-        logging.debug("Done:\t Biclass score graph generation for "+metric_name)
+                           tag=" " + " vs ".join(labels_names))
+        logging.debug(
+            "Done:\t Biclass score graph generation for " + metric_name)
     return results
 
 
 def init_plot(results, metric_name, metric_dataframe,
               directory, database_name, labels_names):
-
     train = np.array(metric_dataframe.loc["train"])
     test = np.array(metric_dataframe.loc["test"])
     classifier_names = np.array(metric_dataframe.columns)
@@ -526,6 +545,7 @@ def init_plot(results, metric_name, metric_dataframe,
                 zip(classifier_names, test, np.zeros(len(test)))]
     return train, test, classifier_names, file_name, nb_results, results
 
+
 def gen_error_data(example_errors):
     r"""Used to format the error data in order to plot it efficiently. The data is saves in a `.csv` file.
 
@@ -570,15 +590,17 @@ def gen_error_data(example_errors):
         try:
             data_2d[:, classifierIndex] = error_on_examples
         except:
-            import pdb;pdb.set_trace()
+            import pdb;
+            pdb.set_trace()
     error_on_examples = -1 * np.sum(data_2d, axis=1) / nb_classifiers
     return nb_classifiers, nb_examples, classifiers_names, data_2d, error_on_examples
 
 
-def publish_example_errors(example_errors, directory, databaseName, labels_names, example_ids, labels):
+def publish_example_errors(example_errors, directory, databaseName,
+                           labels_names, example_ids, labels):
     logging.debug("Start:\t Biclass Label analysis figure generation")
 
-    base_file_name = os.path.join(directory,  databaseName + "-" + "_vs_".join(
+    base_file_name = os.path.join(directory, databaseName + "-" + "_vs_".join(
         labels_names) + "-")
 
     nb_classifiers, nb_examples, classifiers_names, \
@@ -597,25 +619,28 @@ def publish_example_errors(example_errors, directory, databaseName, labels_names
     logging.debug("Done:\t Biclass Label analysis figures generation")
 
 
-def publish_feature_importances(feature_importances, directory, database_name, feature_stds=None):
+def publish_feature_importances(feature_importances, directory, database_name,
+                                feature_stds=None):
     for view_name, feature_importance in feature_importances.items():
         if not os.path.exists(os.path.join(directory, "feature_importances")):
             os.mkdir(os.path.join(directory, "feature_importances"))
-        file_name = os.path.join(directory, "feature_importances" ,
+        file_name = os.path.join(directory, "feature_importances",
                                  database_name + "-" + view_name
                                  + "-feature_importances")
         if feature_stds is not None:
             feature_std = feature_stds[view_name]
-            feature_std.to_csv(file_name+"_dataframe_stds.csv")
+            feature_std.to_csv(file_name + "_dataframe_stds.csv")
         else:
             feature_std = pd.DataFrame(data=np.zeros(feature_importance.shape),
                                        index=feature_importance.index,
                                        columns=feature_importance.columns)
-        feature_importance.to_csv(file_name+"_dataframe.csv")
+        feature_importance.to_csv(file_name + "_dataframe.csv")
         hover_text = [["-Feature :" + str(feature_name) +
-                       "<br>-Classifier : "+classifier_name+
-                       "<br>-Importance : "+str(feature_importance.loc[feature_name][classifier_name])+
-                       "<br>-STD : " + str(feature_std.loc[feature_name][classifier_name])
+                       "<br>-Classifier : " + classifier_name +
+                       "<br>-Importance : " + str(
+                           feature_importance.loc[feature_name][classifier_name]) +
+                       "<br>-STD : " + str(
+                           feature_std.loc[feature_name][classifier_name])
                        for classifier_name in list(feature_importance.columns)]
                       for feature_name in list(feature_importance.index)]
         fig = plotly.graph_objs.Figure(data=plotly.graph_objs.Heatmap(
@@ -629,8 +654,8 @@ def publish_feature_importances(feature_importances, directory, database_name, f
         fig.update_layout(
             xaxis={"showgrid": False, "showticklabels": False, "ticks": ''},
             yaxis={"showgrid": False, "showticklabels": False, "ticks": ''})
-        fig.update_layout(paper_bgcolor = 'rgba(0,0,0,0)',
-                          plot_bgcolor = 'rgba(0,0,0,0)')
+        fig.update_layout(paper_bgcolor='rgba(0,0,0,0)',
+                          plot_bgcolor='rgba(0,0,0,0)')
         plotly.offline.plot(fig, filename=file_name + ".html", auto_open=False)
 
         del fig
@@ -675,22 +700,27 @@ def get_feature_importances(result, feature_names=None):
     for classifier_result in result:
         if isinstance(classifier_result, MonoviewResult):
             if classifier_result.view_name not in feature_importances:
-                feature_importances[classifier_result.view_name] = pd.DataFrame(index=feature_names)
+                feature_importances[classifier_result.view_name] = pd.DataFrame(
+                    index=feature_names)
             if hasattr(classifier_result.clf, 'feature_importances_'):
-                feature_importances[classifier_result.view_name][classifier_result.classifier_name] = classifier_result.clf.feature_importances_
+                feature_importances[classifier_result.view_name][
+                    classifier_result.classifier_name] = classifier_result.clf.feature_importances_
             else:
-                feature_importances[classifier_result.view_name][classifier_result.classifier_name] = np.zeros(classifier_result.n_features)
+                feature_importances[classifier_result.view_name][
+                    classifier_result.classifier_name] = np.zeros(
+                    classifier_result.n_features)
     return feature_importances
 
 
-def publish_tracebacks(directory, database_name, labels_names, tracebacks, iter_index):
+def publish_tracebacks(directory, database_name, labels_names, tracebacks,
+                       iter_index):
     if tracebacks:
         with open(os.path.join(directory, database_name +
-                                          "-iter"+str(iter_index) +
+                                          "-iter" + str(iter_index) +
                                           "-tacebacks.txt"),
                   "w") as traceback_file:
             failed_list = save_dict_to_text(tracebacks, traceback_file)
-        flagged_list = [_ + "-iter"+str(iter_index) for _ in failed_list]
+        flagged_list = [_ + "-iter" + str(iter_index) for _ in failed_list]
     else:
         flagged_list = {}
     return flagged_list
@@ -725,12 +755,11 @@ def analyze_iterations(results, benchmark_argument_dictionaries, stats_iter,
     """
     logging.debug("Srart:\t Analzing all biclass resuls")
     iter_results = {"metrics_scores": [i for i in range(stats_iter)],
-               "example_errors": [i for i in range(stats_iter)],
-               "feature_importances": [i for i in range(stats_iter)]}
+                    "example_errors": [i for i in range(stats_iter)],
+                    "feature_importances": [i for i in range(stats_iter)]}
     flagged_tracebacks_list = []
     fig_errors = []
     for iter_index, result, tracebacks in results:
-
         arguments = get_arguments(benchmark_argument_dictionaries, iter_index)
 
         metrics_scores = get_metrics_scores_biclass(metrics, result)
@@ -740,9 +769,11 @@ def analyze_iterations(results, benchmark_argument_dictionaries, stats_iter,
 
         database_name = arguments["args"]["name"]
         labels_names = [arguments["labels_dictionary"][0],
-                       arguments["labels_dictionary"][1]]
+                        arguments["labels_dictionary"][1]]
 
-        flagged_tracebacks_list += publish_tracebacks(directory, database_name, labels_names, tracebacks, iter_index)
+        flagged_tracebacks_list += publish_tracebacks(directory, database_name,
+                                                      labels_names, tracebacks,
+                                                      iter_index)
         res = publish_metrics_graphs(metrics_scores, directory, database_name,
                                      labels_names)
         publish_example_errors(example_errors, directory, database_name,
@@ -750,8 +781,6 @@ def analyze_iterations(results, benchmark_argument_dictionaries, stats_iter,
         publish_feature_importances(feature_importances, directory,
                                     database_name)
 
-
-
         iter_results["metrics_scores"][iter_index] = metrics_scores
         iter_results["example_errors"][iter_index] = example_errors
         iter_results["feature_importances"][iter_index] = feature_importances
@@ -931,7 +960,7 @@ def numpy_mean_and_std(scores_array):
 def publish_all_metrics_scores(iter_results, directory,
                                data_base_name, stats_iter,
                                min_size=10):
-    results=[]
+    results = []
     if not os.path.exists(os.path.dirname(os.path.join(directory, "a"))):
         try:
             os.makedirs(os.path.dirname(os.path.join(directory, "a")))
@@ -953,9 +982,9 @@ def publish_all_metrics_scores(iter_results, directory,
         plot_metric_scores(train, test, names, nbResults,
                            metric_name, file_name, tag=" averaged",
                            train_STDs=train_std, test_STDs=test_std)
-        results+=[[classifier_name, metric_name, test_mean, test_std]
-                  for classifier_name, test_mean, test_std
-                  in zip(names, test, test_std)]
+        results += [[classifier_name, metric_name, test_mean, test_std]
+                    for classifier_name, test_mean, test_std
+                    in zip(names, test, test_std)]
     return results
 
 
@@ -964,17 +993,18 @@ def gen_error_data_glob(iter_results, stats_iter):
     nb_classifiers = len(iter_results)
     data = np.zeros((nb_examples, nb_classifiers), dtype=int)
     classifier_names = []
-    for clf_index, (classifier_name, error_data) in enumerate(iter_results.items()):
+    for clf_index, (classifier_name, error_data) in enumerate(
+            iter_results.items()):
         data[:, clf_index] = error_data
         classifier_names.append(classifier_name)
-    error_on_examples = -1 * np.sum(data, axis=1) + (nb_classifiers * stats_iter)
+    error_on_examples = -1 * np.sum(data, axis=1) + (
+                nb_classifiers * stats_iter)
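+    # data[:, j] holds classifier j's accumulated per-example correctness over the
+    # statistical iterations, so error_on_examples counts each example's total
+    # number of misclassifications across all classifiers and iterations.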
     return nb_examples, nb_classifiers, data, error_on_examples, classifier_names
 
 
 def publish_all_example_errors(iter_results, directory,
                                stats_iter,
                                example_ids, labels):
-
     logging.debug(
         "Start:\t Global biclass label analysis figure generation")
 
@@ -982,12 +1012,13 @@ def publish_all_example_errors(iter_results, directory,
     error_on_examples, classifier_names = gen_error_data_glob(iter_results,
                                                               stats_iter)
 
-    np.savetxt(os.path.join(directory,  "clf_errors.csv"), data, delimiter=",")
-    np.savetxt(os.path.join(directory,  "example_errors.csv"), error_on_examples,
+    np.savetxt(os.path.join(directory, "clf_errors.csv"), data, delimiter=",")
+    np.savetxt(os.path.join(directory, "example_errors.csv"), error_on_examples,
                delimiter=",")
 
     plot_2d(data, classifier_names, nbClassifiers, nbExamples,
-            os.path.join(directory, ""), stats_iter=stats_iter, example_ids=example_ids, labels=labels)
+            os.path.join(directory, ""), stats_iter=stats_iter,
+            example_ids=example_ids, labels=labels)
     plot_errors_bar(error_on_examples, nbClassifiers * stats_iter,
                     nbExamples, os.path.join(directory, ""))
 
@@ -1039,9 +1070,10 @@ def publish_all_example_errors(iter_results, directory,
 
 def gen_classifiers_dict(results, metrics):
     classifiers_dict = dict((classifier_name, classifierIndex)
-                           for classifierIndex, classifier_name
-                           in enumerate(
-        list(results[list(results.keys())[0]]["metrics_scores"][0][metrics[0][0]].columns)))
+                            for classifierIndex, classifier_name
+                            in enumerate(
+        list(results[list(results.keys())[0]]["metrics_scores"][0][
+                 metrics[0][0]].columns)))
     return classifiers_dict, len(classifiers_dict)
 
 
@@ -1058,10 +1090,13 @@ def add_new_labels_combination(iterBiclassResults, labelsComination,
     return iterBiclassResults
 
 
-def add_new_metric(iter_biclass_results, metric, labels_combination, nb_classifiers,
+def add_new_metric(iter_biclass_results, metric, labels_combination,
+                   nb_classifiers,
                    stats_iter):
-    if metric[0] not in iter_biclass_results[labels_combination]["metrics_scores"]:
-        iter_biclass_results[labels_combination]["metrics_scores"][metric[0]] = {
+    if metric[0] not in iter_biclass_results[labels_combination][
+        "metrics_scores"]:
+        iter_biclass_results[labels_combination]["metrics_scores"][
+            metric[0]] = {
             "train_scores":
                 np.zeros((nb_classifiers, stats_iter)),
             "test_scores":
@@ -1114,7 +1149,8 @@ def format_previous_results(biclass_results):
             "std"] = dataframe.groupby(dataframe.index).std(ddof=0)
 
     importance_concat_dict = {}
-    for iter_index, view_feature_importances in enumerate(biclass_results["feature_importances"]):
+    for iter_index, view_feature_importances in enumerate(
+            biclass_results["feature_importances"]):
         for view_name, feature_importances in view_feature_importances.items():
             if view_name not in importance_concat_dict:
                 importance_concat_dict[view_name] = feature_importances
@@ -1123,10 +1159,11 @@ def format_previous_results(biclass_results):
                     [importance_concat_dict[view_name], feature_importances])
 
     for view_name, dataframe in importance_concat_dict.items():
-        feature_importances_analysis[view_name] = dataframe.groupby(dataframe.index).mean()
-
-        feature_importances_stds[view_name] = dataframe.groupby(dataframe.index).std(ddof=0)
+        feature_importances_analysis[view_name] = dataframe.groupby(
+            dataframe.index).mean()
 
+        feature_importances_stds[view_name] = dataframe.groupby(
+            dataframe.index).std(ddof=0)
 
     added_example_errors = {}
     for example_errors in biclass_results["example_errors"]:
@@ -1136,7 +1173,8 @@ def format_previous_results(biclass_results):
             else:
                 added_example_errors[classifier_name] += errors
     error_analysis = added_example_errors
-    return metrics_analysis, error_analysis, feature_importances_analysis, feature_importances_stds, biclass_results["labels"]
+    return (metrics_analysis, error_analysis, feature_importances_analysis,
+            feature_importances_stds, biclass_results["labels"])
 
 
 def analyze_all(biclass_results, stats_iter, directory, data_base_name,
@@ -1149,12 +1187,13 @@ def analyze_all(biclass_results, stats_iter, directory, data_base_name,
     results = publish_all_metrics_scores(metrics_analysis,
                                          directory,
                                          data_base_name, stats_iter)
-    publish_all_example_errors(error_analysis, directory,stats_iter,
+    publish_all_example_errors(error_analysis, directory, stats_iter,
                                example_ids, labels)
     publish_feature_importances(feature_importances, directory,
-                                    data_base_name,  feature_importances_stds)
+                                data_base_name, feature_importances_stds)
     return results
 
+
 # def analyze_iter_multiclass(multiclass_results, directory, stats_iter, metrics,
 #                            data_base_name, nb_examples, example_ids, multiclass_labels):
 #     """Used to mean the multiclass results on the iterations executed with different random states"""
@@ -1198,20 +1237,21 @@ def analyze_all(biclass_results, stats_iter, directory, data_base_name,
 
 
 def save_failed(failed_list, directory):
-    with open(os.path.join(directory, "failed_algorithms.txt"), "w") as failed_file:
-        failed_file.write("The following algorithms sent an error, the tracebacks are stored in the coressponding directory :\n")
-        failed_file.write(", \n".join(failed_list)+".")
+    with open(os.path.join(directory, "failed_algorithms.txt"),
+              "w") as failed_file:
+        failed_file.write(
+            "The following algorithms sent an error, the tracebacks are stored in the coressponding directory :\n")
+        failed_file.write(", \n".join(failed_list) + ".")
 
 
 def get_results(results, stats_iter, benchmark_argument_dictionaries,
                 metrics, directory, example_ids, labels):
-
     """Used to analyze the results of the previous benchmarks"""
     data_base_name = benchmark_argument_dictionaries[0]["args"]["name"]
 
-
-    results_means_std, biclass_results, flagged_failed = analyze_iterations(results, benchmark_argument_dictionaries,
-                                                                            stats_iter, metrics, example_ids, labels)
+    results_means_std, biclass_results, flagged_failed = analyze_iterations(
+        results, benchmark_argument_dictionaries,
+        stats_iter, metrics, example_ids, labels)
     if flagged_failed:
         save_failed(flagged_failed, directory)
 
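The result_analysis.py hunks above only re-wrap `gen_error_data_glob` and `publish_all_example_errors`, but the aggregation they touch is easier to follow with concrete numbers. The sketch below is not repository code; it assumes each entry of `iter_results` stores, per example, how many of the `stats_iter` iterations that classifier predicted correctly, which is what makes the `error_on_examples` formula count total mistakes.

    import numpy as np

    stats_iter = 2
    iter_results = {  # hypothetical per-classifier "times correct" vectors
        "decision_tree": np.array([2, 1, 0, 2]),
        "svm_jumbo_fusion": np.array([2, 2, 1, 0]),
    }

    nb_examples = len(next(iter(iter_results.values())))
    nb_classifiers = len(iter_results)
    data = np.zeros((nb_examples, nb_classifiers), dtype=int)
    for clf_index, (clf_name, error_data) in enumerate(iter_results.items()):
        data[:, clf_index] = error_data

    # Errors per example over every classifier and every iteration,
    # i.e. the quantity later displayed by plot_errors_bar.
    error_on_examples = -1 * np.sum(data, axis=1) + nb_classifiers * stats_iter
    print(error_on_examples)  # [0 1 3 2]
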
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/base.py b/multiview_platform/mono_multi_view_classifiers/utils/base.py
index 421ddb1944818f141d2095be2c79ffffe721963a..e55e33deaf590f1d018538cd0c833121c8a8dc6c 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/base.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/base.py
@@ -1,8 +1,5 @@
 import numpy as np
-import pickle
 from sklearn.base import BaseEstimator
-from matplotlib.ticker import FuncFormatter
-import matplotlib.pyplot as plt
 
 
 class BaseClassifier(BaseEstimator, ):
@@ -32,7 +29,7 @@ class BaseClassifier(BaseEstimator, ):
             return [(param_name,
                      np.array(detector.cv_results_["param_" + param_name]))
                     if param_name not in self.classed_params else (
-            param_name, classed_dict[param_name])
+                param_name, classed_dict[param_name])
                     for param_name in self.param_names]
         else:
             return [()]
@@ -43,8 +40,8 @@ class BaseClassifier(BaseEstimator, ):
 
     def params_to_string(self):
         return ", ".join(
-                [param_name + " : " + self.to_str(param_name) for param_name in
-                 self.param_names])
+            [param_name + " : " + self.to_str(param_name) for param_name in
+             self.param_names])
 
     def get_config(self):
         if self.param_names:
@@ -66,7 +63,7 @@ class BaseClassifier(BaseEstimator, ):
         return ""
 
     def accepts_multi_class(self, random_state, n_samples=10, dim=2,
-                           n_classes=3):
+                            n_classes=3):
         if int(n_samples / n_classes) < 1:
             raise ValueError(
                 "n_samples ({}) / n_classe ({}) must be over 1".format(
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/configuration.py b/multiview_platform/mono_multi_view_classifiers/utils/configuration.py
index f504d86d33f7af54e537f2f789b98e1085dc7f12..7544cdae992fc1d8d901b2c944e041b616a78b9b 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/configuration.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/configuration.py
@@ -1,6 +1,7 @@
-import yaml
 import os
 
+import yaml
+
 
 def get_the_args(path_to_config_file="../config_files/config.yml"):
     """
@@ -22,7 +23,7 @@ def get_the_args(path_to_config_file="../config_files/config.yml"):
 
 
 def pass_default_config(log=True,
-                        name=["plausible",],
+                        name=["plausible", ],
                         label="_",
                         file_type=".hdf5",
                         views=None,
@@ -40,9 +41,9 @@ def pass_default_config(log=True,
                         nb_folds=5,
                         nb_class=None,
                         classes=None,
-                        type=["multiview",],
-                        algos_monoview=["all" ],
-                        algos_multiview=["svm_jumbo_fusion",],
+                        type=["multiview", ],
+                        algos_monoview=["all"],
+                        algos_multiview=["svm_jumbo_fusion", ],
                         stats_iter=2,
                         metrics=["accuracy_score", "f1_score"],
                         metric_princ="f1_score",
@@ -79,15 +80,15 @@ def pass_default_config(log=True,
     :param hps_iter:
     :return:
     """
-    args = dict((key, value) for key, value in locals().items() if key !="kwargs")
+    args = dict(
+        (key, value) for key, value in locals().items() if key != "kwargs")
     args = dict(args, **kwargs)
     return args
 
 
-
 def save_config(directory, arguments):
     """
     Saves the config file in the result directory.
     """
     with open(os.path.join(directory, "config_file.yml"), "w") as stream:
-        yaml.dump(arguments, stream)
\ No newline at end of file
+        yaml.dump(arguments, stream)
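
The configuration helpers above rely on a small trick: `pass_default_config` snapshots its own keyword arguments with `locals()` and merges any extra `**kwargs` on top, so the whole run configuration ends up as one plain dict that `save_config` can dump to YAML. A hedged, self-contained sketch of that pattern (the function name `build_config` and its parameter list are illustrative, not the real signature):

    import yaml

    def build_config(log=True, stats_iter=2, nb_folds=5, **kwargs):
        # locals() is evaluated before anything else is defined, so it holds
        # exactly the keyword arguments plus the kwargs dict itself.
        args = dict((key, value) for key, value in locals().items()
                    if key != "kwargs")
        args = dict(args, **kwargs)  # unknown extra keys are added alongside
        return args

    config = build_config(stats_iter=5, metric_princ="f1_score")
    print(yaml.dump(config))
    # log: true
    # metric_princ: f1_score
    # nb_folds: 5
    # stats_iter: 5
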
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/dataset.py b/multiview_platform/mono_multi_view_classifiers/utils/dataset.py
index 2e1ec8fe92164370534dcc0cb0f95caa5532de51..f107b2ea848e1febeeade4b7df85711fd0780b86 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/dataset.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/dataset.py
@@ -1,14 +1,15 @@
+import errno
 import logging
 import os
 import select
 import sys
-import errno
 from abc import abstractmethod
 
 import h5py
 import numpy as np
 from scipy import sparse
 
+
 # from . import get_multiview_db as DB
 
 class Dataset():
@@ -30,7 +31,8 @@ class Dataset():
         pass
 
     @abstractmethod
-    def filter(self, labels, label_names, example_indices, view_names, path=None):
+    def filter(self, labels, label_names, example_indices, view_names,
+               path=None):
         pass
 
     def init_example_indces(self, example_indices=None):
@@ -70,7 +72,7 @@ class Dataset():
         for view_index in view_indices:
             view_data = self.get_v(view_index, example_indices=example_indices)
             nb_features = view_data.shape[1]
-            view_limits.append(view_limits[-1]+nb_features)
+            view_limits.append(view_limits[-1] + nb_features)
         concat_views = np.concatenate([self.get_v(view_index,
                                                   example_indices=example_indices)
                                        for view_index in view_indices], axis=1)
@@ -80,26 +82,28 @@ class Dataset():
         selected_labels = [self.get_label_names().index(label_name.decode())
                            if isinstance(label_name, bytes)
                            else self.get_label_names().index(label_name)
-                                   for label_name in selected_label_names]
+                           for label_name in selected_label_names]
         selected_indices = np.array([index
-                                     for index, label in enumerate(self.get_labels())
+                                     for index, label in
+                                     enumerate(self.get_labels())
                                      if label in selected_labels])
         labels = np.array([selected_labels.index(self.get_labels()[idx])
                            for idx in selected_indices])
         return labels, selected_label_names, selected_indices
 
-
     def select_views_and_labels(self, nb_labels=None,
                                 selected_label_names=None, random_state=None,
-                                view_names = None, path_for_new="../data/"):
+                                view_names=None, path_for_new="../data/"):
         if view_names is None and selected_label_names is None and nb_labels is None:
             pass
         else:
             selected_label_names = self.check_selected_label_names(nb_labels,
-                                                               selected_label_names,
-                                                               random_state)
-            labels, label_names, example_indices = self.select_labels(selected_label_names)
-            self.filter(labels, label_names, example_indices, view_names, path_for_new)
+                                                                   selected_label_names,
+                                                                   random_state)
+            labels, label_names, example_indices = self.select_labels(
+                selected_label_names)
+            self.filter(labels, label_names, example_indices, view_names,
+                        path_for_new)
         labels_dictionary = dict(
             (labelIndex, labelName) for labelIndex, labelName in
             enumerate(self.get_label_names()))
@@ -108,14 +112,15 @@ class Dataset():
     def check_selected_label_names(self, nb_labels=None,
                                    selected_label_names=None,
                                    random_state=np.random.RandomState(42)):
-        if selected_label_names is None or nb_labels is None or len(selected_label_names) < nb_labels:
+        if selected_label_names is None or nb_labels is None or len(
+                selected_label_names) < nb_labels:
             if selected_label_names is None:
                 nb_labels_to_add = nb_labels
                 selected_label_names = []
             elif nb_labels is not None:
                 nb_labels_to_add = nb_labels - len(selected_label_names)
             else:
-                nb_labels_to_add=0
+                nb_labels_to_add = 0
             labels_names_to_choose = [available_label_name
                                       for available_label_name
                                       in self.get_label_names()
@@ -137,7 +142,8 @@ class Dataset():
 class RAMDataset(Dataset):
 
     def __init__(self, views=None, labels=None, are_sparse=False,
-                 view_names=None, labels_names=None, example_ids=None, name=None):
+                 view_names=None, labels_names=None, example_ids=None,
+                 name=None):
         self.saved_on_disk = False
         self.views = views
         self.labels = np.asarray(labels)
@@ -151,7 +157,7 @@ class RAMDataset(Dataset):
         self.view_dict = dict((view_name, view_ind)
                               for view_name, view_ind
                               in zip(view_names, range(len(views))))
-        self.name=name
+        self.name = name
         self.nb_view = len(self.views)
         self.is_temp = False
 
@@ -210,21 +216,25 @@ class RAMDataset(Dataset):
         example_indices = self.init_example_indces(example_indices)
         return len(np.unique(self.labels[example_indices]))
 
-    def filter(self, labels, label_names, example_indices, view_names, path=None):
+    def filter(self, labels, label_names, example_indices, view_names,
+               path=None):
         if self.example_ids is not None:
             self.example_ids = self.example_ids[example_indices]
         self.labels = self.labels[example_indices]
         self.labels_names = self.labels_names[np.unique(self.labels)]
-        self.labels = np.array([np.where(label == np.unique(self.labels))[0] for label in self.labels])
+        self.labels = np.array(
+            [np.where(label == np.unique(self.labels))[0] for label in
+             self.labels])
         self.view_names = view_names
         new_views = []
         for new_view_ind, view_name in enumerate(self.view_names):
-            new_views.append(self.views[self.view_dict[view_name]][example_indices, :])
+            new_views.append(
+                self.views[self.view_dict[view_name]][example_indices, :])
         self.views = new_views
         self.view_dict = dict((view_name, view_ind)
                               for view_ind, view_name
                               in enumerate(self.view_names))
-        self.nb_view=len(self.views)
+        self.nb_view = len(self.views)
 
     def get_view_dict(self):
         return self.view_dict
@@ -289,13 +299,14 @@ class HDF5Dataset(Dataset):
     def __init__(self, views=None, labels=None, are_sparse=False,
                  file_name="dataset.hdf5", view_names=None, path="",
                  hdf5_file=None, labels_names=None, is_temp=False,
-                 example_ids=None,):
+                 example_ids=None, ):
         self.is_temp = False
         if hdf5_file is not None:
-            self.dataset=hdf5_file
+            self.dataset = hdf5_file
             self.init_attrs()
         else:
-            if not os.path.exists(os.path.dirname(os.path.join(path, file_name))):
+            if not os.path.exists(
+                    os.path.dirname(os.path.join(path, file_name))):
                 try:
                     os.makedirs(os.path.dirname(os.path.join(path, file_name)))
                 except OSError as exc:
@@ -303,13 +314,16 @@ class HDF5Dataset(Dataset):
                         raise
             dataset_file = h5py.File(os.path.join(path, file_name), "w")
             if view_names is None:
-                view_names = ["View"+str(index) for index in range(len(views))]
+                view_names = ["View" + str(index) for index in
+                              range(len(views))]
             if isinstance(are_sparse, bool):
                 are_sparse = [are_sparse for _ in views]
-            for view_index, (view_name, view, is_sparse) in enumerate(zip(view_names, views, are_sparse)):
-                view_dataset = dataset_file.create_dataset("View" + str(view_index),
-                                                      view.shape,
-                                                      data=view)
+            for view_index, (view_name, view, is_sparse) in enumerate(
+                    zip(view_names, views, are_sparse)):
+                view_dataset = dataset_file.create_dataset(
+                    "View" + str(view_index),
+                    view.shape,
+                    data=view)
                 view_dataset.attrs["name"] = view_name
                 view_dataset.attrs["sparse"] = is_sparse
             labels_dataset = dataset_file.create_dataset("Labels",
@@ -318,7 +332,8 @@ class HDF5Dataset(Dataset):
             if labels_names is None:
                 labels_names = [str(index) for index in np.unique(labels)]
             labels_dataset.attrs["names"] = [label_name.encode()
-                                             if not isinstance(label_name, bytes)
+                                             if not isinstance(label_name,
+                                                               bytes)
                                              else label_name
                                              for label_name in labels_names]
             meta_data_grp = dataset_file.create_group("Metadata")
@@ -329,13 +344,13 @@ class HDF5Dataset(Dataset):
             self.update_hdf5_dataset(os.path.join(path, file_name))
             if example_ids is not None:
                 example_ids = [example_id if not is_just_number(example_id)
-                               else "ID_"+example_id for example_id in example_ids]
+                               else "ID_" + example_id for example_id in
+                               example_ids]
                 self.example_ids = example_ids
             else:
-                self.example_ids = ["ID_"+str(i)
+                self.example_ids = ["ID_" + str(i)
                                     for i in range(labels.shape[0])]
 
-
     def rm(self):
         """
         Method used to delete the dataset file on the disk if the dataset is
@@ -364,8 +379,7 @@ class HDF5Dataset(Dataset):
             The view's name.
 
         """
-        return self.dataset["View"+str(view_idx)].attrs["name"]
-
+        return self.dataset["View" + str(view_idx)].attrs["name"]
 
     def init_attrs(self):
         """
@@ -381,11 +395,12 @@ class HDF5Dataset(Dataset):
         if "example_ids" in self.dataset["Metadata"].keys():
             self.example_ids = [example_id.decode()
                                 if not is_just_number(example_id.decode())
-                                else "ID_"+example_id.decode()
-                                for example_id in self.dataset["Metadata"]["example_ids"]]
+                                else "ID_" + example_id.decode()
+                                for example_id in
+                                self.dataset["Metadata"]["example_ids"]]
         else:
-                self.example_ids = [str(i) for i in range(self.dataset["Labels"].shape[0])]
-
+            self.example_ids = [str(i) for i in
+                                range(self.dataset["Labels"].shape[0])]
 
     def get_nb_examples(self):
         """
@@ -403,7 +418,8 @@ class HDF5Dataset(Dataset):
         """
         view_dict = {}
         for view_index in range(self.nb_view):
-            view_dict[self.dataset["View" + str(view_index)].attrs["name"]] = view_index
+            view_dict[self.dataset["View" + str(view_index)].attrs[
+                "name"]] = view_index
         return view_dict
 
     def get_label_names(self, decode=True, example_indices=None):
@@ -425,14 +441,15 @@ class HDF5Dataset(Dataset):
         selected_labels = self.get_labels(example_indices)
         if decode:
             return [label_name.decode("utf-8")
-                    for label, label_name in enumerate(self.dataset["Labels"].attrs["names"])
+                    for label, label_name in
+                    enumerate(self.dataset["Labels"].attrs["names"])
                     if label in selected_labels]
         else:
             return [label_name
-                    for label, label_name in enumerate(self.dataset["Labels"].attrs["names"])
+                    for label, label_name in
+                    enumerate(self.dataset["Labels"].attrs["names"])
                     if label in selected_labels]
 
-
     def get_v(self, view_index, example_indices=None):
         """
         Selects the view to extract
@@ -456,13 +473,15 @@ class HDF5Dataset(Dataset):
             # example_indices = example_indices[sorted_indices]
 
             if not self.dataset["View" + str(view_index)].attrs["sparse"]:
-                return self.dataset["View" + str(view_index)][()][example_indices, :]#[np.argsort(sorted_indices), :]
+                return self.dataset["View" + str(view_index)][()][
+                       example_indices, :]  # [np.argsort(sorted_indices), :]
             else:
                 sparse_mat = sparse.csr_matrix(
                     (self.dataset["View" + str(view_index)]["data"][()],
                      self.dataset["View" + str(view_index)]["indices"][()],
                      self.dataset["View" + str(view_index)]["indptr"][()]),
-                    shape=self.dataset["View" + str(view_index)].attrs["shape"])[
+                    shape=self.dataset["View" + str(view_index)].attrs[
+                        "shape"])[
                              example_indices, :][
                              np.argsort(sorted_indices), :]
 
@@ -470,7 +489,7 @@ class HDF5Dataset(Dataset):
 
     def get_shape(self, view_index=0, example_indices=None):
         """Gets the shape of the needed view"""
-        return self.get_v(view_index,example_indices=example_indices).shape
+        return self.get_v(view_index, example_indices=example_indices).shape
 
     def get_nb_class(self, example_indices=None):
         """Gets the number of class of the dataset"""
@@ -484,10 +503,12 @@ class HDF5Dataset(Dataset):
     def copy_view(self, target_dataset=None, source_view_name=None,
                   target_view_index=None, example_indices=None):
         example_indices = self.init_example_indces(example_indices)
-        new_d_set = target_dataset.create_dataset("View"+str(target_view_index),
+        new_d_set = target_dataset.create_dataset(
+            "View" + str(target_view_index),
             data=self.get_v(self.view_dict[source_view_name],
                             example_indices=example_indices))
-        for key, value in self.dataset["View"+str(self.view_dict[source_view_name])].attrs.items():
+        for key, value in self.dataset[
+            "View" + str(self.view_dict[source_view_name])].attrs.items():
             new_d_set.attrs[key] = value
 
     def init_view_names(self, view_names=None):
@@ -503,25 +524,38 @@ class HDF5Dataset(Dataset):
         self.is_temp = True
         self.init_attrs()
 
-    def filter(self, labels, label_names, example_indices, view_names, path=None):
-        dataset_file_path = os.path.join(path,self.get_name()+"_temp_filter.hdf5")
-        new_dataset_file = h5py.File(dataset_file_path,"w")
+    def filter(self, labels, label_names, example_indices, view_names,
+               path=None):
+        dataset_file_path = os.path.join(path,
+                                         self.get_name() + "_temp_filter.hdf5")
+        new_dataset_file = h5py.File(dataset_file_path, "w")
         self.dataset.copy("Metadata", new_dataset_file)
         if "example_ids" in self.dataset["Metadata"].keys():
             del new_dataset_file["Metadata"]["example_ids"]
-            ex_ids = new_dataset_file["Metadata"].create_dataset("example_ids", data=np.array(self.example_ids)[example_indices].astype(np.dtype("S10")))
+            ex_ids = new_dataset_file["Metadata"].create_dataset(
+                "example_ids",
+                data=np.array(self.example_ids)[example_indices].astype(
+                    np.dtype("S10")))
         else:
             new_dataset_file["Metadata"].create_dataset("example_ids",
-                                                        (len(self.example_ids), ),
-                                                        data=np.array(self.example_ids).astype(np.dtype("S10")),
+                                                        (
+                                                        len(self.example_ids),),
+                                                        data=np.array(
+                                                            self.example_ids).astype(
+                                                            np.dtype("S10")),
                                                         dtype=np.dtype("S10"))
-        new_dataset_file["Metadata"].attrs["datasetLength"] = len(example_indices)
+        new_dataset_file["Metadata"].attrs["datasetLength"] = len(
+            example_indices)
         new_dataset_file["Metadata"].attrs["nbClass"] = np.unique(labels)
         new_dataset_file.create_dataset("Labels", data=labels)
         new_dataset_file["Labels"].attrs["names"] = [label_name.encode()
-                                                     if not isinstance(label_name, bytes)
+                                                     if not isinstance(
+                                                         label_name, bytes)
                                                      else label_name
-                                                     for label_name in label_names]
+                                                     for label_name in
+                                                     label_names]
         view_names = self.init_view_names(view_names)
         new_dataset_file["Metadata"].attrs["nbView"] = len(view_names)
         for new_index, view_name in enumerate(view_names):
@@ -544,8 +578,8 @@ class HDF5Dataset(Dataset):
         self.dataset.copy("Labels", noisy_dataset)
         for view_index in range(self.nb_view):
             self.copy_view(target_dataset=noisy_dataset,
-                                   source_view_name=self.get_view_name(view_index),
-                                   target_view_index=view_index)
+                           source_view_name=self.get_view_name(view_index),
+                           target_view_index=view_index)
         for view_index in range(noisy_dataset["Metadata"].attrs["nbView"]):
             view_key = "View" + str(view_index)
             view_dset = noisy_dataset[view_key]
@@ -553,7 +587,8 @@ class HDF5Dataset(Dataset):
                 view_limits = self.dataset[
                     "Metadata/View" + str(view_index) + "_limits"][()]
             except:
-                import pdb;pdb.set_trace()
+                import pdb;
+                pdb.set_trace()
             view_ranges = view_limits[:, 1] - view_limits[:, 0]
             normal_dist = random_state.normal(0, noise_std, view_dset[()].shape)
             noise = normal_dist * view_ranges
@@ -567,18 +602,13 @@ class HDF5Dataset(Dataset):
         noisy_dataset.close()
         self.update_hdf5_dataset(noisy_dataset_path)
 
-
     # The following methods are hdf5 free
 
-
-
     def get_name(self):
         """Ony works if there are not multiple dots in the files name"""
         return self.dataset.filename.split('/')[-1].split('.')[0]
 
 
-
-
 def is_just_number(string):
     try:
         float(string)
@@ -586,6 +616,7 @@ def is_just_number(string):
     except ValueError:
         return False
 
+
 def datasets_already_exist(pathF, name, nbCores):
     """Used to check if it's necessary to copy datasets"""
     allDatasetExist = True
@@ -602,8 +633,11 @@ def extract_subset(matrix, used_indices):
         new_indptr = np.zeros(len(used_indices) + 1, dtype=int)
         oldindptr = matrix.indptr
         for exampleIndexIndex, exampleIndex in enumerate(used_indices):
-            new_indptr[exampleIndexIndex + 1] = new_indptr[exampleIndexIndex] + (
-                    oldindptr[exampleIndex + 1] - oldindptr[exampleIndex])
+            new_indptr[exampleIndexIndex + 1] = (
+                    new_indptr[exampleIndexIndex]
+                    + oldindptr[exampleIndex + 1] - oldindptr[exampleIndex])
         new_data = np.ones(new_indptr[-1], dtype=bool)
         new_indices = np.zeros(new_indptr[-1], dtype=int)
         old_indices = matrix.indices
@@ -667,6 +701,7 @@ def copy_hdf5(pathF, name, nbCores):
             datasetFile.copy("/" + dataset, newDataSet["/"])
         newDataSet.close()
 
+
 def delete_HDF5(benchmarkArgumentsDictionaries, nbCores, dataset):
     """Used to delete temporary copies at the end of the benchmark"""
     if nbCores > 1:
@@ -711,4 +746,4 @@ def get_examples_views_indices(dataset, examples_indices, view_indices, ):
         view_indices = np.arange(dataset.nb_view)
     if examples_indices is None:
         examples_indices = np.arange(dataset.get_nb_examples())
-    return examples_indices, view_indices
\ No newline at end of file
+    return examples_indices, view_indices
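
For readers new to the storage layout handled by `HDF5Dataset` above: each view lives in its own `View<i>` dataset (with `name` and `sparse` attributes), the labels in a `Labels` dataset carrying the encoded label names, and dataset-wide information in a `Metadata` group. A minimal sketch of a file in that layout, built directly with h5py; the values and the file name are made up, only the group and attribute names come from the code above:

    import h5py
    import numpy as np

    with h5py.File("tiny_example.hdf5", "w") as hdf5_file:
        view = hdf5_file.create_dataset("View0", data=np.random.rand(4, 3))
        view.attrs["name"] = "ViewNumber0"
        view.attrs["sparse"] = False

        labels = hdf5_file.create_dataset("Labels", data=np.array([0, 1, 0, 1]))
        labels.attrs["names"] = [b"No", b"Yes"]

        meta = hdf5_file.create_group("Metadata")
        meta.attrs["nbView"] = 1
        meta.attrs["nbClass"] = 2
        meta.attrs["datasetLength"] = 4
        meta.create_dataset("example_ids",
                            data=np.array(["ID_0", "ID_1", "ID_2", "ID_3"],
                                          dtype="S10"))

    with h5py.File("tiny_example.hdf5", "r") as hdf5_file:
        print(hdf5_file["View0"].attrs["name"],
              hdf5_file["Metadata"].attrs["datasetLength"])
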
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/execution.py b/multiview_platform/mono_multi_view_classifiers/utils/execution.py
index 08244d0e0d07103baaf66f0f87c1c7b911728a82..0c82c6677c2976b90e408824338adaa0f95cebfc 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/execution.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/execution.py
@@ -21,7 +21,8 @@ def parse_the_args(arguments):
         fromfile_prefix_chars='@')
 
     groupStandard = parser.add_argument_group('Standard arguments')
-    groupStandard.add_argument('--config_path', metavar='STRING', action='store',
+    groupStandard.add_argument('--config_path', metavar='STRING',
+                               action='store',
                                help='Path to the hdf5 dataset or database '
                                     'folder (default: %(default)s)',
                                default='../config_files/config.yml')
@@ -151,16 +152,21 @@ def init_log_file(name, views, cl_type, log, debug, label,
         Reference to the main results directory for the benchmark.
     """
     if views is None:
-        views=[]
-    noise_string = "n_"+str(int(noise_std*100))
-    result_directory = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))), result_directory)
+        views = []
+    noise_string = "n_" + str(int(noise_std * 100))
+    result_directory = os.path.join(
+        os.path.dirname(os.path.dirname(
+            os.path.dirname(os.path.realpath(__file__)))),
+        result_directory)
     if debug:
         result_directory = os.path.join(result_directory, name, noise_string,
-                                        "debug_started_" + time.strftime("%Y_%m_%d-%H_%M_%S") + "_" + label)
+                                        "debug_started_" + time.strftime(
+                                            "%Y_%m_%d-%H_%M_%S") + "_" + label)
     else:
-        result_directory = os.path.join(result_directory, name,  noise_string,
-                                        "started_" + time.strftime("%Y_%m_%d-%H_%M") + "_" + label)
-    log_file_name = time.strftime("%Y_%m_%d-%H_%M") + "-" + ''.join(cl_type) + "-" + "_".join(views) + "-" + name + "-LOG.log"
+        result_directory = os.path.join(result_directory, name, noise_string,
+                                        "started_" + time.strftime(
+                                            "%Y_%m_%d-%H_%M") + "_" + label)
+    log_file_name = time.strftime("%Y_%m_%d-%H_%M") + "-" + ''.join(
+        cl_type) + "-" + "_".join(views) + "-" + name + "-LOG.log"
     if os.path.exists(result_directory):
         raise NameError("The result dir already exists, wait 1 min and retry")
     log_file_path = os.path.join(result_directory, log_file_name)
@@ -236,7 +242,7 @@ def gen_k_folds(stats_iter, nb_folds, stats_iter_random_states):
         if isinstance(stats_iter_random_states, list):
             stats_iter_random_states = stats_iter_random_states[0]
         folds_list = [sklearn.model_selection.StratifiedKFold(n_splits=nb_folds,
-                                                             random_state=stats_iter_random_states,
+                                                              random_state=stats_iter_random_states,
                                                               shuffle=True)]
     return folds_list
 
@@ -265,9 +271,9 @@ def init_views(dataset_var, arg_views):
     if arg_views is not None:
         allowed_views = arg_views
         all_views = [str(dataset_var.get_view_name(view_index))
-                    if type(dataset_var.get_view_name(view_index)) != bytes
-                    else dataset_var.get_view_name(view_index).decode("utf-8")
-                    for view_index in range(nb_view)]
+                     if type(dataset_var.get_view_name(view_index)) != bytes
+                     else dataset_var.get_view_name(view_index).decode("utf-8")
+                     for view_index in range(nb_view)]
         views = []
         views_indices = []
         for view_index in range(nb_view):
@@ -314,17 +320,23 @@ def gen_direcorties_names(directory, stats_iter):
 def find_dataset_names(path, type, names):
     """This function goal is to browse the dataset directory and extrats all
      the needed dataset names."""
-    module_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
+    module_path = os.path.dirname(
+        os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
     available_file_names = [file_name.strip().split(".")[0]
-                            for file_name in os.listdir(os.path.join(module_path, path))
+                            for file_name in
+                            os.listdir(os.path.join(module_path, path))
                             if file_name.endswith(type)]
     if names == ["all"]:
         return available_file_names
-    elif len(names)>1:
-        selected_names = [used_name for used_name in available_file_names if used_name in names]
+    elif len(names) > 1:
+        selected_names = [used_name for used_name in available_file_names if
+                          used_name in names]
         if not selected_names:
-            raise ValueError("None of the provided dataset names are available. Available datasets are {}".format(available_file_names))
-        return [used_name for used_name in available_file_names if used_name in names]
+            raise ValueError(
+                "None of the provided dataset names are available. Available datasets are {}".format(
+                    available_file_names))
+        return [used_name for used_name in available_file_names if
+                used_name in names]
     else:
         return names
 
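The naming scheme assembled by `init_log_file` above determines where every run writes its results. A small sketch with made-up inputs (the real function prepends the installed module path and raises if the directory already exists; `"results"` here is just a stand-in base directory):

    import os
    import time

    name, label, noise_std = "plausible", "example_run", 0.15
    cl_type, views = ["multiview"], ["ViewNumber0", "ViewNumber1"]

    noise_string = "n_" + str(int(noise_std * 100))  # -> "n_15"
    result_directory = os.path.join(
        "results", name, noise_string,
        "started_" + time.strftime("%Y_%m_%d-%H_%M") + "_" + label)
    log_file_name = (time.strftime("%Y_%m_%d-%H_%M") + "-" + ''.join(cl_type)
                     + "-" + "_".join(views) + "-" + name + "-LOG.log")

    print(result_directory)
    # e.g. results/plausible/n_15/started_2020_01_01-12_00_example_run
    print(log_file_name)
    # e.g. 2020_01_01-12_00-multiview-ViewNumber0_ViewNumber1-plausible-LOG.log
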
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/get_multiview_db.py b/multiview_platform/mono_multi_view_classifiers/utils/get_multiview_db.py
index 7d200eb31875a1eb174b5311c39e8eb0aac3015c..4b061122d8cc2516c9166e5422d708df1ede31d9 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/get_multiview_db.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/get_multiview_db.py
@@ -1,6 +1,5 @@
 import errno
 import os
-import logging
 
 import h5py
 import numpy as np
@@ -12,9 +11,6 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
 
-
-
-
 def make_me_noisy(view_data, random_state, percentage=5):
     """used to introduce some noise in the generated data"""
     view_data = view_data.astype(bool)
@@ -32,19 +28,20 @@ def make_me_noisy(view_data, random_state, percentage=5):
 
 def get_plausible_db_hdf5(features, path, file_name, nb_class=3,
                           label_names=["No".encode(), "Yes".encode(),
-                                     "Maybe".encode()],
+                                       "Maybe".encode()],
                           random_state=None, full=True, add_noise=False,
                           noise_std=0.15, nb_view=3, nb_examples=100,
                           nb_features=10):
     """Used to generate a plausible dataset to test the algorithms"""
 
-    if not os.path.exists(os.path.dirname(os.path.join(path, "plausible.hdf5"))):
+    if not os.path.exists(
+            os.path.dirname(os.path.join(path, "plausible.hdf5"))):
         try:
             os.makedirs(os.path.dirname(os.path.join(path, "plausible.hdf5")))
         except OSError as exc:
             if exc.errno != errno.EEXIST:
                 raise
-    example_ids = ["exmaple_id_"+str(i) for i in range(nb_examples)]
+    example_ids = ["exmaple_id_" + str(i) for i in range(nb_examples)]
     views = []
     view_names = []
     are_sparse = []
@@ -64,7 +61,7 @@ def get_plausible_db_hdf5(features, path, file_name, nb_class=3,
                                                      nb_examples,
                                                      int(nb_examples / 12))
             for index in np.concatenate((fake_one_indices, fake_zero_indices)):
-                example_ids[index]+="noised"
+                example_ids[index] += "noised"
 
             view_data[fake_one_indices] = np.ones(
                 (len(fake_one_indices), nb_features))
@@ -75,11 +72,9 @@ def get_plausible_db_hdf5(features, path, file_name, nb_class=3,
             view_names.append("ViewNumber" + str(view_index))
             are_sparse.append(False)
 
-
-
         dataset = RAMDataset(views=views, labels=labels,
-                              labels_names=label_names, view_names=view_names,
-                              are_sparse=are_sparse, example_ids=example_ids,
+                             labels_names=label_names, view_names=view_names,
+                             are_sparse=are_sparse, example_ids=example_ids,
                              name='plausible')
         labels_dictionary = {0: "No", 1: "Yes"}
         return dataset, labels_dictionary, "plausible"
@@ -89,10 +84,10 @@ def get_plausible_db_hdf5(features, path, file_name, nb_class=3,
         scndBound = 2 * int(nb_examples / 3)
         thrdBound = nb_examples
         labels = np.array(
-                            [0 for _ in range(firstBound)] +
-                            [1 for _ in range(firstBound)] +
-                            [2 for _ in range(rest)]
-                        )
+            [0 for _ in range(firstBound)] +
+            [1 for _ in range(firstBound)] +
+            [2 for _ in range(rest)]
+        )
         for view_index in range(nb_view):
             view_data = np.array(
                 [np.zeros(nb_features) for _ in range(firstBound)] +
@@ -116,10 +111,10 @@ def get_plausible_db_hdf5(features, path, file_name, nb_class=3,
             view_names.append("ViewNumber" + str(view_index))
             are_sparse.append(False)
         dataset = RAMDataset(views=views, labels=labels,
-                              labels_names=label_names, view_names=view_names,
-                              are_sparse=are_sparse,
-                              name="plausible",
-                              example_ids=example_ids)
+                             labels_names=label_names, view_names=view_names,
+                             are_sparse=are_sparse,
+                             name="plausible",
+                             example_ids=example_ids)
         labels_dictionary = {0: "No", 1: "Yes", 2: "Maybe"}
         return dataset, labels_dictionary, "plausible"
 
@@ -128,8 +123,10 @@ class DatasetError(Exception):
     def __init__(self, *args, **kwargs):
         Exception.__init__(self, *args, **kwargs)
 
+
 def get_classic_db_hdf5(views, path_f, name_DB, nb_class, asked_labels_names,
-                     random_state, full=False, add_noise=False, noise_std=0.15,
+                        random_state, full=False, add_noise=False,
+                        noise_std=0.15,
                         path_for_new="../data/"):
     """Used to load a hdf5 database"""
     if full:
@@ -143,8 +140,9 @@ def get_classic_db_hdf5(views, path_f, name_DB, nb_class, asked_labels_names,
         dataset_file = h5py.File(os.path.join(path_f, name_DB + ".hdf5"), "r")
         dataset = HDF5Dataset(hdf5_file=dataset_file)
         labels_dictionary = dataset.select_views_and_labels(nb_labels=nb_class,
-                                        selected_label_names=asked_labels_names,
-                                        view_names=views, random_state=random_state,
+                                                            selected_label_names=asked_labels_names,
+                                                            view_names=views,
+                                                            random_state=random_state,
                                                             path_for_new=path_for_new)
         dataset_name = dataset.get_name()
 
@@ -157,11 +155,12 @@ def get_classic_db_hdf5(views, path_f, name_DB, nb_class, asked_labels_names,
 
 
 def get_classic_db_csv(views, pathF, nameDB, NB_CLASS, askedLabelsNames,
-                       random_state, full=False, add_noise=False, noise_std=0.15,
-                        delimiter=",", path_for_new="../data/"):
+                       random_state, full=False, add_noise=False,
+                       noise_std=0.15,
+                       delimiter=",", path_for_new="../data/"):
     # TODO : Update this one
     labels_names = np.genfromtxt(pathF + nameDB + "-labels-names.csv",
-                                dtype='str', delimiter=delimiter)
+                                 dtype='str', delimiter=delimiter)
     datasetFile = h5py.File(pathF + nameDB + ".hdf5", "w")
     labels = np.genfromtxt(pathF + nameDB + "-labels.csv", delimiter=delimiter)
     labelsDset = datasetFile.create_dataset("Labels", labels.shape, data=labels)
@@ -186,14 +185,17 @@ def get_classic_db_csv(views, pathF, nameDB, NB_CLASS, askedLabelsNames,
     metaDataGrp.attrs["nbClass"] = len(labels_names)
     metaDataGrp.attrs["datasetLength"] = len(labels)
     datasetFile.close()
-    datasetFile, labelsDictionary, dataset_name = get_classic_db_hdf5(views, pathF, nameDB,
-                                                     NB_CLASS, askedLabelsNames,
-                                                     random_state, full,
-                                                     path_for_new=path_for_new)
+    datasetFile, labelsDictionary, dataset_name = get_classic_db_hdf5(
+        views, pathF, nameDB, NB_CLASS, askedLabelsNames, random_state, full,
+        path_for_new=path_for_new)
 
     return datasetFile, labelsDictionary, dataset_name
 
-
 #
 # def get_classes(labels):
 #     labels_set = set(list(labels))
@@ -329,8 +331,6 @@ def get_classic_db_csv(views, pathF, nameDB, NB_CLASS, askedLabelsNames,
 #             new_d_set.attrs[key] = value
 
 
-
-
 #
 # def add_gaussian_noise(dataset_file, random_state, path_f, dataset_name,
 #                        noise_std=0.15):
@@ -1315,4 +1315,4 @@ def get_classic_db_csv(views, pathF, nameDB, NB_CLASS, askedLabelsNames,
 #     labels_dictionary = {0: "No", 1: "Yes"}
 #     datasetFile.close()
 #     datasetFile = h5py.File(pathF + "Fake.hdf5", "r")
-#     return datasetFile, labels_dictionary
\ No newline at end of file
+#     return datasetFile, labels_dictionary
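
A hedged usage sketch for the synthetic generator reformatted above (not taken from the repository's tests; the first positional argument of `get_plausible_db_hdf5` is not used in the code shown here, so the empty-string placeholder is an assumption, as is the temporary path):

    import numpy as np
    from multiview_platform.mono_multi_view_classifiers.utils import (
        get_multiview_db as db)

    random_state = np.random.RandomState(42)
    dataset, labels_dictionary, name = db.get_plausible_db_hdf5(
        "", path="/tmp/plausible_data/", file_name="plausible.hdf5",
        random_state=random_state, nb_view=3, nb_class=3,
        nb_examples=100, nb_features=10)

    print(name)               # "plausible"
    print(labels_dictionary)  # e.g. {0: "No", 1: "Yes", 2: "Maybe"}
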
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/hyper_parameter_search.py b/multiview_platform/mono_multi_view_classifiers/utils/hyper_parameter_search.py
index a02d8e0cc121e3e9280d004c502b72bff2c69c59..f5288d978f423ae3e8b2e236bcc735d42752a1d7 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/hyper_parameter_search.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/hyper_parameter_search.py
@@ -1,9 +1,9 @@
 import itertools
 import sys
+import traceback
 
 import matplotlib.pyplot as plt
 import numpy as np
-import traceback
 from scipy.stats import randint, uniform
 from sklearn.model_selection import RandomizedSearchCV
 
@@ -11,11 +11,12 @@ from .multiclass import get_mc_estim
 from .. import metrics
 
 
-def search_best_settings(dataset_var, labels, classifier_module, classifier_name,
-                        metrics, learning_indices, i_k_folds, random_state,
-                        directory, views_indices=None, nb_cores=1,
-                        searching_tool="randomized_search-equiv", n_iter=1,
-                        classifier_config=None):
+def search_best_settings(dataset_var, labels, classifier_module,
+                         classifier_name,
+                         metrics, learning_indices, i_k_folds, random_state,
+                         directory, views_indices=None, nb_cores=1,
+                         searching_tool="randomized_search-equiv", n_iter=1,
+                         classifier_config=None):
     """Used to select the right hyper-parameter optimization function
     to optimize hyper parameters"""
     if views_indices is None:
@@ -23,22 +24,26 @@ def search_best_settings(dataset_var, labels, classifier_module, classifier_name
     output_file_name = directory
     thismodule = sys.modules[__name__]
     if searching_tool != "None":
-        searching_tool_method = getattr(thismodule, searching_tool.split("-")[0])
+        searching_tool_method = getattr(thismodule,
+                                        searching_tool.split("-")[0])
         best_settings, test_folds_preds = searching_tool_method(
             dataset_var, labels, "multiview", random_state, output_file_name,
             classifier_module, classifier_name, i_k_folds,
             nb_cores, metrics, n_iter, classifier_config,
-            learning_indices=learning_indices, view_indices=views_indices, equivalent_draws=searching_tool.endswith("equiv"))
+            learning_indices=learning_indices, view_indices=views_indices,
+            equivalent_draws=searching_tool.endswith("equiv"))
     else:
         best_settings = classifier_config
     return best_settings  # or a well-set classifier?
 
 
-def grid_search(dataset, classifier_name, views_indices=None, k_folds=None, n_iter=1,
+def grid_search(dataset, classifier_name, views_indices=None, k_folds=None,
+                n_iter=1,
                 **kwargs):
     """Used to perfom gridsearch on the classifiers"""
     pass
 
+
 class CustomUniform:
     """Used as a distribution returning a float between loc and loc + scale..
         It can be used with a multiplier agrument to be able to perform more complex generation
@@ -55,6 +60,7 @@ class CustomUniform:
         else:
             return unif
 
+
 class CustomRandint:
     """Used as a distribution returning a integer between low and high-1.
     It can be used with a multiplier agrument to be able to perform more complex generation
@@ -74,6 +80,7 @@ class CustomRandint:
     def get_nb_possibilities(self):
         return self.randint.b - self.randint.a
 
+
 def compute_possible_combinations(params_dict):
     n_possibs = np.ones(len(params_dict)) * np.inf
     for value_index, value in enumerate(params_dict.values()):
@@ -84,7 +91,8 @@ def compute_possible_combinations(params_dict):
     return np.prod(n_possibs)
 
 
-def get_test_folds_preds(X, y, cv, estimator, framework, available_indices=None):
+def get_test_folds_preds(X, y, cv, estimator, framework,
+                         available_indices=None):
     test_folds_prediction = []
     if framework == "monoview":
         folds = cv.split(np.arange(len(y)), y)
@@ -107,15 +115,19 @@ def get_test_folds_preds(X, y, cv, estimator, framework, available_indices=None)
     return test_folds_prediction
 
 
-def randomized_search(X, y, framework, random_state, output_file_name, classifier_module,
-                      classifier_name, folds=4, nb_cores=1, metric=["accuracy_score", None],
-                      n_iter=30, classifier_kwargs =None, learning_indices=None, view_indices=None,
+def randomized_search(X, y, framework, random_state, output_file_name,
+                      classifier_module,
+                      classifier_name, folds=4, nb_cores=1,
+                      metric=["accuracy_score", None],
+                      n_iter=30, classifier_kwargs=None, learning_indices=None,
+                      view_indices=None,
                       equivalent_draws=True):
-    estimator = getattr(classifier_module, classifier_name)(random_state=random_state,
-                                                            **classifier_kwargs)
+    estimator = getattr(classifier_module, classifier_name)(
+        random_state=random_state,
+        **classifier_kwargs)
     params_dict = estimator.gen_distribs()
     estimator = get_mc_estim(estimator, random_state,
-                             multiview=(framework=="multiview"),
+                             multiview=(framework == "multiview"),
                              y=y)
     if params_dict:
         metric_module = getattr(metrics, metric[0])
@@ -125,20 +137,22 @@ def randomized_search(X, y, framework, random_state, output_file_name, classifie
         else:
             metric_kargs = {}
 
-
         scorer = metric_module.get_scorer(**metric_kargs)
         nb_possible_combinations = compute_possible_combinations(params_dict)
-        n_iter_real= min(n_iter, nb_possible_combinations)
+        n_iter_real = min(n_iter, nb_possible_combinations)
 
         random_search = MultiviewCompatibleRandomizedSearchCV(estimator,
-                                                              n_iter=int(n_iter_real),
+                                                              n_iter=int(
+                                                                  n_iter_real),
                                                               param_distributions=params_dict,
                                                               refit=True,
-                                                              n_jobs=nb_cores, scoring=scorer,
-                                                              cv=folds, random_state=random_state,
+                                                              n_jobs=nb_cores,
+                                                              scoring=scorer,
+                                                              cv=folds,
+                                                              random_state=random_state,
                                                               learning_indices=learning_indices,
                                                               view_indices=view_indices,
-                                                              framework = framework,
+                                                              framework=framework,
                                                               equivalent_draws=equivalent_draws)
         random_search.fit(X, y)
         best_params = random_search.best_params_
@@ -146,7 +160,8 @@ def randomized_search(X, y, framework, random_state, output_file_name, classifie
             best_params.pop("random_state")
 
         scoresArray = random_search.cv_results_['mean_test_score']
-        params = [(key[6:], value ) for key, value in random_search.cv_results_.items() if key.startswith("param_")]
+        params = [(key[6:], value) for key, value in
+                  random_search.cv_results_.items() if key.startswith("param_")]
         # gen_heat_maps(params, scores_array, output_file_name)
         best_estimator = random_search.best_estimator_
     else:
@@ -164,14 +179,17 @@ class MultiviewCompatibleRandomizedSearchCV(RandomizedSearchCV):
 
     def __init__(self, estimator, param_distributions, n_iter=10,
                  refit=True, n_jobs=1, scoring=None, cv=None,
-                 random_state=None, learning_indices=None, view_indices=None, framework="monoview",
+                 random_state=None, learning_indices=None, view_indices=None,
+                 framework="monoview",
                  equivalent_draws=True):
         super(MultiviewCompatibleRandomizedSearchCV, self).__init__(estimator,
                                                                     n_iter=n_iter,
                                                                     param_distributions=param_distributions,
                                                                     refit=refit,
-                                                                    n_jobs=n_jobs, scoring=scoring,
-                                                                    cv=cv, random_state=random_state)
+                                                                    n_jobs=n_jobs,
+                                                                    scoring=scoring,
+                                                                    cv=cv,
+                                                                    random_state=random_state)
         self.framework = framework
         self.available_indices = learning_indices
         self.view_indices = view_indices
@@ -179,16 +197,21 @@ class MultiviewCompatibleRandomizedSearchCV(RandomizedSearchCV):
 
     def fit(self, X, y=None, groups=None, **fit_params):
         if self.framework == "monoview":
-            return super(MultiviewCompatibleRandomizedSearchCV, self).fit(X, y=y, groups=groups, **fit_params)
+            return super(MultiviewCompatibleRandomizedSearchCV, self).fit(X,
+                                                                          y=y,
+                                                                          groups=groups,
+                                                                          **fit_params)
         elif self.framework == "multiview":
-            return self.fit_multiview(X, y=y, groups=groups,**fit_params)
+            return self.fit_multiview(X, y=y, groups=groups, **fit_params)
 
     def fit_multiview(self, X, y=None, groups=None, track_tracebacks=True,
                       **fit_params):
-        n_splits = self.cv.get_n_splits(self.available_indices, y[self.available_indices])
-        folds = list(self.cv.split(self.available_indices, y[self.available_indices]))
+        n_splits = self.cv.get_n_splits(self.available_indices,
+                                        y[self.available_indices])
+        folds = list(
+            self.cv.split(self.available_indices, y[self.available_indices]))
         if self.equivalent_draws:
-            self.n_iter = self.n_iter*X.nb_view
+            self.n_iter = self.n_iter * X.nb_view
         # Fix to allow sklearn > 0.19
         from sklearn.model_selection import ParameterSampler
         candidate_params = list(
@@ -196,31 +219,35 @@ class MultiviewCompatibleRandomizedSearchCV(RandomizedSearchCV):
                              random_state=self.random_state))
         base_estimator = clone(self.estimator)
         results = {}
-        self.cv_results_ = dict(("param_"+param_name, []) for param_name in candidate_params[0].keys())
+        self.cv_results_ = dict(("param_" + param_name, []) for param_name in
+                                candidate_params[0].keys())
         self.cv_results_["mean_test_score"] = []
         n_failed = 0
         tracebacks = []
         for candidate_param_idx, candidate_param in enumerate(candidate_params):
-            test_scores = np.zeros(n_splits)+1000
+            test_scores = np.zeros(n_splits) + 1000
             try:
                 for fold_idx, (train_indices, test_indices) in enumerate(folds):
                     current_estimator = clone(base_estimator)
                     current_estimator.set_params(**candidate_param)
                     current_estimator.fit(X, y,
-                                          train_indices=self.available_indices[train_indices],
+                                          train_indices=self.available_indices[
+                                              train_indices],
                                           view_indices=self.view_indices)
                     test_prediction = current_estimator.predict(
                         X,
                         self.available_indices[test_indices],
                         view_indices=self.view_indices)
-                    test_score = self.scoring._score_func(y[self.available_indices[test_indices]],
-                                                          test_prediction,
-                                                         **self.scoring._kwargs)
+                    test_score = self.scoring._score_func(
+                        y[self.available_indices[test_indices]],
+                        test_prediction,
+                        **self.scoring._kwargs)
                     test_scores[fold_idx] = test_score
                 for param_name, param in candidate_param.items():
-                    self.cv_results_["param_"+param_name].append(param)
+                    self.cv_results_["param_" + param_name].append(param)
                 cross_validation_score = np.mean(test_scores)
-                self.cv_results_["mean_test_score"].append(cross_validation_score)
+                self.cv_results_["mean_test_score"].append(
+                    cross_validation_score)
                 results[candidate_param_idx] = cross_validation_score
                 if cross_validation_score <= min(results.values()):
                     self.best_params_ = candidate_params[candidate_param_idx]
@@ -237,29 +264,33 @@ class MultiviewCompatibleRandomizedSearchCV(RandomizedSearchCV):
                     tracebacks))
 
         if self.refit:
-            self.best_estimator_ = clone(base_estimator).set_params(**self.best_params_)
+            self.best_estimator_ = clone(base_estimator).set_params(
+                **self.best_params_)
             self.best_estimator_.fit(X, y, **fit_params)
         self.n_splits_ = n_splits
         return self
 
     def get_test_folds_preds(self, X, y, estimator):
         test_folds_prediction = []
-        if self.framework=="monoview":
+        if self.framework == "monoview":
             folds = self.cv.split(np.arange(len(y)), y)
-        if self.framework=="multiview":
+        if self.framework == "multiview":
             folds = self.cv.split(self.available_indices, y)
         fold_lengths = np.zeros(self.cv.n_splits, dtype=int)
         for fold_idx, (train_indices, test_indices) in enumerate(folds):
             fold_lengths[fold_idx] = len(test_indices)
             if self.framework == "monoview":
                 estimator.fit(X[train_indices], y[train_indices])
-                test_folds_prediction.append(estimator.predict(X[train_indices]))
-            if self.framework =="multiview":
+                test_folds_prediction.append(
+                    estimator.predict(X[test_indices]))
+            if self.framework == "multiview":
                 estimator.fit(X, y, self.available_indices[train_indices])
-                test_folds_prediction.append(estimator.predict(X, self.available_indices[test_indices]))
+                test_folds_prediction.append(
+                    estimator.predict(X, self.available_indices[test_indices]))
         min_fold_length = fold_lengths.min()
         test_folds_prediction = np.array(
-            [test_fold_prediction[:min_fold_length] for test_fold_prediction in test_folds_prediction])
+            [test_fold_prediction[:min_fold_length] for test_fold_prediction in
+             test_folds_prediction])
         return test_folds_prediction
 
 
@@ -333,7 +364,8 @@ class MultiviewCompatibleRandomizedSearchCV(RandomizedSearchCV):
 #     return classifier
 
 
-def spear_mint(dataset, classifier_name, views_indices=None, k_folds=None, n_iter=1,
+def spear_mint(dataset, classifier_name, views_indices=None, k_folds=None,
+               n_iter=1,
                **kwargs):
     """Used to perform spearmint on the classifiers to optimize hyper parameters,
     longer than randomsearch (can't be parallelized)"""
@@ -362,7 +394,8 @@ def gen_heat_maps(params, scores_array, output_file_name):
 
         scores_matrix = np.zeros(
             (len(param_array2_set), len(param_array1_set))) - 0.1
-        for param1, param2, score in zip(param_array1, param_array2, scores_array):
+        for param1, param2, score in zip(param_array1, param_array2,
+                                         scores_array):
             param1_index, = np.where(param_array1_set == param1)
             param2_index, = np.where(param_array2_set == param2)
             scores_matrix[int(param2_index), int(param1_index)] = score
@@ -375,10 +408,12 @@ def gen_heat_maps(params, scores_array, output_file_name):
         plt.ylabel(param_name2)
         plt.colorbar()
         plt.xticks(np.arange(len(param_array1_set)), param_array1_set)
-        plt.yticks(np.arange(len(param_array2_set)), param_array2_set, rotation=45)
+        plt.yticks(np.arange(len(param_array2_set)), param_array2_set,
+                   rotation=45)
         plt.title('Validation metric')
         plt.savefig(
-            output_file_name + "heat_map-" + param_name1 + "-" + param_name2 + ".png", transparent=True)
+            output_file_name + "heat_map-" + param_name1 + "-" + param_name2 + ".png",
+            transparent=True)
         plt.close()
 
 # nohup python ~/dev/git/spearmint/spearmint/main.py . &
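
For reference, a minimal, self-contained sketch of the ParameterSampler-plus-folds loop that fit_multiview implements above, shown with a plain sklearn estimator so it runs on its own; the data, estimator, parameter grid and scorer are illustrative stand-ins, not the platform's objects. The sketch keeps the candidate with the highest mean fold score, whereas fit_multiview above keeps the candidate whose mean score is lowest (<= min(results.values())), i.e. it treats the metric as a loss.

import numpy as np
from sklearn.base import clone
from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score
from sklearn.model_selection import ParameterSampler, StratifiedKFold
from sklearn.tree import DecisionTreeClassifier

# Toy data and a stand-in estimator / parameter space (illustrative only).
X, y = make_classification(n_samples=200, n_features=10, random_state=42)
base_estimator = DecisionTreeClassifier(random_state=42)
param_distributions = {"max_depth": [1, 2, 3, 5, None],
                       "min_samples_split": [2, 5, 10]}

cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
folds = list(cv.split(X, y))
candidate_params = list(ParameterSampler(param_distributions, n_iter=10,
                                         random_state=42))

cv_results = {"mean_test_score": [], "params": []}
best_score, best_params = -np.inf, None
for candidate_param in candidate_params:
    fold_scores = np.zeros(len(folds))
    for fold_idx, (train_indices, test_indices) in enumerate(folds):
        estimator = clone(base_estimator).set_params(**candidate_param)
        estimator.fit(X[train_indices], y[train_indices])
        fold_scores[fold_idx] = accuracy_score(
            y[test_indices], estimator.predict(X[test_indices]))
    mean_score = fold_scores.mean()
    cv_results["mean_test_score"].append(mean_score)
    cv_results["params"].append(candidate_param)
    if mean_score > best_score:  # higher-is-better metric
        best_score, best_params = mean_score, candidate_param

# Refit on all the data with the best configuration, as refit=True does.
best_estimator = clone(base_estimator).set_params(**best_params).fit(X, y)
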
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/make_file_config.py b/multiview_platform/mono_multi_view_classifiers/utils/make_file_config.py
index ac53853a52f0338b3bbf1e289a2582f5f4d2707a..5810e37bdddd002a96ff73d97d37d8f85245fbe9 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/make_file_config.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/make_file_config.py
@@ -1,24 +1,5 @@
-import os, sys, inspect
-# from multiview_platform.mono_multi_view_classifiers.monoview_classifiers.adaboost import Adaboost
-
-
 import importlib
-
-#
-# if instring in mymodule.__file__:
-#
-#     sig = inspect.signature(monInstance.__init__)
-#     for arg_idx, name in enumerate(sig.parameters):
-#         param= sig.parameters[name]
-#         if not name.startswith('self'):
-#             parameter["0"].append(name)
-#
-#             if param.default is not inspect.Parameter.empty:
-#                 value_default = param.default
-#             else:
-#                 value_default = 'None'
-#     print()
-
+import inspect
 
 class ConfigurationMaker():
     """
@@ -33,14 +14,13 @@ class ConfigurationMaker():
     def __init__(self, classifier_dict=None):
         if classifier_dict is None:
             classifier_dict = {"0": ['mono', 'Adaboost',
-                            'multiview_platform.mono_multi_view_classifiers.monoview_classifiers.adaboost']}
+                                     'multiview_platform.mono_multi_view_classifiers.monoview_classifiers.adaboost']}
         names = []
-        for key, val in  classifier_dict.items():
+        for key, val in classifier_dict.items():
             mymodule = importlib.import_module(val[2])
             names.append(self._get_module_name(mymodule))
             monInstance = getattr(mymodule, val[1])
 
-
     def _get_module_name(self, mymodule):
         for name in dir(mymodule):
             att = getattr(mymodule, name)
@@ -56,4 +36,4 @@ class ConfigurationMaker():
 
 
 if __name__ == '__main__':
-     ConfigurationMaker()
+    ConfigurationMaker()
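
The comment block deleted above sketched how a classifier's __init__ signature can be introspected to build a default configuration. A minimal, self-contained version of that idea, using inspect and sklearn's DecisionTreeClassifier as a stand-in for the platform's classifier modules:

import inspect

from sklearn.tree import DecisionTreeClassifier


def default_config(classifier_class):
    """Map each __init__ parameter (except self) to its default, or 'None'."""
    config = {}
    sig = inspect.signature(classifier_class.__init__)
    for name, param in sig.parameters.items():
        if name == "self":
            continue
        if param.default is not inspect.Parameter.empty:
            config[name] = param.default
        else:
            config[name] = "None"
    return config


print(default_config(DecisionTreeClassifier))
# e.g. {'criterion': 'gini', 'splitter': 'best', 'max_depth': None, ...}
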
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/multiclass.py b/multiview_platform/mono_multi_view_classifiers/utils/multiclass.py
index d8165d426ae1364bf304e1fccdec40ab5948fbdb..dbd323500f0499545eeb53a16f12e906662176b6 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/multiclass.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/multiclass.py
@@ -1,16 +1,12 @@
-import itertools
-from sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier
-import numpy as np
-from abc import abstractmethod
-
-from sklearn.preprocessing import LabelBinarizer
-from sklearn.base import clone, is_classifier, is_regressor
 import array
+
+import numpy as np
 import scipy.sparse as sp
+from sklearn.base import clone, is_classifier, is_regressor
+from sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier
 from sklearn.multiclass import _ovr_decision_function
+from sklearn.preprocessing import LabelBinarizer
 
-
-from .base import BaseClassifier
 from .dataset import get_examples_views_indices
 
 
@@ -134,7 +130,7 @@ def get_mc_estim(estimator, random_state, y=None, multiview=False,
     estimator : sklearn-like estimator
         Either the requested estimator, or a multiclass-compatible wrapper around it
     """
-    if (y is not None and np.unique(y).shape[0]>2) or multiclass :
+    if (y is not None and np.unique(y).shape[0] > 2) or multiclass:
         if not clone(estimator).accepts_multi_class(random_state):
             if hasattr(estimator, "predict_proba"):
                 if multiview:
@@ -205,11 +201,10 @@ class OVOWrapper(MonoviewWrapper, OneVsOneClassifier):
         return Y
 
 
-
 # The following code is a multiview adaptation of sklearn's multiclass package
 
 def _multiview_fit_binary(estimator, X, y, train_indices,
-                          view_indices,classes=None,):
+                          view_indices, classes=None):
     # TODO: add sklearn-style input verifications
     estimator = clone(estimator)
     estimator.fit(X, y, train_indices=train_indices,
@@ -226,7 +221,7 @@ def _multiview_predict_binary(estimator, X, example_indices, view_indices):
     except (AttributeError, NotImplementedError):
         # probabilities of the positive class
         score = estimator.predict_proba(X, example_indices=example_indices,
-                                 view_indices=view_indices)[:, 1]
+                                        view_indices=view_indices)[:, 1]
     return score
 
 
@@ -260,8 +255,8 @@ class MultiviewOVRWrapper(MultiviewWrapper, OneVsRestClassifier):
 
     def predict(self, X, example_indices=None, view_indices=None):
         example_indices, view_indices = get_examples_views_indices(X,
-                                                                    example_indices,
-                                                                    view_indices)
+                                                                   example_indices,
+                                                                   view_indices)
         n_samples = len(example_indices)
         if self.label_binarizer_.y_type_ == "multiclass":
             maxima = np.empty(n_samples, dtype=float)
@@ -285,7 +280,8 @@ class MultiviewOVRWrapper(MultiviewWrapper, OneVsRestClassifier):
                 indices.extend(
                     np.where(_multiview_predict_binary(e, X,
                                                        example_indices,
-                                                       view_indices) > thresh)[0])
+                                                       view_indices) > thresh)[
+                        0])
                 indptr.append(len(indices))
             data = np.ones(len(indices), dtype=int)
             indicator = sp.csc_matrix((data, indices, indptr),
@@ -303,9 +299,9 @@ def _multiview_fit_ovo_binary(estimator, X, y, i, j, train_indices,
     indcond = np.arange(X.get_nb_examples())[cond]
     train_indices = np.intersect1d(train_indices, indcond)
     return _multiview_fit_binary(estimator,
-                       X,
-                       y_binary,  train_indices, view_indices, classes=[i, j]), train_indices
-
+                                 X,
+                                 y_binary, train_indices, view_indices,
+                                 classes=[i, j]), train_indices
 
 
 class MultiviewOVOWrapper(MultiviewWrapper, OneVsOneClassifier):
@@ -339,9 +335,9 @@ class MultiviewOVOWrapper(MultiviewWrapper, OneVsOneClassifier):
             self.estimator, X, y, self.classes_[i], self.classes_[j],
             train_indices,
             view_indices
-            )
+        )
             for i in range(n_classes) for j in range(i + 1, n_classes)
-            ])))
+        ])))
 
         self.estimators_ = estimators_indices[0]
         self.pairwise_indices_ = (
@@ -384,9 +380,10 @@ class MultiviewOVOWrapper(MultiviewWrapper, OneVsOneClassifier):
         else:
             # TODO Gram matrix compatibility
             Xs = [X[:, idx] for idx in indices]
-        predictions = np.vstack([est.predict(Xi, example_indices=example_indices,
-                                             view_indices=view_indices)
-                                 for est, Xi in zip(self.estimators_, Xs)]).T
+        predictions = np.vstack(
+            [est.predict(Xi, example_indices=example_indices,
+                         view_indices=view_indices)
+             for est, Xi in zip(self.estimators_, Xs)]).T
         confidences = np.ones(predictions.shape)
         # confidences = np.vstack([_predict_binary(est, Xi)
         #                          for est, Xi in zip(self.estimators_, Xs)]).T
@@ -395,5 +392,3 @@ class MultiviewOVOWrapper(MultiviewWrapper, OneVsOneClassifier):
         if self.n_classes_ == 2:
             return Y[:, 1]
         return Y
-
-
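
MultiviewOVOWrapper above reuses sklearn's one-vs-one machinery: one binary estimator is fitted per class pair, and the pairwise predictions are aggregated into per-class votes (the confidence-based tie-breaking that sklearn's _ovr_decision_function adds is left out here). A minimal sketch of that aggregation on made-up pairwise predictions:

import itertools

import numpy as np

n_classes = 3
class_pairs = list(itertools.combinations(range(n_classes), 2))  # (0,1) (0,2) (1,2)

# Made-up pairwise predictions for 4 samples: predictions[s, k] is the class
# label chosen by the binary estimator trained on class_pairs[k] for sample s.
predictions = np.array([[0, 0, 1],
                        [1, 2, 2],
                        [0, 2, 1],
                        [1, 0, 2]])

votes = np.zeros((predictions.shape[0], n_classes), dtype=int)
for k, (i, j) in enumerate(class_pairs):
    # Each pairwise estimator votes for the class it predicted, i or j.
    votes[np.arange(predictions.shape[0]), predictions[:, k]] += 1

print(votes)                 # per-class vote counts
print(votes.argmax(axis=1))  # OvO decision: most-voted class per sample
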
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/multiview_result_analysis.py b/multiview_platform/mono_multi_view_classifiers/utils/multiview_result_analysis.py
index df3e59727fadf66b6c2836f59afc546a57cd69e4..d3a87d7f6ad51f4578943e939a6624d7fbf5c826 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/multiview_result_analysis.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/multiview_result_analysis.py
@@ -10,8 +10,9 @@ def print_metric_score(metric_scores, metrics):
     for metric in metrics:
         metric_module = getattr(metrics, metric[0])
         if metric[1] is not None:
-            metric_kwargs = dict((index, metricConfig) for index, metricConfig in
-                                 enumerate(metric[1]))
+            metric_kwargs = dict(
+                (index, metricConfig) for index, metricConfig in
+                enumerate(metric[1]))
         else:
             metric_kwargs = {}
         metric_score_string += "\tFor " + metric_module.get_config(
@@ -24,7 +25,8 @@ def print_metric_score(metric_scores, metrics):
     return metric_score_string
 
 
-def get_total_metric_scores(metric, train_labels, test_labels, validation_indices,
+def get_total_metric_scores(metric, train_labels, test_labels,
+                            validation_indices,
                             learning_indices, labels):
     metric_module = getattr(metrics, metric[0])
     if metric[1] is not None:
@@ -43,8 +45,10 @@ def get_metrics_scores(metrics_var, train_labels, test_labels,
                        validation_indices, learning_indices, labels):
     metrics_scores = {}
     for metric in metrics_var:
-        metrics_scores[metric[0]] = get_total_metric_scores(metric, train_labels,
+        metrics_scores[metric[0]] = get_total_metric_scores(metric,
+                                                            train_labels,
                                                             test_labels,
                                                             validation_indices,
-                                                            learning_indices, labels)
+                                                            learning_indices,
+                                                            labels)
     return metrics_scores
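
get_total_metric_scores above resolves each metric by name with getattr on the platform's metrics package and forwards its configuration as keyword arguments. A minimal sketch of that dispatch pattern, using sklearn.metrics as a stand-in and a plain kwargs dict instead of the enumerated config list:

import numpy as np
import sklearn.metrics as metrics

y_true = np.array([0, 1, 1, 0, 1])
y_pred = np.array([0, 1, 0, 0, 1])

# (metric name, kwargs) pairs, resolved by name at runtime.
metric_list = [("accuracy_score", None), ("f1_score", {"average": "binary"})]

for metric_name, metric_config in metric_list:
    metric_func = getattr(metrics, metric_name)
    metric_kwargs = metric_config if metric_config is not None else {}
    print(metric_name, metric_func(y_true, y_pred, **metric_kwargs))
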
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/transformations.py b/multiview_platform/mono_multi_view_classifiers/utils/transformations.py
index 2a68282325538c70dc90ea51b2a4f3e3ab1ba52c..17e7b90d3b0d186495893220514524b4e0a648a2 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/transformations.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/transformations.py
@@ -1,5 +1,5 @@
 import numpy as np
-from sklearn.preprocessing import LabelEncoder
+
 
 def sign_labels(labels):
     """
@@ -23,6 +23,7 @@ def sign_labels(labels):
     else:
         return labels
 
+
 def unsign_labels(labels):
     """
     The inverse function
@@ -35,10 +36,9 @@ def unsign_labels(labels):
     -------
 
     """
-    if len(labels.shape)==2:
-        labels = labels.reshape((labels.shape[0], ))
+    if len(labels.shape) == 2:
+        labels = labels.reshape((labels.shape[0],))
     if -1 in labels:
         return np.array([label if label != -1 else 0 for label in labels])
     else:
         return labels
-
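
Judging from unsign_labels above, sign_labels presumably performs the inverse 0 -> -1 mapping, so the pair just converts between the {0, 1} encoding used by the platform and the {-1, 1} encoding some classifiers expect. A minimal round-trip illustration of that mapping, written directly with numpy rather than calling the platform helpers:

import numpy as np

labels = np.array([0, 1, 1, 0])

# {0, 1} -> {-1, 1}: what sign_labels does when a 0 label is present.
signed = np.where(labels == 0, -1, labels)
print(signed)    # [-1  1  1 -1]

# {-1, 1} -> {0, 1}: what unsign_labels does when a -1 label is present.
unsigned = np.where(signed == -1, 0, signed)
print(unsigned)  # [0 1 1 0]
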
diff --git a/multiview_platform/tests/test_mono_view/test_MonoviewUtils.py b/multiview_platform/tests/test_mono_view/test_MonoviewUtils.py
index 628a51143ff5f5aa0554961fac28aadebd979eb6..61277552368917e82e1b034c5323e6177f0cc41b 100644
--- a/multiview_platform/tests/test_mono_view/test_MonoviewUtils.py
+++ b/multiview_platform/tests/test_mono_view/test_MonoviewUtils.py
@@ -23,10 +23,10 @@ class Test_genTestFoldsPreds(unittest.TestCase):
         # print(cls.y_train)
 
     def test_simple(cls):
-        testFoldsPreds = monoview_utils.genTestFoldsPreds(cls.X_train,
-                                                          cls.y_train,
-                                                          cls.KFolds,
-                                                          cls.estimator)
+        testFoldsPreds = monoview_utils.gen_test_folds_preds(cls.X_train,
+                                                             cls.y_train,
+                                                             cls.KFolds,
+                                                             cls.estimator)
         cls.assertEqual(testFoldsPreds.shape, (3, 10))
         np.testing.assert_array_equal(testFoldsPreds[0], np.array(
             [ 1,  1, -1, -1,  1,  1, -1,  1, -1,  1]))