From 4a3e213bb8a06410bb1e5e72638e5a993ef3a126 Mon Sep 17 00:00:00 2001
From: Baptiste Bauvin <baptiste.bauvin@lis-lab.fr>
Date: Tue, 18 Jun 2019 17:19:01 -0400
Subject: [PATCH] Revert "Trying to have multiple args"

This reverts commit c38d163b85f1d49df6fa6d84f3cb5ea5c045f5ab.
---
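Notes:

The reverted commit added support for passing several candidate values per
hyperparameter when no hyperparameter search is run. For reference, the helper
it introduced (and which this revert removes from
Monoview/ExecClassifMonoView.py) expanded a dict of per-parameter value lists
into one kwargs dict per combination. A minimal sketch of that behaviour,
reconstructed from the removed hunk, is:

    import itertools

    def gen_multiple_kwargs_combinations(clKWARGS):
        # e.g. {"max_depth": [1, 2], "criterion": ["gini"]} ->
        # [{"max_depth": 1, "criterion": "gini"},
        #  {"max_depth": 2, "criterion": "gini"}]
        keys = list(clKWARGS.keys())
        return [dict(zip(keys, values))
                for values in itertools.product(*clKWARGS.values())]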
 .../MonoMultiViewClassifiers/ExecClassif.py   |  37 +-
 .../Monoview/ExecClassifMonoView.py           | 320 ++++-----
 .../Monoview/ExportResults.py                 | 632 +++++++++---------
 .../Multiview/ExecMultiview.py                | 192 +++---
 .../utils/GetMultiviewDb.py                   |   3 +-
 .../utils/execution.py                        |  18 +-
 6 files changed, 581 insertions(+), 621 deletions(-)

diff --git a/multiview_platform/MonoMultiViewClassifiers/ExecClassif.py b/multiview_platform/MonoMultiViewClassifiers/ExecClassif.py
index 92683b22..31b56ffd 100644
--- a/multiview_platform/MonoMultiViewClassifiers/ExecClassif.py
+++ b/multiview_platform/MonoMultiViewClassifiers/ExecClassif.py
@@ -277,7 +277,7 @@ def execOneBenchmark(coreIndex=-1, LABELS_DICTIONARY=None, directory=None,
                      kFolds=None, randomState=None, hyperParamSearch=None,
                      metrics=None, argumentDictionaries=None,
                      benchmark=None, views=None, viewsIndices=None, flag=None,
-                     labels=None, dataset_name=None,
+                     labels=None,
                      ExecMonoview_multicore=ExecMonoview_multicore,
                      ExecMultiview_multicore=ExecMultiview_multicore,
                      initMultiviewArguments=initMultiviewArguments):
@@ -290,7 +290,7 @@ def execOneBenchmark(coreIndex=-1, LABELS_DICTIONARY=None, directory=None,
 
     logging.debug("Start:\t Monoview benchmark")
     resultsMonoview += [
-        ExecMonoview_multicore(directory, dataset_name, labelsNames,
+        ExecMonoview_multicore(directory, args.name, labelsNames,
                                classificationIndices, kFolds,
                                coreIndex, args.type, args.pathF, randomState,
                                labels,
@@ -311,7 +311,7 @@ def execOneBenchmark(coreIndex=-1, LABELS_DICTIONARY=None, directory=None,
 
     logging.debug("Start:\t Multiview benchmark")
     resultsMultiview = [
-        ExecMultiview_multicore(directory, coreIndex, dataset_name,
+        ExecMultiview_multicore(directory, coreIndex, args.name,
                                 classificationIndices, kFolds, args.type,
                                 args.pathF, LABELS_DICTIONARY, randomState,
                                 labels, hyperParamSearch=hyperParamSearch,
@@ -330,7 +330,7 @@ def execOneBenchmark_multicore(nbCores=-1, LABELS_DICTIONARY=None,
                                hyperParamSearch=None, metrics=None,
                                argumentDictionaries=None,
                                benchmark=None, views=None, viewsIndices=None,
-                               flag=None, labels=None, dataset_name=None,
+                               flag=None, labels=None,
                                ExecMonoview_multicore=ExecMonoview_multicore,
                                ExecMultiview_multicore=ExecMultiview_multicore,
                                initMultiviewArguments=initMultiviewArguments):
@@ -346,7 +346,7 @@ def execOneBenchmark_multicore(nbCores=-1, LABELS_DICTIONARY=None,
     nbMulticoreToDo = int(math.ceil(float(nbExperiments) / nbCores))
     for stepIndex in range(nbMulticoreToDo):
         resultsMonoview += (Parallel(n_jobs=nbCores)(
-            delayed(ExecMonoview_multicore)(directory, dataset_name, labelsNames,
+            delayed(ExecMonoview_multicore)(directory, args.name, labelsNames,
                                             classificationIndices, kFolds,
                                             coreIndex, args.type, args.pathF,
                                             randomState, labels,
@@ -374,7 +374,7 @@ def execOneBenchmark_multicore(nbCores=-1, LABELS_DICTIONARY=None,
     nbMulticoreToDo = int(math.ceil(float(nbExperiments) / nbCores))
     for stepIndex in range(nbMulticoreToDo):
         resultsMultiview += Parallel(n_jobs=nbCores)(
-            delayed(ExecMultiview_multicore)(directory, coreIndex, dataset_name,
+            delayed(ExecMultiview_multicore)(directory, coreIndex, args.name,
                                              classificationIndices, kFolds,
                                              args.type, args.pathF,
                                              LABELS_DICTIONARY, randomState,
@@ -399,7 +399,7 @@ def execOneBenchmarkMonoCore(DATASET=None, LABELS_DICTIONARY=None,
                              hyperParamSearch=None, metrics=None,
                              argumentDictionaries=None,
                              benchmark=None, views=None, viewsIndices=None,
-                             flag=None, labels=None, dataset_name=None,
+                             flag=None, labels=None,
                              ExecMonoview_multicore=ExecMonoview_multicore,
                              ExecMultiview_multicore=ExecMultiview_multicore,
                              initMultiviewArguments=initMultiviewArguments):
@@ -411,7 +411,7 @@ def execOneBenchmarkMonoCore(DATASET=None, LABELS_DICTIONARY=None,
         X = DATASET.get("View" + str(arguments["viewIndex"]))
         Y = labels
         resultsMonoview += [
-            ExecMonoview(directory, X, Y, dataset_name, labelsNames,
+            ExecMonoview(directory, X, Y, args.name, labelsNames,
                          classificationIndices, kFolds,
                          1, args.type, args.pathF, randomState,
                          hyperParamSearch=hyperParamSearch, metrics=metrics,
@@ -432,7 +432,7 @@ def execOneBenchmarkMonoCore(DATASET=None, LABELS_DICTIONARY=None,
     resultsMultiview = []
     for arguments in argumentDictionaries["Multiview"]:
         resultsMultiview += [
-            ExecMultiview(directory, DATASET, dataset_name, classificationIndices,
+            ExecMultiview(directory, DATASET, args.name, classificationIndices,
                           kFolds, 1, args.type,
                           args.pathF, LABELS_DICTIONARY, randomState, labels,
                           hyperParamSearch=hyperParamSearch,
@@ -442,7 +442,7 @@ def execOneBenchmarkMonoCore(DATASET=None, LABELS_DICTIONARY=None,
     return [flag, resultsMonoview + resultsMultiview]
 
 
-def execBenchmark(dataset_name, nbCores, statsIter, nbMulticlass,
+def execBenchmark(nbCores, statsIter, nbMulticlass,
                   benchmarkArgumentsDictionaries, classificationIndices,
                   directories,
                   directory, multiClassLabels, metrics, labelsDictionary,
@@ -516,7 +516,7 @@ def execBenchmark(dataset_name, nbCores, statsIter, nbMulticlass,
         logging.debug("Start:\t Deleting " + str(
             nbCores) + " temporary datasets for multiprocessing")
         args = benchmarkArgumentsDictionaries[0]["args"]
-        datasetFiles = delete(args.pathF, dataset_name, nbCores)
+        datasetFiles = delete(args.pathF, args.name, nbCores)
         logging.debug("Start:\t Deleting datasets for multiprocessing")
     # Do everything with flagging
     nbExamples = len(classificationIndices[0][0]) + len(
@@ -547,16 +547,16 @@ def execClassif(arguments):
     monoviewAlgos = args.CL_algos_monoview
     multiviewAlgos = args.CL_algos_multiview
 
-    directory = execution.init_result_directory(args.name, args.views, args.CL_type,
-                                                args.log, args.debug, args.label,
-                                                args.res_dir)
+    directory = execution.initLogFile(args.name, args.views, args.CL_type,
+                                      args.log, args.debug, args.label,
+                                      args.res_dir)
     randomState = execution.initRandomState(args.randomState, directory)
     statsIterRandomStates = execution.initStatsIterRandomStates(statsIter,
                                                                 randomState)
 
     getDatabase = execution.getDatabaseFunction(args.name, args.type)
 
-    DATASET, LABELS_DICTIONARY, dataset_name = getDatabase(args.views,
+    DATASET, LABELS_DICTIONARY, datasetname = getDatabase(args.views,
                                                           args.pathF, args.name,
                                                           args.CL_nbClass,
                                                           args.CL_classes,
@@ -564,6 +564,7 @@ def execClassif(arguments):
                                                           args.full,
                                                           args.add_noise,
                                                           args.noise_std)
+    args.name = datasetname
 
     splits = execution.genSplits(DATASET.get("Labels").value, args.CL_split,
                                  statsIterRandomStates)
@@ -574,7 +575,7 @@ def execClassif(arguments):
     kFolds = execution.genKFolds(statsIter, args.CL_nbFolds,
                                  statsIterRandomStates)
 
-    datasetFiles = Dataset.initMultipleDatasets(args.pathF, dataset_name, nbCores)
+    datasetFiles = Dataset.initMultipleDatasets(args.pathF, args.name, nbCores)
 
     # if not views:
     #     raise ValueError("Empty views list, modify selected views to match dataset " + args.views)
@@ -607,7 +608,7 @@ def execClassif(arguments):
     argumentDictionaries = initMonoviewExps(benchmark, viewsDictionary,
                                             NB_CLASS, initKWARGS)
     directories = execution.genDirecortiesNames(directory, statsIter)
-    benchmarkArgumentDictionaries = execution.genArgumentDictionaries(dataset_name,
+    benchmarkArgumentDictionaries = execution.genArgumentDictionaries(
         LABELS_DICTIONARY, directories, multiclassLabels,
         labelsCombinations, indicesMulticlass,
         hyperParamSearch, args, kFolds,
@@ -616,7 +617,7 @@ def execClassif(arguments):
         views, viewsIndices)
     nbMulticlass = len(labelsCombinations)
 
-    execBenchmark(dataset_name, nbCores, statsIter, nbMulticlass,
+    execBenchmark(nbCores, statsIter, nbMulticlass,
                   benchmarkArgumentDictionaries, splits, directories,
                   directory, multiclassLabels, metrics, LABELS_DICTIONARY,
                   NB_CLASS, DATASET)
diff --git a/multiview_platform/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py b/multiview_platform/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py
index 369049a5..67abed09 100644
--- a/multiview_platform/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py
+++ b/multiview_platform/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py
@@ -4,9 +4,9 @@
 
 import errno
 import logging  # To create Log-Files
+# Import built-in modules
 import os  # to geth path of the running script
 import time  # for time calculations
-import itertools
 
 import h5py
 # Import 3rd party modules
@@ -44,7 +44,7 @@ def ExecMonoview_multicore(directory, name, labelsNames, classificationIndices,
 
 def ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices,
                  KFolds, nbCores, databaseType, path,
-                 random_state, hyperParamSearch="randomizedSearch",
+                 randomState, hyperParamSearch="randomizedSearch",
                  metrics=[["accuracy_score", None]], nIter=30, **args):
     logging.debug("Start:\t Loading data")
     kwargs, \
@@ -54,7 +54,7 @@ def ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices,
     X, \
     learningRate, \
     labelsString, \
-    output_file_name = initConstants(args, X, classificationIndices, labelsNames,
+    outputFileName = initConstants(args, X, classificationIndices, labelsNames,
                                    name, directory)
 
     logging.debug("Done:\t Loading data")
@@ -77,31 +77,11 @@ def ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices,
     logging.debug("Done:\t Determine Train/Test split")
 
     logging.debug("Start:\t Generate classifier args")
-    classifier_module = getattr(MonoviewClassifiers, CL_type)
-    cl_kwargs, test_folds_preds, multiple_kwargs_combs = getHPs(classifier_module, hyperParamSearch,
-                                                                nIter, CL_type, X_train, y_train,
-                                                                random_state, output_file_name,
-                                                                KFolds, nbCores, metrics, kwargs)
-    full_labels_pred = np.zeros(Y.shape, dtype=int) - 100
-    data_shape = X.shape
-    view_index = args["viewIndex"]
-    if multiple_kwargs_combs:
-        for classifier_KWARGS in cl_kwargs:
-            specific_output_file_name = gen_specific_output_file_name(classifier_KWARGS, output_file_name)
-            learn_n_pred(classifier_module, CL_type, X_train, X_test, y_train, y_test,
-                         classifier_KWARGS, feat, specific_output_file_name, X_test_multiclass,
-                          test_folds_preds, random_state, full_labels_pred, classificationIndices, name, KFolds, nbCores, hyperParamSearch, metrics, nIter, labelsNames, data_shape, view_index)
-    else:
-        return learn_n_pred(classifier_module, CL_type, X_train, X_test, y_train, y_test,
-                     cl_kwargs, feat, output_file_name, X_test_multiclass,
-                     test_folds_preds, random_state, full_labels_pred, classificationIndices, name, KFolds, nbCores, hyperParamSearch, metrics, nIter, labelsNames, data_shape, view_index)
-
-
-def learn_n_pred(classifierModule, CL_type, X_train, X_test, y_train, y_test,
-                 clKWARGS, feat, outputFileName, X_test_multiclass,
-                 testFoldsPreds, randomState, full_labels_pred, classificationIndices, name, KFolds, nbCores, hyperParamSearch, metrics, nIter, labelsNames, data_shape, view_index):
-
-    t_start = time.time()
+    classifierModule = getattr(MonoviewClassifiers, CL_type)
+    clKWARGS, testFoldsPreds = getHPs(classifierModule, hyperParamSearch,
+                                      nIter, CL_type, X_train, y_train,
+                                      randomState, outputFileName,
+                                      KFolds, nbCores, metrics, kwargs)
     logging.debug("Done:\t Generate classifier args")
 
     logging.debug("Start:\t Training")
@@ -113,6 +93,7 @@ def learn_n_pred(classifierModule, CL_type, X_train, X_test, y_train, y_test,
     logging.debug("Start:\t Predicting")
     y_train_pred = classifier.predict(X_train)
     y_test_pred = classifier.predict(X_test)
+    full_labels_pred = np.zeros(Y.shape, dtype=int) - 100
     for trainIndex, index in enumerate(classificationIndices[0]):
         full_labels_pred[index] = y_train_pred[trainIndex]
     for testIndex, index in enumerate(classificationIndices[1]):
@@ -132,7 +113,7 @@ def learn_n_pred(classifierModule, CL_type, X_train, X_test, y_train, y_test,
     imagesAnalysis, \
     metricsScores = execute(name, classificationIndices, KFolds, nbCores,
                             hyperParamSearch, metrics, nIter, feat, CL_type,
-                            clKWARGS, labelsNames, data_shape,
+                            clKWARGS, labelsNames, X.shape,
                             y_train, y_train_pred, y_test, y_test_pred, t_end,
                             randomState, classifier, outputFileName)
     # cl_desc = [value for key, value in sorted(clKWARGS.items())]
@@ -143,9 +124,10 @@ def learn_n_pred(classifierModule, CL_type, X_train, X_test, y_train, y_test,
                 y_train, imagesAnalysis, y_test)
     logging.info("Done:\t Saving Results")
 
+    viewIndex = args["viewIndex"]
     if testFoldsPreds is None:
         testFoldsPreds = y_train_pred
-    return MonoviewUtils.MonoviewResult(view_index, CL_type, feat, metricsScores,
+    return MonoviewUtils.MonoviewResult(viewIndex, CL_type, feat, metricsScores,
                                         full_labels_pred, clKWARGS,
                                         y_test_multiclass_pred, testFoldsPreds)
     # return viewIndex, [CL_type, feat, metricsScores, full_labels_pred, clKWARGS, y_test_multiclass_pred, testFoldsPreds]
@@ -211,32 +193,12 @@ def getHPs(classifierModule, hyperParamSearch, nIter, CL_type, X_train, y_train,
                                                       nIter=nIter,
                                                       classifier_KWARGS=kwargs[
                                                           CL_type + "KWARGS"])
-        multiple_kwargs_comb = False
         logging.debug("Done:\t " + hyperParamSearch + " best settings")
     else:
         clKWARGS = kwargs[CL_type + "KWARGS"]
-        print(clKWARGS)
-        multiple_args = [len(val)>1 for val in clKWARGS.values()]
-        print(multiple_args)
-        if True in multiple_args:
-            clKWARGS = gen_multiple_kwargs_combinations(clKWARGS)
-            multiple_kwargs_comb = True
-        else:
-            clKWARGS = dict((key, value[0]) for key, value in clKWARGS.items())
-            multiple_kwargs_comb = False
         testFoldsPreds = None
-    return clKWARGS, testFoldsPreds, multiple_kwargs_comb
-
+    return clKWARGS, testFoldsPreds
 
-def gen_multiple_kwargs_combinations(clKWARGS):
-    values_cartesian_prod = [ _ for _ in itertools.product(*clKWARGS.values())]
-    keys = clKWARGS.keys()
-    kwargs_combination = [dict((key, value) for key, value in zip(keys, values)) for values in values_cartesian_prod]
-    return kwargs_combination
-
-
-def gen_specific_output_file_name(classifier_KWARGS, output_file_name):
-    return output_file_name+"_".join(map(str,list(classifier_KWARGS.values())))
 
 def saveResults(stringAnalysis, outputFileName, full_labels_pred, y_train_pred,
                 y_train, imagesAnalysis, y_test):
@@ -267,131 +229,131 @@ def saveResults(stringAnalysis, outputFileName, full_labels_pred, y_train_pred,
                 outputFileName + imageName + '.png')
 
 
-# if __name__ == '__main__':
-#     """The goal of this part of the module is to be able to execute a monoview experimentation
-#      on a node of a cluster independently.
-#      So one need to fill in all the ExecMonoview function arguments with the parse arg function
-#      It could be a good idea to use pickle to store all the 'simple' args in order to reload them easily"""
-#     import argparse
-#     import pickle
-#
-#     from ..utils import Dataset
-#
-#     parser = argparse.ArgumentParser(
-#         description='This methods is used to execute a multiclass classification with one single view. ',
-#         formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-#
-#     groupStandard = parser.add_argument_group('Standard arguments')
-#     groupStandard.add_argument('-log', action='store_true',
-#                                help='Use option to activate Logging to Console')
-#     groupStandard.add_argument('--name', metavar='STRING', action='store',
-#                                help='Name of Database', default='Plausible')
-#     groupStandard.add_argument('--cl_name', metavar='STRING', action='store',
-#                                help='THe name of the monoview classifier to use',
-#                                default='DecisionTree')
-#     groupStandard.add_argument('--view', metavar='STRING', action='store',
-#                                help='Name of the view used', default='View0')
-#     groupStandard.add_argument('--pathF', metavar='STRING', action='store',
-#                                help='Path to the database hdf5 file',
-#                                default='../../../Data/Plausible')
-#     groupStandard.add_argument('--directory', metavar='STRING', action='store',
-#                                help='Path of the output directory', default='')
-#     groupStandard.add_argument('--labelsNames', metavar='STRING',
-#                                action='store', nargs='+',
-#                                help='Name of the labels used for classification',
-#                                default=['Yes', 'No'])
-#     groupStandard.add_argument('--classificationIndices', metavar='STRING',
-#                                action='store',
-#                                help='Path to the classificationIndices pickle file',
-#                                default='')
-#     groupStandard.add_argument('--KFolds', metavar='STRING', action='store',
-#                                help='Path to the kFolds pickle file',
-#                                default='')
-#     groupStandard.add_argument('--nbCores', metavar='INT', action='store',
-#                                help='Number of cores, -1 for all',
-#                                type=int, default=1)
-#     groupStandard.add_argument('--randomState', metavar='INT', action='store',
-#                                help='Seed for the random state or pickable randomstate file',
-#                                default=42)
-#     groupStandard.add_argument('--hyperParamSearch', metavar='STRING',
-#                                action='store',
-#                                help='The type of method used to search the best set of hyper parameters',
-#                                default='randomizedSearch')
-#     groupStandard.add_argument('--metrics', metavar='STRING', action='store',
-#                                help='Path to the pickle file describing the metricsused to analyze the performance',
-#                                default='')
-#     groupStandard.add_argument('--kwargs', metavar='STRING', action='store',
-#                                help='Path to the pickle file containing the key-words arguments used for classification',
-#                                default='')
-#     groupStandard.add_argument('--nIter', metavar='INT', action='store',
-#                                help='Number of itetarion in hyper parameter search',
-#                                type=int,
-#                                default=10)
-#
-#     args = parser.parse_args()
-#
-#     directory = args.directory
-#     name = args.name
-#     classifierName = args.cl_name
-#     labelsNames = args.labelsNames
-#     viewName = args.view
-#     with open(args.classificationIndices, 'rb') as handle:
-#         classificationIndices = pickle.load(handle)
-#     with open(args.KFolds, 'rb') as handle:
-#         KFolds = pickle.load(handle)
-#     nbCores = args.nbCores
-#     path = args.pathF
-#     with open(args.randomState, 'rb') as handle:
-#         randomState = pickle.load(handle)
-#     hyperParamSearch = args.hyperParamSearch
-#     with open(args.metrics, 'rb') as handle:
-#         metrics = pickle.load(handle)
-#     nIter = args.nIter
-#     with open(args.kwargs, 'rb') as handle:
-#         kwargs = pickle.load(handle)
-#
-#     databaseType = None
-#
-#     # Extract the data using MPI
-#     X, Y = Dataset.getMonoviewShared(path, name, viewName)
-#
-#     # Init log
-#     logFileName = time.strftime(
-#         "%Y_%m_%d-%H_%M_%S") + "-" + name + "-" + viewName + "-" + classifierName + '-LOG'
-#     if not os.path.exists(os.path.dirname(directory + logFileName)):
-#         try:
-#             os.makedirs(os.path.dirname(directory + logFileName))
-#         except OSError as exc:
-#             if exc.errno != errno.EEXIST:
-#                 raise
-#     logFile = directory + logFileName
-#     if os.path.isfile(logFile + ".log"):
-#         for i in range(1, 20):
-#             testFileName = logFileName + "-" + str(i) + ".log"
-#             if not (os.path.isfile(directory + testFileName)):
-#                 logFile = directory + testFileName
-#                 break
-#     else:
-#         logFile += ".log"
-#     logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
-#                         filename=logFile, level=logging.DEBUG,
-#                         filemode='w')
-#     if args.log:
-#         logging.getLogger().addHandler(logging.StreamHandler())
-#
-#     # Computing on multiple cores
-#     res = ExecMonoview(directory, X, Y, name, labelsNames,
-#                        classificationIndices, KFolds, nbCores, databaseType,
-#                        path,
-#                        randomState, hyperParamSearch=hyperParamSearch,
-#                        metrics=metrics, nIter=nIter, **kwargs)
-#
-#     with open(directory + "res.pickle", "wb") as handle:
-#         pickle.dump(res, handle)
-#
-#     # Pickle the res in a file to be reused.
-#     # Go put a token in the token files without breaking everything.
-#
-#     # Need to write a function to be  able to know the timeu sed
-#     # for a monoview experimentation approximately and the ressource it uses to write automatically the file in the shell
-#     # it will have to be a not-too close approx as the taskswont be long and Ram-o-phage
+if __name__ == '__main__':
+    """The goal of this part of the module is to be able to execute a monoview experimentation
+     on a node of a cluster independently.
+     So one needs to fill in all the ExecMonoview function arguments with the argument parser.
+     It could be a good idea to use pickle to store all the 'simple' args in order to reload them easily."""
+    import argparse
+    import pickle
+
+    from ..utils import Dataset
+
+    parser = argparse.ArgumentParser(
+        description='This method is used to execute a multiclass classification with a single view.',
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+    groupStandard = parser.add_argument_group('Standard arguments')
+    groupStandard.add_argument('-log', action='store_true',
+                               help='Use option to activate Logging to Console')
+    groupStandard.add_argument('--name', metavar='STRING', action='store',
+                               help='Name of Database', default='Plausible')
+    groupStandard.add_argument('--cl_name', metavar='STRING', action='store',
+                               help='The name of the monoview classifier to use',
+                               default='DecisionTree')
+    groupStandard.add_argument('--view', metavar='STRING', action='store',
+                               help='Name of the view used', default='View0')
+    groupStandard.add_argument('--pathF', metavar='STRING', action='store',
+                               help='Path to the database hdf5 file',
+                               default='../../../Data/Plausible')
+    groupStandard.add_argument('--directory', metavar='STRING', action='store',
+                               help='Path of the output directory', default='')
+    groupStandard.add_argument('--labelsNames', metavar='STRING',
+                               action='store', nargs='+',
+                               help='Name of the labels used for classification',
+                               default=['Yes', 'No'])
+    groupStandard.add_argument('--classificationIndices', metavar='STRING',
+                               action='store',
+                               help='Path to the classificationIndices pickle file',
+                               default='')
+    groupStandard.add_argument('--KFolds', metavar='STRING', action='store',
+                               help='Path to the kFolds pickle file',
+                               default='')
+    groupStandard.add_argument('--nbCores', metavar='INT', action='store',
+                               help='Number of cores, -1 for all',
+                               type=int, default=1)
+    groupStandard.add_argument('--randomState', metavar='INT', action='store',
+                               help='Seed for the random state, or a pickled random state file',
+                               default=42)
+    groupStandard.add_argument('--hyperParamSearch', metavar='STRING',
+                               action='store',
+                               help='The type of method used to search the best set of hyper parameters',
+                               default='randomizedSearch')
+    groupStandard.add_argument('--metrics', metavar='STRING', action='store',
+                               help='Path to the pickle file describing the metrics used to analyze the performance',
+                               default='')
+    groupStandard.add_argument('--kwargs', metavar='STRING', action='store',
+                               help='Path to the pickle file containing the keyword arguments used for classification',
+                               default='')
+    groupStandard.add_argument('--nIter', metavar='INT', action='store',
+                               help='Number of iterations in the hyperparameter search',
+                               type=int,
+                               default=10)
+
+    args = parser.parse_args()
+
+    directory = args.directory
+    name = args.name
+    classifierName = args.cl_name
+    labelsNames = args.labelsNames
+    viewName = args.view
+    with open(args.classificationIndices, 'rb') as handle:
+        classificationIndices = pickle.load(handle)
+    with open(args.KFolds, 'rb') as handle:
+        KFolds = pickle.load(handle)
+    nbCores = args.nbCores
+    path = args.pathF
+    with open(args.randomState, 'rb') as handle:
+        randomState = pickle.load(handle)
+    hyperParamSearch = args.hyperParamSearch
+    with open(args.metrics, 'rb') as handle:
+        metrics = pickle.load(handle)
+    nIter = args.nIter
+    with open(args.kwargs, 'rb') as handle:
+        kwargs = pickle.load(handle)
+
+    databaseType = None
+
+    # Extract the data using MPI
+    X, Y = Dataset.getMonoviewShared(path, name, viewName)
+
+    # Init log
+    logFileName = time.strftime(
+        "%Y_%m_%d-%H_%M_%S") + "-" + name + "-" + viewName + "-" + classifierName + '-LOG'
+    if not os.path.exists(os.path.dirname(directory + logFileName)):
+        try:
+            os.makedirs(os.path.dirname(directory + logFileName))
+        except OSError as exc:
+            if exc.errno != errno.EEXIST:
+                raise
+    logFile = directory + logFileName
+    if os.path.isfile(logFile + ".log"):
+        for i in range(1, 20):
+            testFileName = logFileName + "-" + str(i) + ".log"
+            if not (os.path.isfile(directory + testFileName)):
+                logFile = directory + testFileName
+                break
+    else:
+        logFile += ".log"
+    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
+                        filename=logFile, level=logging.DEBUG,
+                        filemode='w')
+    if args.log:
+        logging.getLogger().addHandler(logging.StreamHandler())
+
+    # Computing on multiple cores
+    res = ExecMonoview(directory, X, Y, name, labelsNames,
+                       classificationIndices, KFolds, nbCores, databaseType,
+                       path,
+                       randomState, hyperParamSearch=hyperParamSearch,
+                       metrics=metrics, nIter=nIter, **kwargs)
+
+    with open(directory + "res.pickle", "wb") as handle:
+        pickle.dump(res, handle)
+
+    # Pickle the res in a file to be reused.
+    # Go put a token in the token files without breaking everything.
+
+    # Need to write a function able to estimate the time used
+    # for a monoview experimentation and the resources it uses, to automatically write the file in the shell
+    # it only needs to be a rough approximation, as the tasks won't be long or RAM-hungry
diff --git a/multiview_platform/MonoMultiViewClassifiers/Monoview/ExportResults.py b/multiview_platform/MonoMultiViewClassifiers/Monoview/ExportResults.py
index 0f53bc69..ba1a9088 100644
--- a/multiview_platform/MonoMultiViewClassifiers/Monoview/ExportResults.py
+++ b/multiview_platform/MonoMultiViewClassifiers/Monoview/ExportResults.py
@@ -1,316 +1,316 @@
-# #!/usr/bin/env python
-#
-# """ Library: Functions to export preds to CSV or plots """
-#
-# # Import built-in modules
-# import os  # for iteration throug directories
-# import string  # to generate a range of letters
-#
-# import matplotlib.pyplot as plt  # for Plots
-# import numpy as np  # for Numpy Arrays
-# # Import 3rd party modules
-# import pandas as pd  # for Series and DataFrames
-# # matplotlib.use('Agg')
-# from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, \
-#     HPacker  # to generate the Annotations in plot
-# from pylab import rcParams  # to change size of plot
-# from scipy.interpolate import interp1d  # to Interpolate Data
-# from sklearn import metrics  # For stastics on classification
-#
-# # Import own modules
-#
-# # Author-Info
-# __author__ = "Nikolas Huelsmann"
-# __status__ = "Prototype"  # Production, Development, Prototype
-# __date__ = 2016_03_25
-#
-#
-# #### Export Features to CSV
-# def exportPandasToCSV(pandasSorDF, directory, filename):
-#     file = directory + filename
-#
-#     # Makes sure that the file does not yet exist
-#     if os.path.isfile(file + ".csv"):
-#         for i in range(1, 20):
-#             testFileName = filename + "-" + str(i) + ".csv"
-#             if not os.path.isfile(directory + testFileName):
-#                 pandasSorDF.to_csv(directory + testFileName, sep=';')
-#                 break
-#
-#     else:
-#         pandasSorDF.to_csv(file + ".csv", sep=';')
-#
-#
-# def exportNumpyToCSV(numpyArray, directory, filename, format):
-#     file = directory + filename
-#
-#     # Makes sure that the file does not yet exist
-#     if os.path.isfile(file + ".csv"):
-#         for i in range(1, 20):
-#             testFileName = filename + "-" + str(i) + ".csv"
-#             if not os.path.isfile(directory + testFileName):
-#                 np.savetxt(directory + testFileName, numpyArray, delimiter=";",
-#                            fmt=format)
-#                 break
-#
-#     else:
-#         np.savetxt(file + ".csv", numpyArray, delimiter=";", fmt=format)
-#
-#
-# #### Rendering of results
-#
-# ### Rendering of Score and Time
-# def showScoreTime(directory, filename, store, resScore, resTime, rangeX,
-#                   parameter, feat_desc, cl_desc, fig_desc,
-#                   y_desc1,
-#                   y_desc2):
-#     # Determine interpolated functions
-#     f_score_interp = interp1d(rangeX, resScore, kind='quadratic')
-#     f_time_interp = interp1d(rangeX, resTime, kind='quadratic')
-#
-#     # Change size of plot
-#     rcParams['figure.figsize'] = 20, 10
-#
-#     # Figure1 with subplot
-#     fig, ax1 = plt.subplots()
-#
-#     # plt.plot(x, y, type of line)
-#     # Generating X-Axis
-#     xnew = np.linspace(0, max(rangeX), num=100, endpoint=True)
-#
-#     # First Axis for Score (left)
-#     ax1.plot(rangeX, resScore, 'bo', rangeX, f_score_interp(rangeX), 'b-')
-#     ax1.set_xlabel(parameter, fontsize=16)
-#     ax1.set_ylabel(y_desc1, color='b', fontsize=16)
-#     for tl in ax1.get_yticklabels():
-#         tl.set_color('b')
-#
-#     # First Axis for Time (right)
-#     ax2 = ax1.twinx()
-#     ax2.plot(rangeX, resTime, 'ro', rangeX, f_time_interp(rangeX), 'r-')
-#     ax2.set_ylabel(y_desc2, color='r', fontsize=16)
-#     for tl in ax2.get_yticklabels():
-#         tl.set_color('r')
-#
-#     letters = string.lowercase[0:len(rangeX)]
-#     legend = ""
-#     for act_x, act_score, act_time, act_feat_desc, letter, act_cl_desc in zip(
-#             rangeX, resScore, resTime, feat_desc,
-#             letters, cl_desc):
-#         # Add a letter (a,b,c,..) to each DataPoint
-#         ax1.annotate(letter, xy=(act_x, act_score), xytext=(act_x, act_score))
-#         ax2.annotate(letter, xy=(act_x, act_time), xytext=(act_x, act_time))
-#         # Creates a legend with description of feature and classificator of each datapoint
-#         legend = legend + letter + ") Feature: " + act_feat_desc + "; Classifier: " + act_cl_desc + "\n"
-#
-#     # Remove last \n
-#     legend = legend[:-1]
-#
-#     box1 = TextArea(legend, textprops=dict(color="k"))
-#     box = HPacker(children=[box1],
-#                   align="center",
-#                   pad=0, sep=5)
-#
-#     anchored_box = AnchoredOffsetbox(loc=3,
-#                                      child=box, pad=0.2,
-#                                      frameon=True,
-#                                      bbox_to_anchor=(0, 1.04),
-#                                      # to change the place of the legend (text above of figure)
-#                                      bbox_transform=ax1.transAxes,
-#                                      borderpad=1.0,
-#                                      )
-#     ax1.add_artist(anchored_box)
-#     fig.subplots_adjust(top=0.7)
-#
-#     ax1.legend(['Score Data', 'Score Interpolated'], loc='upper left')
-#     ax2.legend(['Time Data', 'Time Interpolated'], loc='lower right')
-#
-#     plt.title(fig_desc, fontsize=18)
-#
-#     if store:
-#         # Makes sure that the file does not yet exist
-#         file = directory + filename
-#
-#         if os.path.isfile(file + ".png"):
-#             for i in range(1, 20):
-#                 testFileName = filename + "-" + str(i) + ".png"
-#                 if not os.path.isfile(directory + testFileName):
-#                     plt.savefig(directory + testFileName)
-#                     break
-#
-#         else:
-#             plt.savefig(file)
-#     else:
-#         plt.show()
-#
-#     plt.close()
-#
-#
-# ### Result comparision per class
-# def calcScorePerClass(np_labels, np_output):
-#     pd_label_test = pd.Series(np_labels)
-#     pd_output = pd.Series(np_output)
-#     score = []
-#
-#     for i in pd_label_test.unique():
-#         matches = sum(pd_label_test[pd_label_test == i] == pd_output[
-#             pd_label_test[pd_label_test == i].index])
-#         count = float(len(pd_label_test[pd_label_test == i]))
-#         score.append(matches / count)
-#
-#     score = np.array(score)
-#     return score
-#
-#
-# ### Bar-Plot for score
-#
-# def showResults(directory, filename, db, feat, score):
-#     plt.bar(range(0, len(score)), score * 100, 1)
-#     plt.xlabel('ClassLabels')
-#     plt.ylabel('Precision in %')
-#     plt.title(
-#         'Results of ' + feat + '-Classification\n for ' + db + ' Database')
-#     plt.axis([0, len(score), 0, 100])
-#     plt.xticks(range(0, len(score), 5))
-#
-#     # Makes sure that the file does not yet exist
-#     file = directory + filename
-#
-#     if os.path.isfile(file + ".png"):
-#         for i in range(1, 20):
-#             testFileName = filename + "-" + str(i) + ".png"
-#             if not os.path.isfile(directory + testFileName):
-#                 plt.savefig(directory + testFileName)
-#                 break
-#
-#     else:
-#         plt.savefig(file)
-#
-#     plt.close()
-#
-#     # instead of saving - decomment plt.show()
-#     # plt.show()
-#
-#
-# # Function to calculate the accuracy score for test data
-# def accuracy_score(y_test, y_test_pred):
-#     return metrics.accuracy_score(y_test, y_test_pred)
-#
-#
-# # Function to calculate a report of classifiaction and store it
-# def classification_report_df(directory, filename, y_test, y_test_pred, labels,
-#                              target_names):
-#     # Calculate the metrics
-#     precision, recall, f1score, support = metrics.precision_recall_fscore_support(
-#         y_test, y_test_pred, beta=1.0,
-#         labels=labels, pos_label=None,
-#         average=None)
-#
-#     # turn result into DataFrame
-#     scores_df = pd.DataFrame(data=[precision, recall, f1score, support])
-#     scores_df.index = ["Precision", "Recall", "F1", "Support"]
-#     scores_df.columns = target_names
-#     scores_df = scores_df.transpose()
-#
-#     # Store result as CSV
-#     exportPandasToCSV(scores_df, directory, filename)
-#
-#     # return the results
-#     return scores_df
-#
-#
-# # Function to calculate a report of classifiaction and store it
-# def confusion_matrix_df(directory, filename, y_test, y_test_pred, target_names):
-#     # Transform into pd Series
-#     y_actu = pd.Series(y_test, name='Actual')
-#     y_pred = pd.Series(y_test_pred, name='Predicted')
-#
-#     # Calculate confusion matrix
-#     df_confusion = pd.crosstab(y_actu, y_pred, rownames=['Actual'],
-#                                colnames=['Predicted'], margins=True)
-#
-#     # Normalization of confusion matrix
-#     df_conf_norm = df_confusion / df_confusion.sum(axis=1)
-#     df_conf_norm.index = target_names + ['All']
-#     df_conf_norm.columns = target_names + ['All']
-#
-#     # Add Row: Actual / Column: Predicted into first cell [0,0]
-#
-#     # Store result as CSV
-#     exportPandasToCSV(df_conf_norm, directory, filename)
-#
-#     return df_conf_norm
-#
-#
-# def plot_confusion_matrix(directory, filename, df_confusion,
-#                           title='Confusion matrix', cmap=plt.cm.gray_r):
-#     plt.matshow(df_confusion, cmap=cmap)  # imshow
-#     # plt.title(title)
-#     plt.colorbar()
-#     tick_marks = np.arange(len(df_confusion.columns))
-#     plt.xticks(tick_marks, df_confusion.columns, rotation=45)
-#     plt.yticks(tick_marks, df_confusion.index)
-#     # plt.tight_layout()
-#     plt.ylabel(df_confusion.index.name)
-#     plt.xlabel(df_confusion.columns.name)
-#
-#     # Makes sure that the file does not yet exist
-#
-#     file = directory + filename
-#
-#     if os.path.isfile(file + ".png"):
-#         for i in range(1, 20):
-#             testFileName = filename + "-" + str(i) + ".png"
-#             if not os.path.isfile(directory + testFileName):
-#                 plt.savefig(directory + testFileName)
-#                 break
-#
-#     else:
-#         plt.savefig(file)
-#
-#     plt.close()
-#
-#
-# def classification_stats(directory, filename, scores_df, acc):
-#     # Accuracy on test over all classes
-#     acc = acc
-#
-#     # Top 10 classes by F1-Score
-#     top10 = scores_df.sort_values(["F1"], ascending=False).head(10)
-#     top10 = list(top10.index)
-#
-#     # Worst 10 classes by F1-Score
-#     worst10 = scores_df.sort_values(["F1"], ascending=True).head(10)
-#     worst10 = list(worst10.index)
-#
-#     # Ratio of classes with F1-Score==0 of all classes
-#     ratio_zero = float(
-#         float(len(scores_df[scores_df.F1 == 0])) / float(len(scores_df)))
-#
-#     # Mean of F1-Score of top 10 classes by F1-Score
-#     mean_10 = np.mean(
-#         scores_df.sort_values(["F1"], ascending=False).head(10).F1)
-#
-#     # Mean of F1-Score of top 20 classes by F1-Score
-#     mean_20 = np.mean(
-#         scores_df.sort_values(["F1"], ascending=False).head(20).F1)
-#
-#     # Mean of F1-Score of top 30 classes by F1-Score
-#     mean_30 = np.mean(
-#         scores_df.sort_values(["F1"], ascending=False).head(30).F1)
-#
-#     # Create DataFrame with stats
-#     d = {'Statistic': ['Accuracy score on test', 'Top 10 classes by F1-Score',
-#                        'Worst 10 classes by F1-Score',
-#                        'Ratio of classes with F1-Score==0 of all classes',
-#                        'Mean of F1-Score of top 10 classes by F1-Score',
-#                        'Mean of F1-Score of top 20 classes by F1-Score',
-#                        'Mean of F1-Score of top 30 classes by F1-Score'],
-#          'Values': [acc, top10, worst10, ratio_zero, mean_10, mean_20, mean_30]}
-#     df_stats = pd.DataFrame(d)
-#
-#     # Store result as CSV
-#     exportPandasToCSV(df_stats, directory, filename)
-#
-#     # return pandas
-#     return df_stats
+#!/usr/bin/env python
+
+""" Library: Functions to export preds to CSV or plots """
+
+# Import built-in modules
+import os  # for iteration through directories
+import string  # to generate a range of letters
+
+import matplotlib.pyplot as plt  # for Plots
+import numpy as np  # for Numpy Arrays
+# Import 3rd party modules
+import pandas as pd  # for Series and DataFrames
+# matplotlib.use('Agg')
+from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, \
+    HPacker  # to generate the Annotations in plot
+from pylab import rcParams  # to change size of plot
+from scipy.interpolate import interp1d  # to Interpolate Data
+from sklearn import metrics  # For statistics on classification
+
+# Import own modules
+
+# Author-Info
+__author__ = "Nikolas Huelsmann"
+__status__ = "Prototype"  # Production, Development, Prototype
+__date__ = 2016_03_25
+
+
+#### Export Features to CSV
+def exportPandasToCSV(pandasSorDF, directory, filename):
+    file = directory + filename
+
+    # Makes sure that the file does not yet exist
+    if os.path.isfile(file + ".csv"):
+        for i in range(1, 20):
+            testFileName = filename + "-" + str(i) + ".csv"
+            if not os.path.isfile(directory + testFileName):
+                pandasSorDF.to_csv(directory + testFileName, sep=';')
+                break
+
+    else:
+        pandasSorDF.to_csv(file + ".csv", sep=';')
+
+
+def exportNumpyToCSV(numpyArray, directory, filename, format):
+    file = directory + filename
+
+    # Makes sure that the file does not yet exist
+    if os.path.isfile(file + ".csv"):
+        for i in range(1, 20):
+            testFileName = filename + "-" + str(i) + ".csv"
+            if not os.path.isfile(directory + testFileName):
+                np.savetxt(directory + testFileName, numpyArray, delimiter=";",
+                           fmt=format)
+                break
+
+    else:
+        np.savetxt(file + ".csv", numpyArray, delimiter=";", fmt=format)
+
+
+#### Rendering of results
+
+### Rendering of Score and Time
+def showScoreTime(directory, filename, store, resScore, resTime, rangeX,
+                  parameter, feat_desc, cl_desc, fig_desc,
+                  y_desc1,
+                  y_desc2):
+    # Determine interpolated functions
+    f_score_interp = interp1d(rangeX, resScore, kind='quadratic')
+    f_time_interp = interp1d(rangeX, resTime, kind='quadratic')
+
+    # Change size of plot
+    rcParams['figure.figsize'] = 20, 10
+
+    # Figure1 with subplot
+    fig, ax1 = plt.subplots()
+
+    # plt.plot(x, y, type of line)
+    # Generating X-Axis
+    xnew = np.linspace(0, max(rangeX), num=100, endpoint=True)
+
+    # First Axis for Score (left)
+    ax1.plot(rangeX, resScore, 'bo', rangeX, f_score_interp(rangeX), 'b-')
+    ax1.set_xlabel(parameter, fontsize=16)
+    ax1.set_ylabel(y_desc1, color='b', fontsize=16)
+    for tl in ax1.get_yticklabels():
+        tl.set_color('b')
+
+    # First Axis for Time (right)
+    ax2 = ax1.twinx()
+    ax2.plot(rangeX, resTime, 'ro', rangeX, f_time_interp(rangeX), 'r-')
+    ax2.set_ylabel(y_desc2, color='r', fontsize=16)
+    for tl in ax2.get_yticklabels():
+        tl.set_color('r')
+
+    letters = string.ascii_lowercase[0:len(rangeX)]  # string.lowercase does not exist in Python 3
+    legend = ""
+    for act_x, act_score, act_time, act_feat_desc, letter, act_cl_desc in zip(
+            rangeX, resScore, resTime, feat_desc,
+            letters, cl_desc):
+        # Add a letter (a,b,c,..) to each DataPoint
+        ax1.annotate(letter, xy=(act_x, act_score), xytext=(act_x, act_score))
+        ax2.annotate(letter, xy=(act_x, act_time), xytext=(act_x, act_time))
+        # Creates a legend with the feature and classifier description of each datapoint
+        legend = legend + letter + ") Feature: " + act_feat_desc + "; Classifier: " + act_cl_desc + "\n"
+
+    # Remove last \n
+    legend = legend[:-1]
+
+    box1 = TextArea(legend, textprops=dict(color="k"))
+    box = HPacker(children=[box1],
+                  align="center",
+                  pad=0, sep=5)
+
+    anchored_box = AnchoredOffsetbox(loc=3,
+                                     child=box, pad=0.2,
+                                     frameon=True,
+                                     bbox_to_anchor=(0, 1.04),
+                                     # to change the place of the legend (text above of figure)
+                                     bbox_transform=ax1.transAxes,
+                                     borderpad=1.0,
+                                     )
+    ax1.add_artist(anchored_box)
+    fig.subplots_adjust(top=0.7)
+
+    ax1.legend(['Score Data', 'Score Interpolated'], loc='upper left')
+    ax2.legend(['Time Data', 'Time Interpolated'], loc='lower right')
+
+    plt.title(fig_desc, fontsize=18)
+
+    if store:
+        # Makes sure that the file does not yet exist
+        file = directory + filename
+
+        if os.path.isfile(file + ".png"):
+            for i in range(1, 20):
+                testFileName = filename + "-" + str(i) + ".png"
+                if not os.path.isfile(directory + testFileName):
+                    plt.savefig(directory + testFileName)
+                    break
+
+        else:
+            plt.savefig(file)
+    else:
+        plt.show()
+
+    plt.close()
+
+
+### Result comparison per class
+def calcScorePerClass(np_labels, np_output):
+    pd_label_test = pd.Series(np_labels)
+    pd_output = pd.Series(np_output)
+    score = []
+
+    for i in pd_label_test.unique():
+        matches = sum(pd_label_test[pd_label_test == i] == pd_output[
+            pd_label_test[pd_label_test == i].index])
+        count = float(len(pd_label_test[pd_label_test == i]))
+        score.append(matches / count)
+
+    score = np.array(score)
+    return score
+
+
+### Bar-Plot for score
+
+def showResults(directory, filename, db, feat, score):
+    plt.bar(range(0, len(score)), score * 100, 1)
+    plt.xlabel('ClassLabels')
+    plt.ylabel('Precision in %')
+    plt.title(
+        'Results of ' + feat + '-Classification\n for ' + db + ' Database')
+    plt.axis([0, len(score), 0, 100])
+    plt.xticks(range(0, len(score), 5))
+
+    # Makes sure that the file does not yet exist
+    file = directory + filename
+
+    if os.path.isfile(file + ".png"):
+        for i in range(1, 20):
+            testFileName = filename + "-" + str(i) + ".png"
+            if not os.path.isfile(directory + testFileName):
+                plt.savefig(directory + testFileName)
+                break
+
+    else:
+        plt.savefig(file)
+
+    plt.close()
+
+    # instead of saving, uncomment plt.show()
+    # plt.show()
+
+
+# Function to calculate the accuracy score for test data
+def accuracy_score(y_test, y_test_pred):
+    return metrics.accuracy_score(y_test, y_test_pred)
+
+
+# Function to calculate a classification report and store it
+def classification_report_df(directory, filename, y_test, y_test_pred, labels,
+                             target_names):
+    # Calculate the metrics
+    precision, recall, f1score, support = metrics.precision_recall_fscore_support(
+        y_test, y_test_pred, beta=1.0,
+        labels=labels, pos_label=None,
+        average=None)
+
+    # turn result into DataFrame
+    scores_df = pd.DataFrame(data=[precision, recall, f1score, support])
+    scores_df.index = ["Precision", "Recall", "F1", "Support"]
+    scores_df.columns = target_names
+    scores_df = scores_df.transpose()
+
+    # Store result as CSV
+    exportPandasToCSV(scores_df, directory, filename)
+
+    # return the results
+    return scores_df
+
+
+# Function to calculate a confusion matrix and store it
+def confusion_matrix_df(directory, filename, y_test, y_test_pred, target_names):
+    # Transform into pd Series
+    y_actu = pd.Series(y_test, name='Actual')
+    y_pred = pd.Series(y_test_pred, name='Predicted')
+
+    # Calculate confusion matrix
+    df_confusion = pd.crosstab(y_actu, y_pred, rownames=['Actual'],
+                               colnames=['Predicted'], margins=True)
+
+    # Normalization of confusion matrix
+    df_conf_norm = df_confusion / df_confusion.sum(axis=1)
+    df_conf_norm.index = target_names + ['All']
+    df_conf_norm.columns = target_names + ['All']
+
+    # Add Row: Actual / Column: Predicted into first cell [0,0]
+
+    # Store result as CSV
+    exportPandasToCSV(df_conf_norm, directory, filename)
+
+    return df_conf_norm
+
+
+def plot_confusion_matrix(directory, filename, df_confusion,
+                          title='Confusion matrix', cmap=plt.cm.gray_r):
+    plt.matshow(df_confusion, cmap=cmap)  # imshow
+    # plt.title(title)
+    plt.colorbar()
+    tick_marks = np.arange(len(df_confusion.columns))
+    plt.xticks(tick_marks, df_confusion.columns, rotation=45)
+    plt.yticks(tick_marks, df_confusion.index)
+    # plt.tight_layout()
+    plt.ylabel(df_confusion.index.name)
+    plt.xlabel(df_confusion.columns.name)
+
+    # Makes sure that the file does not yet exist
+
+    file = directory + filename
+
+    if os.path.isfile(file + ".png"):
+        for i in range(1, 20):
+            testFileName = filename + "-" + str(i) + ".png"
+            if not os.path.isfile(directory + testFileName):
+                plt.savefig(directory + testFileName)
+                break
+
+    else:
+        plt.savefig(file)
+
+    plt.close()
+
+
+def classification_stats(directory, filename, scores_df, acc):
+    # Accuracy on test over all classes
+    acc = acc
+
+    # Top 10 classes by F1-Score
+    top10 = scores_df.sort_values(["F1"], ascending=False).head(10)
+    top10 = list(top10.index)
+
+    # Worst 10 classes by F1-Score
+    worst10 = scores_df.sort_values(["F1"], ascending=True).head(10)
+    worst10 = list(worst10.index)
+
+    # Ratio of classes with F1-Score==0 of all classes
+    ratio_zero = float(
+        float(len(scores_df[scores_df.F1 == 0])) / float(len(scores_df)))
+
+    # Mean of F1-Score of top 10 classes by F1-Score
+    mean_10 = np.mean(
+        scores_df.sort_values(["F1"], ascending=False).head(10).F1)
+
+    # Mean of F1-Score of top 20 classes by F1-Score
+    mean_20 = np.mean(
+        scores_df.sort_values(["F1"], ascending=False).head(20).F1)
+
+    # Mean of F1-Score of top 30 classes by F1-Score
+    mean_30 = np.mean(
+        scores_df.sort_values(["F1"], ascending=False).head(30).F1)
+
+    # Create DataFrame with stats
+    d = {'Statistic': ['Accuracy score on test', 'Top 10 classes by F1-Score',
+                       'Worst 10 classes by F1-Score',
+                       'Ratio of classes with F1-Score==0 of all classes',
+                       'Mean of F1-Score of top 10 classes by F1-Score',
+                       'Mean of F1-Score of top 20 classes by F1-Score',
+                       'Mean of F1-Score of top 30 classes by F1-Score'],
+         'Values': [acc, top10, worst10, ratio_zero, mean_10, mean_20, mean_30]}
+    df_stats = pd.DataFrame(d)
+
+    # Store result as CSV
+    exportPandasToCSV(df_stats, directory, filename)
+
+    # return pandas
+    return df_stats
diff --git a/multiview_platform/MonoMultiViewClassifiers/Multiview/ExecMultiview.py b/multiview_platform/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
index 76e3d748..a8b97339 100644
--- a/multiview_platform/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
+++ b/multiview_platform/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
@@ -188,102 +188,102 @@ def ExecMultiview(directory, DATASET, name, classificationIndices, KFolds,
     # return CL_type, classificationKWARGS, metricsScores, fullLabels, testLabelsMulticlass
 
 
-# if __name__ == "__main__":
-#
-#     import argparse
-#
-#     parser = argparse.ArgumentParser(
-#         description='This methods is used to execute a multiclass classification with one single view. ',
-#         formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-#
-#     groupStandard = parser.add_argument_group('Standard arguments')
-#     groupStandard.add_argument('-log', action='store_true',
-#                                help='Use option to activate Logging to Console')
-#     groupStandard.add_argument('--type', metavar='STRING', action='store',
-#                                help='Type of Dataset', default=".hdf5")
-#     groupStandard.add_argument('--name', metavar='STRING', action='store',
-#                                help='Name of Database (default: %(default)s)',
-#                                default='DB')
-#     groupStandard.add_argument('--view', metavar='STRING', action='store',
-#                                help='Name of Feature for Classification (default: %(default)s)',
-#                                default='View0')
-#     groupStandard.add_argument('--pathF', metavar='STRING', action='store',
-#                                help='Path to the views (default: %(default)s)',
-#                                default='Results-FeatExtr/')
-#     groupStandard.add_argument('--directory', metavar='STRING', action='store',
-#                                help='Path to the views (default: %(default)s)',
-#                                default='Results-FeatExtr/')
-#     groupStandard.add_argument('--LABELS_DICTIONARY', metavar='STRING',
-#                                action='store', nargs='+',
-#                                help='Name of classLabels CSV-file  (default: %(default)s)',
-#                                default='classLabels.csv')
-#     groupStandard.add_argument('--classificationIndices', metavar='STRING',
-#                                action='store',
-#                                help='Name of classLabels-Description CSV-file  (default: %(default)s)',
-#                                default='classLabels-Description.csv')
-#     groupStandard.add_argument('--nbCores', metavar='INT', action='store',
-#                                help='Number of cores, -1 for all', type=int,
-#                                default=1)
-#     groupStandard.add_argument('--randomState', metavar='INT', action='store',
-#                                help='Seed for the random state or pickable randomstate file',
-#                                default=42)
-#     groupStandard.add_argument('--hyperParamSearch', metavar='STRING',
-#                                action='store',
-#                                help='The type of method used tosearch the best set of hyper parameters',
-#                                default='randomizedSearch')
-#     groupStandard.add_argument('--metrics', metavar='STRING', action='store',
-#                                nargs="+",
-#                                help='Metrics used in the experimentation, the first will be the one used in CV',
-#                                default=[''])
-#     groupStandard.add_argument('--nIter', metavar='INT', action='store',
-#                                help='Number of itetarion in hyper parameter search',
-#                                type=int,
-#                                default=10)
-#
-#     args = parser.parse_args()
-#
-#     directory = args.directory
-#     name = args.name
-#     LABELS_DICTIONARY = args.LABELS_DICTIONARY
-#     classificationIndices = args.classificationIndices
-#     KFolds = args.KFolds
-#     nbCores = args.nbCores
-#     databaseType = None
-#     path = args.pathF
-#     randomState = args.randomState
-#     hyperParamSearch = args.hyperParamSearch
-#     metrics = args.metrics
-#     nIter = args.nIter
-#     kwargs = args.kwargs
-#
-#     # Extract the data using MPI ?
-#     DATASET = None
-#     labels = None  # (get from CSV ?)
-#
-#     logfilename = "gen a good logfilename"
-#
-#     logfile = directory + logfilename
-#     if os.path.isfile(logfile + ".log"):
-#         for i in range(1, 20):
-#             testFileName = logfilename + "-" + str(i) + ".log"
-#             if not os.path.isfile(directory + testFileName):
-#                 logfile = directory + testFileName
-#                 break
-#     else:
-#         logfile += ".log"
-#
-#     logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
-#                         filename=logfile, level=logging.DEBUG,
-#                         filemode='w')
-#
-#     if args.log:
-#         logging.getLogger().addHandler(logging.StreamHandler())
-#
-#     res = ExecMultiview(directory, DATASET, name, classificationIndices, KFolds,
-#                         nbCores, databaseType, path,
-#                         LABELS_DICTIONARY, randomState, labels,
-#                         hyperParamSearch=hyperParamSearch, metrics=metrics,
-#                         nIter=nIter, **kwargs)
+if __name__ == "__main__":
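+    # Standalone entry point: parse the command-line arguments, configure
+    # logging, then run ExecMultiview on the requested database.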
+
+    import argparse
+
+    parser = argparse.ArgumentParser(
+        description='This method is used to execute a multiview classification on a single database.',
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+    groupStandard = parser.add_argument_group('Standard arguments')
+    groupStandard.add_argument('-log', action='store_true',
+                               help='Use option to activate Logging to Console')
+    groupStandard.add_argument('--type', metavar='STRING', action='store',
+                               help='Type of Dataset', default=".hdf5")
+    groupStandard.add_argument('--name', metavar='STRING', action='store',
+                               help='Name of Database (default: %(default)s)',
+                               default='DB')
+    groupStandard.add_argument('--view', metavar='STRING', action='store',
+                               help='Name of Feature for Classification (default: %(default)s)',
+                               default='View0')
+    groupStandard.add_argument('--pathF', metavar='STRING', action='store',
+                               help='Path to the views (default: %(default)s)',
+                               default='Results-FeatExtr/')
+    groupStandard.add_argument('--directory', metavar='STRING', action='store',
+                               help='Path to the result directory (default: %(default)s)',
+                               default='Results-FeatExtr/')
+    groupStandard.add_argument('--LABELS_DICTIONARY', metavar='STRING',
+                               action='store', nargs='+',
+                               help='Name of classLabels CSV-file  (default: %(default)s)',
+                               default='classLabels.csv')
+    groupStandard.add_argument('--classificationIndices', metavar='STRING',
+                               action='store',
+                               help='Name of classLabels-Description CSV-file  (default: %(default)s)',
+                               default='classLabels-Description.csv')
+    groupStandard.add_argument('--nbCores', metavar='INT', action='store',
+                               help='Number of cores, -1 for all', type=int,
+                               default=1)
+    groupStandard.add_argument('--randomState', metavar='INT', action='store',
+                               help='Seed for the random state, or path to a pickled random state file',
+                               default=42)
+    groupStandard.add_argument('--hyperParamSearch', metavar='STRING',
+                               action='store',
+                               help='The type of method used to search for the best set of hyperparameters',
+                               default='randomizedSearch')
+    groupStandard.add_argument('--metrics', metavar='STRING', action='store',
+                               nargs="+",
+                               help='Metrics used in the experimentation; the first one is used for cross-validation',
+                               default=[''])
+    groupStandard.add_argument('--nIter', metavar='INT', action='store',
+                               help='Number of iterations in the hyperparameter search',
+                               type=int,
+                               default=10)
+
+    args = parser.parse_args()
+
+    directory = args.directory
+    name = args.name
+    LABELS_DICTIONARY = args.LABELS_DICTIONARY
+    classificationIndices = args.classificationIndices
+    KFolds = args.KFolds
+    nbCores = args.nbCores
+    databaseType = None
+    path = args.pathF
+    randomState = args.randomState
+    hyperParamSearch = args.hyperParamSearch
+    metrics = args.metrics
+    nIter = args.nIter
+    kwargs = args.kwargs
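+    # NOTE: the parser above defines neither --KFolds nor --kwargs, so the
+    # args.KFolds and args.kwargs lookups above will raise AttributeError if
+    # this standalone entry point is actually run.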
+
+    # Extract the data using MPI ?
+    DATASET = None
+    labels = None  # (get from CSV ?)
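+    # Loading the actual dataset and labels is still a TODO for this
+    # standalone entry point, hence the None placeholders above.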
+
+    logfilename = "gen a good logfilename"
+
+    logfile = directory + logfilename
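+    # If a log file with this name already exists, append -1 .. -19 until a
+    # free filename is found instead of overwriting the existing log.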
+    if os.path.isfile(logfile + ".log"):
+        for i in range(1, 20):
+            testFileName = logfilename + "-" + str(i) + ".log"
+            if not os.path.isfile(directory + testFileName):
+                logfile = directory + testFileName
+                break
+    else:
+        logfile += ".log"
+
+    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
+                        filename=logfile, level=logging.DEBUG,
+                        filemode='w')
+
+    if args.log:
+        logging.getLogger().addHandler(logging.StreamHandler())
+
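+    # Run the multiview classification itself; pickling `res` is still a TODO
+    # (see the comments below).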
+    res = ExecMultiview(directory, DATASET, name, classificationIndices, KFolds,
+                        nbCores, databaseType, path,
+                        LABELS_DICTIONARY, randomState, labels,
+                        hyperParamSearch=hyperParamSearch, metrics=metrics,
+                        nIter=nIter, **kwargs)
 
     # Pickle the res
     # Go put your token
diff --git a/multiview_platform/MonoMultiViewClassifiers/utils/GetMultiviewDb.py b/multiview_platform/MonoMultiViewClassifiers/utils/GetMultiviewDb.py
index 24542e59..21267f61 100644
--- a/multiview_platform/MonoMultiViewClassifiers/utils/GetMultiviewDb.py
+++ b/multiview_platform/MonoMultiViewClassifiers/utils/GetMultiviewDb.py
@@ -131,7 +131,7 @@ def getPlausibleDBhdf5(features, pathF, name, NB_CLASS=3, LABELS_NAME="",
         datasetFile.close()
         datasetFile = h5py.File(pathF + "Plausible.hdf5", "r")
         LABELS_DICTIONARY = {0: "No", 1: "Yes"}
-        return datasetFile, LABELS_DICTIONARY, "Plausible"
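+        # NOTE: this branch now returns a 2-tuple, while the NB_CLASS >= 3
+        # branch below still returns (datasetFile, LABELS_DICTIONARY, "Plausible").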
+        return datasetFile, LABELS_DICTIONARY
     elif NB_CLASS >= 3:
         firstBound = int(datasetLength / 3)
         rest = datasetLength - 2 * int(datasetLength / 3)
@@ -181,7 +181,6 @@ def getPlausibleDBhdf5(features, pathF, name, NB_CLASS=3, LABELS_NAME="",
         datasetFile.close()
         datasetFile = h5py.File(pathF + "Plausible.hdf5", "r")
         LABELS_DICTIONARY = {0: "No", 1: "Yes", 2: "Maybe"}
-
         return datasetFile, LABELS_DICTIONARY, "Plausible"
 
 
diff --git a/multiview_platform/MonoMultiViewClassifiers/utils/execution.py b/multiview_platform/MonoMultiViewClassifiers/utils/execution.py
index c8773b58..7b83a19b 100644
--- a/multiview_platform/MonoMultiViewClassifiers/utils/execution.py
+++ b/multiview_platform/MonoMultiViewClassifiers/utils/execution.py
@@ -365,17 +365,15 @@ def parseTheArgs(arguments):
 
     groupCGDesc = parser.add_argument_group('CGDesc arguments')
     groupCGDesc.add_argument('--CGD_stumps', metavar='INT', type=int,
-                             nargs='+',
                              action='store',
                              help='Set the n_stumps_per_attribute parameter '
                                   'for CGreed',
-                             default=[1])
+                             default=1)
     groupCGDesc.add_argument('--CGD_n_iter', metavar='INT', type=int,
-                             nargs='+',
                              action='store',
                              help='Set the n_max_iterations parameter for '
                                   'CGreed',
-                             default=[100])
+                             default=100)
 
     groupCGDescTree = parser.add_argument_group('CGDesc arguments')
     groupCGDescTree.add_argument('--CGDT_trees', metavar='INT', type=int,
@@ -686,7 +684,8 @@ def initRandomState(randomStateArg, directory):
     If no random state is specified, it will generate a 'random' seed.
     If the `randomStateArg` is a string containing only numbers, it will be converted to an int to generate a seed.
     If the `randomStateArg` is a string with letters, it must be a path to a pickled random state file that will be loaded.
-    The function will also pickle the new random state in a file to be able to retrieve it later.
+    The function will also pickle the new random state in a file to be able to retrieve it later.
+    Tested
 
 
     Parameters
@@ -764,7 +763,7 @@ def getDatabaseFunction(name, type):
     return getDatabase
 
 
-def init_result_directory(name, views, CL_type, log, debug, label, result_directory):
+def initLogFile(name, views, CL_type, log, debug, label, result_directory):
     r"""Used to init the directory where the preds will be stored and the log file.
 
     First this function will check if the result directory already exists (only one per minute is allowed).
@@ -789,10 +788,10 @@ def init_result_directory(name, views, CL_type, log, debug, label, result_direct
     """
     if debug:
         resultDirectory = result_directory + name + "/debug_started_" + time.strftime(
-            "%y_%m_%d-%H_%M_%S") + "_" + label + "/"
+            "%Y_%m_%d-%H_%M_%S") + "_" + label + "/"
     else:
         resultDirectory = result_directory + name + "/started_" + time.strftime(
-            "%y_%m_%d-%H_%M") + "_" + label + "/"
+            "%Y_%m_%d-%H_%M") + "_" + label + "/"
     logFileName = time.strftime("%Y_%m_%d-%H_%M") + "-" + ''.join(
         CL_type) + "-" + "_".join(
         views) + "-" + name + "-LOG"
@@ -946,7 +945,7 @@ def genDirecortiesNames(directory, statsIter):
     return directories
 
 
-def genArgumentDictionaries(dataset_name, labelsDictionary, directories, multiclassLabels,
+def genArgumentDictionaries(labelsDictionary, directories, multiclassLabels,
                             labelsCombinations, indicesMulticlass,
                             hyperParamSearch, args, kFolds,
                             statsIterRandomStates, metrics,
@@ -1014,7 +1013,6 @@ def genArgumentDictionaries(dataset_name, labelsDictionary, directories, multicl
                     indicesMulticlass[combinationIndex][1][iterIndex],
                     indicesMulticlass[combinationIndex][2][iterIndex]],
                 "args": args,
-                "dataset_name":dataset_name,
                 "labels": multiclassLabels[combinationIndex],
                 "kFolds": kFolds[iterIndex],
                 "randomState": iterRandomState,
-- 
GitLab