From bcb38103b65bfa11b313b244057ca614f79c9bf3 Mon Sep 17 00:00:00 2001
From: bbauvin <baptiste.bauvin@centrale-marseille.fr>
Date: Tue, 17 Oct 2017 10:32:44 -0400
Subject: [PATCH] Added saving of train indices to CSV, corrected error
 analysis figure size issue
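
Each iteration now writes the indices of its training examples to
train_indices.csv in the result directory, so that train/validation
splits can be inspected and reproduced. The error analysis figure is
clamped to a minimum size so that plots for small benchmarks remain
readable, and the default train/validation split is lowered from 0.3
to 0.2.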

---
 Code/MonoMutliViewClassifiers/ExecClassif.py    | 7 ++++++-
 Code/MonoMutliViewClassifiers/ResultAnalysis.py | 9 ++++++---
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/Code/MonoMutliViewClassifiers/ExecClassif.py b/Code/MonoMutliViewClassifiers/ExecClassif.py
index dc0fb1aa..f9fb90de 100644
--- a/Code/MonoMutliViewClassifiers/ExecClassif.py
+++ b/Code/MonoMutliViewClassifiers/ExecClassif.py
@@ -284,6 +284,8 @@ def classifyOneIter_multicore(LABELS_DICTIONARY, argumentDictionaries, nbCores,
                               benchmark,
                               views):
     resultsMonoview = []
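+    # Save the indices of the training examples used in this iteration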
+    np.savetxt(directory+"train_indices.csv", classificationIndices[0], delimiter=",")
     labelsNames = LABELS_DICTIONARY.values()
     resultsMonoview += [ExecMonoview_multicore(directory, args.name, labelsNames, classificationIndices, kFolds,
                                                coreIndex, args.type, args.pathF, randomState,
@@ -327,8 +329,11 @@ def classifyOneIter_multicore(LABELS_DICTIONARY, argumentDictionaries, nbCores,
 def classifyOneIter(LABELS_DICTIONARY, argumentDictionaries, nbCores, directory, args, classificationIndices, kFolds,
                     randomState, hyperParamSearch, metrics, DATASET, viewsIndices, dataBaseTime, start,
                     benchmark, views):
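+    # Save the indices of the training examples used in this iteration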
+    np.savetxt(directory+"train_indices.csv", classificationIndices[0], delimiter=",")
     resultsMonoview = []
     labelsNames = LABELS_DICTIONARY.values()
+
     if nbCores > 1:
         nbExperiments = len(argumentDictionaries["Monoview"])
         for stepIndex in range(int(math.ceil(float(nbExperiments) / nbCores))):
@@ -436,7 +441,7 @@ groupStandard.add_argument('--randomState', metavar='STRING', action='store',
 groupClass = parser.add_argument_group('Classification arguments')
 groupClass.add_argument('--CL_split', metavar='FLOAT', action='store',
                         help='Determine the split between learning and validation sets', type=float,
-                        default=0.3)
+                        default=0.2)
 groupClass.add_argument('--CL_nbFolds', metavar='INT', action='store', help='Number of folds in cross validation',
                         type=int, default=2)
 groupClass.add_argument('--CL_nb_class', metavar='INT', action='store', help='Number of classes, -1 for all', type=int,
diff --git a/Code/MonoMutliViewClassifiers/ResultAnalysis.py b/Code/MonoMutliViewClassifiers/ResultAnalysis.py
index 720071c1..19af768b 100644
--- a/Code/MonoMutliViewClassifiers/ResultAnalysis.py
+++ b/Code/MonoMutliViewClassifiers/ResultAnalysis.py
@@ -80,7 +80,7 @@ def resultAnalysis(benchmark, results, name, times, metrics, directory, minSize=
         plt.close()
 
 
-def analyzeLabels(labelsArrays, realLabels, results, directory):
+def analyzeLabels(labelsArrays, realLabels, results, directory, minSize=10):
     mono, multi = results
     classifiersNames = genNamesFromRes(mono, multi)
     nbClassifiers = len(classifiersNames)
@@ -91,14 +91,17 @@ def analyzeLabels(labelsArrays, realLabels, results, directory):
     for classifierIndex in range(nbClassifiers):
         for iterIndex in range(nbIter):
             data[:, classifierIndex * nbIter + iterIndex] = tempData[classifierIndex, :]
-    figKW = {"figsize":(nbClassifiers/2, nbExamples/20)}
+    figWidth = max(nbClassifiers/2, minSize)
+    figHeight = max(nbExamples/20, minSize)
+    figKW = {"figsize":(figWidth, figHeight)}
     fig, ax = plt.subplots(nrows=1, ncols=1, **figKW)
     cmap = mpl.colors.ListedColormap(['red', 'green'])
     bounds = [-0.5, 0.5, 1.5]
     norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
 
     cax = plt.imshow(data, interpolation='none', cmap=cmap, norm=norm, aspect='auto')
-    plt.title('Error on examples depending on the classifier')
+    plt.title('Errors depending on the classifier')
     ticks = np.arange(0, nbClassifiers * nbIter, nbIter)
     labels = classifiersNames
     plt.xticks(ticks, labels, rotation="vertical")
-- 
GitLab