diff --git a/Code/MonoMutliViewClassifiers/ExecClassif.py b/Code/MonoMutliViewClassifiers/ExecClassif.py
index 9d61039aeac623d1171a7a67a12c750f71084785..790385562b9090f1b6de5f95efb56e771746726e 100644
--- a/Code/MonoMutliViewClassifiers/ExecClassif.py
+++ b/Code/MonoMutliViewClassifiers/ExecClassif.py
@@ -447,7 +447,7 @@ groupClass.add_argument('--CL_algos_multiview', metavar='STRING', action='store'
 groupClass.add_argument('--CL_cores', metavar='INT', action='store', help='Number of cores, -1 for all', type=int,
                         default=2)
 groupClass.add_argument('--CL_statsiter', metavar='INT', action='store',
-                        help='Number of iteration for each algorithm to mean results', type=int,
+                        help="Number of iteration for each algorithm to mean results if using multiple cores, it's highly recommended to use statsiter mod(nbCores) = 0", type=int,
                         default=2)
 groupClass.add_argument('--CL_metrics', metavar='STRING', action='store', nargs="+",
                         help='Determine which metrics to use, separate metric and configuration with ":".'
diff --git a/Code/MonoMutliViewClassifiers/ResultAnalysis.py b/Code/MonoMutliViewClassifiers/ResultAnalysis.py
index 818860cb1e3729c379d8d291a5badf5c67d24ce2..0cce2e35e4eaeee9e76141c531ee21e7d544d3cb 100644
--- a/Code/MonoMutliViewClassifiers/ResultAnalysis.py
+++ b/Code/MonoMutliViewClassifiers/ResultAnalysis.py
@@ -106,22 +106,20 @@ def analyzeLabels(labelsArrays, realLabels, results, directory):
     fig.savefig(directory+time.strftime("%Y%m%d-%H%M%S")+"-error_analysis.png")
 
 
-def genScoresNames(iterResults, metric, nbResults, names):
-    validationScores = []
-    trainScores = []
-    for iterindex, iterResult in enumerate(iterResults):
+def genScoresNames(iterResults, metric, nbResults, names, nbMono):
+    nbIter = len(iterResults)
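+    # Scores are stored in (nbIter, nbResults) arrays: one row per statistical iteration, one column per classifier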
+    validationScores = np.zeros((nbIter, nbResults))
+    trainScores = np.zeros((nbIter, nbResults))
+    for iterIndex, iterResult in enumerate(iterResults):
         mono, multi = iterResult
-        import pdb;pdb.set_trace()
-        validationScores[iterindex] = [float(res[1][2][metric[0]][1]) for res in mono]
-        validationScores[iterindex] += [float(scores[metric[0]][1]) for a, b, scores, c in multi]
-        trainScores[iterindex] = [float(res[1][1][2][metric[0]][0]) for res in mono]
-        trainScores[iterindex] += [float(scores[metric[0]][0]) for a, b, scores, c in multi]
-
-        validationScores[iterindex] = np.array(validationScores)
-        trainScores[iterindex] = np.array(trainScores)
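+        # Monoview results fill the first nbMono columns, multiview results the remaining ones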
+        validationScores[iterIndex, :nbMono] = np.array([float(res[1][2][metric[0]][1]) for res in mono])
+        validationScores[iterIndex, nbMono:] = np.array([float(scores[metric[0]][1]) for a, b, scores, c in multi])
+        trainScores[iterIndex, :nbMono] = np.array([float(res[1][2][metric[0]][0]) for res in mono])
+        trainScores[iterIndex, nbMono:] = np.array([float(scores[metric[0]][0]) for a, b, scores, c in multi])
 
-    validationScores = np.array(validationScores)
-    trainScores = np.array(trainScores)
+    # Mean and standard deviation of each classifier's scores across the statistical iterations
     validationSTDs = np.std(validationScores, axis=0)
     trainSTDs = np.std(trainScores, axis=0)
     validationMeans = np.mean(validationScores, axis=0)
@@ -138,8 +136,11 @@ def genScoresNames(iterResults, metric, nbResults, names):
         metricKWARGS = {}
     sorted_indices = np.argsort(validationMeans)
     validationMeans = validationMeans[sorted_indices]
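+    # Apply the same ordering to the standard deviations so the error bars stay aligned with their classifiers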
+    validationSTDs = validationSTDs[sorted_indices]
+    trainSTDs = trainSTDs[sorted_indices]
     trainMeans = trainMeans[sorted_indices]
-    names = names[sorted_indices]
+    names = np.array(names)[sorted_indices]
 
     ax.set_title(getattr(Metrics, metric[0]).getConfig(**metricKWARGS)+" for each classifier")
     rects = ax.bar(range(nbResults), validationMeans, width, color="r", yerr=validationSTDs)
@@ -155,9 +156,11 @@
 
 def analyzeIterResults(iterResults, name, metrics, directory):
     nbResults = len(iterResults[0][0])+len(iterResults[0][1])
+    nbMono = len(iterResults[0][0])
+    nbMulti = len(iterResults[0][1])
     nbIter = len(iterResults)
     names = genNamesFromRes(iterResults[0][0], iterResults[0][1])
     for metric in metrics:
-        figure = genScoresNames(iterResults, metric, nbResults, names)
+        figure = genScoresNames(iterResults, metric, nbResults, names, nbMono)
         figure.savefig(directory+time.strftime("%Y%m%d-%H%M%S")+"-"+name+"-Mean_on_"
                        +str(nbIter)+"_iter-"+metric[0]+".png")