diff --git a/Code/MonoMutliViewClassifiers/ExecClassif.py b/Code/MonoMutliViewClassifiers/ExecClassif.py
index 5ed69368220819a2c614d735b49e3b45da9ec7f5..9d61039aeac623d1171a7a67a12c750f71084785 100644
--- a/Code/MonoMutliViewClassifiers/ExecClassif.py
+++ b/Code/MonoMutliViewClassifiers/ExecClassif.py
@@ -24,7 +24,7 @@ from Multiview.ExecMultiview import ExecMultiview, ExecMultiview_multicore
 from Monoview.ExecClassifMonoView import ExecMonoview, ExecMonoview_multicore
 import Multiview.GetMultiviewDb as DB
 from Versions import testVersions
-from ResultAnalysis import resultAnalysis, analyzeLabels
+from ResultAnalysis import resultAnalysis, analyzeLabels, analyzeIterResults
 
 # Author-Info
 __author__ = "Baptiste Bauvin"
@@ -278,8 +278,9 @@ def genDirecortiesNames(directory, statsIter):
 def classifyOneIter_multicore(LABELS_DICTIONARY, argumentDictionaries, nbCores, directory, args, classificationIndices, kFolds,
                               randomState, hyperParamSearch, metrics, coreIndex, viewsIndices, dataBaseTime, start, benchmark,
                               views):
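+    # Results are accumulated in lists initialised empty so each step can
+    # extend them in place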
+    resultsMonoview = []
     labelsNames = LABELS_DICTIONARY.values()
-    resultsMonoview = [ExecMonoview_multicore(directory, args.name, labelsNames, classificationIndices, kFolds,
+    resultsMonoview += [ExecMonoview_multicore(directory, args.name, labelsNames, classificationIndices, kFolds,
                                                  coreIndex, args.type, args.pathF, randomState,
                                                  hyperParamSearch=hyperParamSearch,
                                                  metrics=metrics, nIter=args.CL_GS_iter,
@@ -289,7 +290,8 @@ def classifyOneIter_multicore(LABELS_DICTIONARY, argumentDictionaries, nbCores,
 
     argumentDictionaries = initMultiviewArguments(args, benchmark, views, viewsIndices, argumentDictionaries, randomState, directory, resultsMonoview)
 
-    resultsMultiview = [
+    resultsMultiview = []
+    resultsMultiview += [
         ExecMultiview_multicore(directory, coreIndex, args.name, classificationIndices, kFolds, args.type,
                                 args.pathF, LABELS_DICTIONARY, randomState, hyperParamSearch=hyperParamSearch,
                                 metrics=metrics, nIter=args.CL_GS_iter,**arguments)
@@ -302,17 +304,17 @@ def classifyOneIter_multicore(LABELS_DICTIONARY, argumentDictionaries, nbCores,
     trueLabels = DATASET.get("Labels").value
     times = [dataBaseTime, monoviewTime, multiviewTime]
     results = (resultsMonoview, resultsMultiview)
-    analyzeLabels(labels, trueLabels, results, directory)
-    logging.debug("Start:\t Analyze Global Results")
-    resultAnalysis(benchmark, results, args.name, times, metrics, directory)
-    logging.debug("Done:\t Analyze Global Results")
-    globalAnalysisTime = time.time() - monoviewTime - dataBaseTime - start - multiviewTime
-    totalTime = time.time() - start
-    logging.info("Extraction time : "+str(dataBaseTime)+
-                 "s, Monoview time : "+str(monoviewTime)+
-                 "s, Multiview Time : "+str(multiviewTime)+
-                 "s, Global Analysis Time : "+str(globalAnalysisTime)+
-                 "s, Total Duration : "+str(totalTime)+"s")
+    # analyzeLabels(labels, trueLabels, results, directory)
+    # logging.debug("Start:\t Analyze Global Results for iteration")
+    # resultAnalysis(benchmark, results, args.name, times, metrics, directory)
+    # logging.debug("Done:\t Analyze Global Results for iteration")
+    # globalAnalysisTime = time.time() - monoviewTime - dataBaseTime - start - multiviewTime
+    # totalTime = time.time() - start
+    # logging.info("Extraction time : "+str(dataBaseTime)+
+    #              "s, Monoview time : "+str(monoviewTime)+
+    #              "s, Multiview Time : "+str(multiviewTime)+
+    #              "s, Global Analysis Time : "+str(globalAnalysisTime)+
+    #              "s, Total Duration : "+str(totalTime)+"s")
     return results
 
 
@@ -382,7 +384,6 @@ def classifyOneIter(LABELS_DICTIONARY, argumentDictionaries, nbCores, directory,
                  "s, Multiview Time : "+str(multiviewTime)+
                  "s, Global Analysis Time : "+str(globalAnalysisTime)+
                  "s, Total Duration : "+str(totalTime)+"s")
-    return results
 
 
 def initRandomState(randomStateArg, directory):
@@ -642,8 +643,12 @@ if statsIter>1:
         logging.debug("Start:\t Deleting " + str(nbCores) + " temporary datasets for multiprocessing")
         datasetFiles = DB.deleteHDF5(args.pathF, args.name, nbCores)
         logging.debug("Start:\t Deleting datasets for multiprocessing")
+    analyzeIterResults(iterResults, args.name, metrics, directory)
 
 else:
-    res = classifyOneIter(LABELS_DICTIONARY, argumentDictionaries, nbCores, directories, args, classificationIndices, kFolds,
-                          statsIterRandomStates, hyperParamSearch, metrics, DATASET, viewsIndices, dataBaseTime, start,
-                          benchmark, views)
\ No newline at end of file
+    classifyOneIter(LABELS_DICTIONARY, argumentDictionaries, nbCores, directories, args, classificationIndices, kFolds,
+                    statsIterRandomStates, hyperParamSearch, metrics, DATASET, viewsIndices, dataBaseTime, start,
+                    benchmark, views)
diff --git a/Code/MonoMutliViewClassifiers/ResultAnalysis.py b/Code/MonoMutliViewClassifiers/ResultAnalysis.py
index 500423344828ddeefdc3010ecb34a778db65b47e..818860cb1e3729c379d8d291a5badf5c67d24ce2 100644
--- a/Code/MonoMutliViewClassifiers/ResultAnalysis.py
+++ b/Code/MonoMutliViewClassifiers/ResultAnalysis.py
@@ -103,4 +103,61 @@ def analyzeLabels(labelsArrays, realLabels, results, directory):
     plt.xticks(ticks, labels, rotation="vertical")
     cbar = fig.colorbar(cax, ticks=[0, 1])
     cbar.ax.set_yticklabels(['Wrong', ' Right'])
-    fig.savefig(directory+time.strftime("%Y%m%d-%H%M%S")+"-error_analysis.png")
\ No newline at end of file
+    fig.savefig(directory+time.strftime("%Y%m%d-%H%M%S")+"-error_analysis.png")
+
+
+def genScoresNames(iterResults, metric, nbResults, names):
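+    """Plot the mean train and validation scores of each classifier for one
+    metric, with standard deviations over the statistical iterations as error
+    bars; returns the matplotlib figure."""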
+    validationScores = []
+    trainScores = []
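+    # Assumed result layout, mirroring resultAnalysis: res[1][2][metric[0]] is a
+    # (train score, validation score) pair for monoview results, and the
+    # multiview scores dicts hold the same pairs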
+    for iterIndex, iterResult in enumerate(iterResults):
+        mono, multi = iterResult
+        validationScores.append([float(res[1][2][metric[0]][1]) for res in mono])
+        validationScores[iterIndex] += [float(scores[metric[0]][1]) for a, b, scores, c in multi]
+        trainScores.append([float(res[1][2][metric[0]][0]) for res in mono])
+        trainScores[iterIndex] += [float(scores[metric[0]][0]) for a, b, scores, c in multi]
+
+    validationScores = np.array(validationScores)
+    trainScores = np.array(trainScores)
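+    # Per-classifier mean and standard deviation across iterations (axis 0)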
+    validationSTDs = np.std(validationScores, axis=0)
+    trainSTDs = np.std(trainScores, axis=0)
+    validationMeans = np.mean(validationScores, axis=0)
+    trainMeans = np.mean(trainScores, axis=0)
+
+    f = pylab.figure(figsize=(40, 30))
+    width = 0.35  # the width of the bars
+    ax = f.add_axes([0.1, 0.1, 0.8, 0.8])
+    if metric[1] is not None:
+        metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
+    else:
+        metricKWARGS = {}
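+    # Order the bars by increasing mean validation score, keeping the stds and
+    # names aligned with the reordered means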
+    sorted_indices = np.argsort(validationMeans)
+    validationMeans = validationMeans[sorted_indices]
+    validationSTDs = validationSTDs[sorted_indices]
+    trainMeans = trainMeans[sorted_indices]
+    trainSTDs = trainSTDs[sorted_indices]
+    names = np.array(names)[sorted_indices]
+
+    ax.set_title(getattr(Metrics, metric[0]).getConfig(**metricKWARGS)+" for each classifier")
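+    # Side-by-side bars: validation scores (labelled "Test") in red, train
+    # scores in grey, with the standard deviations drawn as error bars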
+    rects = ax.bar(range(nbResults), validationMeans, width, color="r", yerr=validationSTDs)
+    rect2 = ax.bar(np.arange(nbResults)+width, trainMeans, width, color="0.7", yerr=trainSTDs)
+    autolabel(rects, ax)
+    autolabel(rect2, ax)
+    ax.legend((rects[0], rect2[0]), ('Test', 'Train'))
+    ax.set_xticks(np.arange(nbResults)+width)
+    ax.set_xticklabels(names, rotation="vertical")
+
+    return f
+
+
+def analyzeIterResults(iterResults, name, metrics, directory):
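+    # Names are taken from the first iteration, assuming every iteration ran
+    # the same classifiers in the same order; one figure is saved per metric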
+    nbResults = len(iterResults[0][0])+len(iterResults[0][1])
+    nbIter = len(iterResults)
+    names = genNamesFromRes(iterResults[0][0], iterResults[0][1])
+    for metric in metrics:
+        figure = genScoresNames(iterResults, metric, nbResults, names)
+        figure.savefig(directory+time.strftime("%Y%m%d-%H%M%S")+"-"+name+"-Mean_on_"
+                       +str(nbIter)+"_iter-"+metric[0]+".png")