diff --git a/Code/MonoMutliViewClassifiers/Monoview/analyzeResult.py b/Code/MonoMutliViewClassifiers/Monoview/analyzeResult.py
index fb4d1f8b385d680f23721163a4e87d9add3dd9d3..a48b8097dc0fab5e56b56a954751c995c2405579 100644
--- a/Code/MonoMutliViewClassifiers/Monoview/analyzeResult.py
+++ b/Code/MonoMutliViewClassifiers/Monoview/analyzeResult.py
@@ -59,7 +59,7 @@ def execute(name, learningRate, nbFolds, nbCores, gridSearch, metrics, nIter, fe
             metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
         else:
             metricKWARGS = {}
-        metricsScores[metric[0]] = [np.mean(np.array([getattr(Metrics, metric[0]).score(y_test, y_test_pred) for y_test, y_test_pred in zip(y_tests, y_test_preds)])), "",
+        metricsScores[metric[0]] = [np.mean(np.array([getattr(Metrics, metric[0]).score(y_train, y_train_pred) for y_train, y_train_pred in zip(y_trains, y_train_preds)])), "",
                                     np.mean(np.array([getattr(Metrics, metric[0]).score(y_test, y_test_pred) for y_test, y_test_pred in zip(y_tests, y_test_preds)]))]
 
     stringAnalysis += "\n\n Classification took "+ str(hms(seconds=int(time)))
diff --git a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SCM.py b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SCM.py
index 67f0ce4ba5a9404535e46b8f1b934483c8df62c9..8fb459f72ac6ab46b08374a92328488fbcecdf1f 100644
--- a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SCM.py
+++ b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SCM.py
@@ -100,9 +100,9 @@ def gridSearch(X_train, y_train, nbFolds=4, metric=["accuracy_score", None], nIt
 
 def getConfig(config):
     try :
-        return "\n\t\t- SCM with max_attributes : "+str(config[0])#+", c : "+str(config[1])+", p : "+str(config[2])
+        return "\n\t\t- SCM with max_attributes : "+str(config[0])+", c : "+str(config[1])+", p : "+str(config[2])
     except:
-        return "\n\t\t- SCM with max_attributes : "+str(config["0"])#+", c : "+str(config["1"])+", p : "+str(config["2"])
+        return "\n\t\t- SCM with max_attributes : "+str(config["0"])+", c : "+str(config["1"])+", p : "+str(config["2"])
 
 
 def transformData(dataArray):
diff --git a/Code/MonoMutliViewClassifiers/Multiview/Fusion/Methods/LateFusionPackage/SCMForLinear.py b/Code/MonoMutliViewClassifiers/Multiview/Fusion/Methods/LateFusionPackage/SCMForLinear.py
index 0366d111947725c59510e0ed867aa7b0075ed622..dd7b17e7201adb3fcc6afcdb4991c860575f2c59 100644
--- a/Code/MonoMutliViewClassifiers/Multiview/Fusion/Methods/LateFusionPackage/SCMForLinear.py
+++ b/Code/MonoMutliViewClassifiers/Multiview/Fusion/Methods/LateFusionPackage/SCMForLinear.py
@@ -112,6 +112,11 @@ class SCMForLinear(LateFusionClassifier):
         packedDataset = dsetFile.get("temp_scm")
         attributeClassification = BaptisteRuleClassifications(packedDataset, monoViewDecisions.shape[0])
         self.SCMClassifier.fit(binaryAttributes, DATASET.get("Labels")[usedIndices], attribute_classifications=attributeClassification)
+        try:
+            dsetFile.close()
+            os.remove(name)
+        except:
+            pass
 
     def getConfig(self, fusionMethodConfig, monoviewClassifiersNames, monoviewClassifiersConfigs):
         configString = "with SCM for linear with max_attributes : "+str(self.config[1])+", p : "+str(self.config[0])+\
diff --git a/Code/MonoMutliViewClassifiers/ResultAnalysis.py b/Code/MonoMutliViewClassifiers/ResultAnalysis.py
index 5494d5d809cffc066e906f30c0e726b9ee4b9d1e..951c9f968b7b191dd8883bc4861c2abde8f6b615 100644
--- a/Code/MonoMutliViewClassifiers/ResultAnalysis.py
+++ b/Code/MonoMutliViewClassifiers/ResultAnalysis.py
@@ -7,6 +7,7 @@ import logging
 import matplotlib
 matplotlib.use('Agg')
 import matplotlib.pyplot as plt
+import numpy as np
 
 #Import own Modules
 import Metrics
@@ -20,7 +21,7 @@ def autolabel(rects, ax):
     for rect in rects:
         height = rect.get_height()
         ax.text(rect.get_x() + rect.get_width()/2., 1.01*height,
-                "%.2f" % round(height, 4),
+                "%.2f" % height,
                 ha='center', va='bottom')
 
 
@@ -34,7 +35,10 @@ def resultAnalysis(benchmark, results, name, times, metrics):
         nbResults = len(mono)+len(multi)
         validationScores = [float(res[1][2][metric[0]][2]) for res in mono]
         validationScores += [float(scores[metric[0]][2]) for a, b, scores in multi]
+        trainScores = [float(res[1][2][metric[0]][0]) for res in mono]
+        trainScores += [float(scores[metric[0]][0]) for a, b, scores in multi]
         f = pylab.figure(figsize=(40, 30))
+        width = 0.35  # the width of the bars
         fig = plt.gcf()
         fig.subplots_adjust(bottom=105.0, top=105.01)
         ax = f.add_axes([0.1, 0.1, 0.8, 0.8])
@@ -43,9 +47,12 @@ def resultAnalysis(benchmark, results, name, times, metrics):
         else:
             metricKWARGS = {}
         ax.set_title(getattr(Metrics, metric[0]).getConfig(**metricKWARGS)+" on validation set for each classifier")
-        rects = ax.bar(range(nbResults), validationScores, align='center')
+        rects = ax.bar(range(nbResults), validationScores, width, color="r")
+        rect2 = ax.bar(np.arange(nbResults)+width, trainScores, width, color="0.3")
         autolabel(rects, ax)
-        ax.set_xticks(range(nbResults))
+        autolabel(rect2, ax)
+        ax.legend((rects[0], rect2[0]), ('Validation', 'Train'))
+        ax.set_xticks(np.arange(nbResults)+width)
         ax.set_xticklabels(names, rotation="vertical")
         f.savefig("Results/"+time.strftime("%Y%m%d-%H%M%S")+"-"+name+"-"+metric[0]+".png")
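
Note on the analyzeResult.py hunk: the first slot of each metricsScores entry previously held the mean *test* score a second time, so slot [0] and slot [2] were identical and train performance was lost. A minimal sketch of the corrected fold averaging, using sklearn's accuracy_score as a stand-in for the project's Metrics modules (an assumption; the real code resolves getattr(Metrics, metric[0])) and made-up per-fold data:

```python
import numpy as np
from sklearn.metrics import accuracy_score

# hypothetical per-fold labels and predictions
y_trains = [np.array([0, 1, 1]), np.array([1, 0, 1])]
y_train_preds = [np.array([0, 1, 0]), np.array([1, 0, 1])]
y_tests = [np.array([1, 0]), np.array([0, 1])]
y_test_preds = [np.array([1, 1]), np.array([0, 1])]

# mean score across folds, computed separately for train and test
trainMean = np.mean(np.array([accuracy_score(y, y_pred)
                              for y, y_pred in zip(y_trains, y_train_preds)]))
testMean = np.mean(np.array([accuracy_score(y, y_pred)
                             for y, y_pred in zip(y_tests, y_test_preds)]))

# [train mean, config placeholder, test mean] -- the layout that
# ResultAnalysis.py later indexes with [0] and [2]
metricsScores = {"accuracy_score": [trainMean, "", testMean]}
```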
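
Note on the SCMForLinear.py hunk: fit() now closes the temporary HDF5 file and deletes it from disk once the SCM classifier is trained, instead of leaking one scratch file per run. A minimal standalone sketch of the same cleanup pattern, assuming h5py (which the surrounding dsetFile code appears to use) and a hypothetical scratch path `name`; the sketch narrows the committed bare `except:` to `OSError`:

```python
import os
import h5py

name = "temp_scm.hdf5"  # hypothetical scratch-file path
dsetFile = h5py.File(name, "w")
dsetFile.create_dataset("temp_scm", data=[[0, 1], [1, 0]])

# ... train on the packed dataset here ...

try:
    dsetFile.close()
    os.remove(name)
except OSError:
    pass  # scratch file already closed/removed; nothing to clean up
```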
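
Note on the ResultAnalysis.py hunks: the plot changes from a single bar per classifier to a grouped chart, validation next to train, which makes overfitting visible at a glance (the dropped round() was redundant, since "%.2f" already formats to two decimals). A minimal standalone sketch of the grouped chart with hypothetical classifier names and scores:

```python
import matplotlib
matplotlib.use('Agg')  # headless backend, as in ResultAnalysis.py
import matplotlib.pyplot as plt
import numpy as np

names = ["DecisionTree", "SVM", "SCM"]  # hypothetical classifiers
validationScores = [0.72, 0.81, 0.78]   # hypothetical validation scores
trainScores = [0.95, 0.88, 0.84]        # hypothetical train scores

nbResults = len(names)
width = 0.35  # width of each bar in a group

fig, ax = plt.subplots(figsize=(10, 6))
rects = ax.bar(np.arange(nbResults), validationScores, width, color="r")
rect2 = ax.bar(np.arange(nbResults) + width, trainScores, width, color="0.3")

# label every bar with its height, as autolabel() does
for rect in list(rects) + list(rect2):
    height = rect.get_height()
    ax.text(rect.get_x() + rect.get_width() / 2., 1.01 * height,
            "%.2f" % height, ha='center', va='bottom')

ax.legend((rects[0], rect2[0]), ('Validation', 'Train'))
ax.set_xticks(np.arange(nbResults) + width)
ax.set_xticklabels(names, rotation="vertical")
fig.savefig("grouped_scores.png")
```

Offsetting the second bar series by `width` and centering the tick labels between the two bars is the standard matplotlib grouped-bar idiom; the diff follows it exactly.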