From fe9ff80eaa7188b1ff73ac9645b9d1523382fdc8 Mon Sep 17 00:00:00 2001
From: Baptiste Bauvin <baptiste.bauvin@lis-lab.fr>
Date: Fri, 10 Aug 2018 17:45:39 -0400
Subject: [PATCH] Clarified multiview_results

---
 .../Multiview/ExecMultiview.py  |  4 ++-
 .../Multiview/MultiviewUtils.py |  8 +++++
 .../ResultAnalysis.py           | 32 +++++++++----------
 3 files changed, 27 insertions(+), 17 deletions(-)
 create mode 100644 multiview_platform/MonoMultiViewClassifiers/Multiview/MultiviewUtils.py

diff --git a/multiview_platform/MonoMultiViewClassifiers/Multiview/ExecMultiview.py b/multiview_platform/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
index 1f2a5d10..f711aca7 100644
--- a/multiview_platform/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
+++ b/multiview_platform/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
@@ -9,6 +9,7 @@ import numpy as np
 from ..utils import HyperParameterSearch
 from ..utils.Dataset import getShape
 from .. import MultiviewClassifiers
+from .MultiviewUtils import MultiviewResult
 
 
 # Author-Info
@@ -157,7 +158,8 @@ def ExecMultiview(directory, DATASET, name, classificationIndices, KFolds, nbCor
                       learningRate, name, imagesAnalysis)
     logging.debug("Start:\t Saving preds")
 
-    return CL_type, classificationKWARGS, metricsScores, fullLabels, testLabelsMulticlass
+    return MultiviewResult(CL_type, classificationKWARGS, metricsScores, fullLabels, testLabelsMulticlass)
+    # return CL_type, classificationKWARGS, metricsScores, fullLabels, testLabelsMulticlass
 
 
 if __name__ == "__main__":
diff --git a/multiview_platform/MonoMultiViewClassifiers/Multiview/MultiviewUtils.py b/multiview_platform/MonoMultiViewClassifiers/Multiview/MultiviewUtils.py
new file mode 100644
index 00000000..5093007d
--- /dev/null
+++ b/multiview_platform/MonoMultiViewClassifiers/Multiview/MultiviewUtils.py
@@ -0,0 +1,8 @@
+class MultiviewResult(object):
+    def __init__(self, classifier_name, classifier_config,
+                 metrics_scores, full_labels, test_labels_multiclass):
+        self.classifier_name = classifier_name
+        self.classifier_config = classifier_config
+        self.metrics_scores = metrics_scores
+        self.full_labels = full_labels
+        self.test_labels_multiclass = test_labels_multiclass
\ No newline at end of file
diff --git a/multiview_platform/MonoMultiViewClassifiers/ResultAnalysis.py b/multiview_platform/MonoMultiViewClassifiers/ResultAnalysis.py
index 7fbf98ff..6c0cee19 100644
--- a/multiview_platform/MonoMultiViewClassifiers/ResultAnalysis.py
+++ b/multiview_platform/MonoMultiViewClassifiers/ResultAnalysis.py
@@ -66,8 +66,8 @@ def getMetricsScoresBiclass(metrics, monoviewResults, multiviewResults):
     ----------
     metrics : list of lists
         The metrics names with configuration metrics[i][0] = name of metric i
-    monoviewResults : list of
-        The ax.
+    monoviewResults : list of MonoviewResult objects
+        A list containing all the results for all the monoview experiments.
     set : integer
         1 means the test scores, anything else means the train score
     std: None or array
@@ -86,11 +86,11 @@ def getMetricsScoresBiclass(metrics, monoviewResults, multiviewResults):
             testScores.append(classifierResult.metrics_scores[metric[0]][1])
             classifiersNames.append(classifierResult.classifier_name+"-"+classifierResult.view_name)
         for classifierResult in multiviewResults:
-            trainScores.append(classifierResult[2][metric[0]][0])
-            testScores.append(classifierResult[2][metric[0]][1])
-            multiviewClassifierPackage = getattr(MultiviewClassifiers, classifierResult[0])
-            multiviewClassifierModule = getattr(multiviewClassifierPackage, classifierResult[0]+"Module")
-            classifiersNames.append(multiviewClassifierModule.genName(classifierResult[1]))
+            trainScores.append(classifierResult.metrics_scores[metric[0]][0])
+            testScores.append(classifierResult.metrics_scores[metric[0]][1])
+            multiviewClassifierPackage = getattr(MultiviewClassifiers, classifierResult.classifier_name)
+            multiviewClassifierModule = getattr(multiviewClassifierPackage, classifierResult.classifier_name+"Module")
+            classifiersNames.append(multiviewClassifierModule.genName(classifierResult.classifier_config))
         metricsScores[metric[0]] = {"classifiersNames": classifiersNames,
                                     "trainScores": trainScores,
                                     "testScores": testScores}
@@ -109,10 +109,10 @@ def getExampleErrorsBiclass(usedBenchmarkArgumentDictionary, monoviewResults, mu
         errorOnExamples[unseenExamples]=-100
         exampleErrors[classifierName] = errorOnExamples
     for classifierResult in multiviewResults:
-        multiviewClassifierPackage = getattr(MultiviewClassifiers, classifierResult[0])
-        multiviewClassifierModule = getattr(multiviewClassifierPackage, classifierResult[0]+"Module")
-        classifierName = multiviewClassifierModule.genName(classifierResult[1])
-        predictedLabels = classifierResult[3]
+        multiviewClassifierPackage = getattr(MultiviewClassifiers, classifierResult.classifier_name)
+        multiviewClassifierModule = getattr(multiviewClassifierPackage, classifierResult.classifier_name+"Module")
+        classifierName = multiviewClassifierModule.genName(classifierResult.classifier_config)
+        predictedLabels = classifierResult.full_labels
         errorOnExamples = predictedLabels==trueLabels
         errorOnExamples = errorOnExamples.astype(int)
         unseenExamples = np.where(trueLabels==-100)[0]
@@ -405,19 +405,19 @@ def analyzeMulticlass(results, statsIter, benchmarkArgumentDictionaries, nbExamp
                     multiclassResults[iterIndex][classifierName][exampleIndex, classifierNegative] += 1
         for classifierResult in resMulti:
-            multiviewClassifierPackage = getattr(MultiviewClassifiers, classifierResult[0])
-            multiviewClassifierModule = getattr(multiviewClassifierPackage, classifierResult[0]+"Module")
-            classifierName = multiviewClassifierModule.genName(classifierResult[1])
+            multiviewClassifierPackage = getattr(MultiviewClassifiers, classifierResult.classifier_name)
+            multiviewClassifierModule = getattr(multiviewClassifierPackage, classifierResult.classifier_name+"Module")
+            classifierName = multiviewClassifierModule.genName(classifierResult.classifier_config)
             if classifierName not in multiclassResults[iterIndex]:
                 multiclassResults[iterIndex][classifierName] = np.zeros((nbExamples,nbLabels),dtype=int)
             for exampleIndex in trainIndices:
-                label = classifierResult[3][exampleIndex]
+                label = classifierResult.full_labels[exampleIndex]
                 if label == 1:
                     multiclassResults[iterIndex][classifierName][exampleIndex, classifierPositive] += 1
                 else:
                     multiclassResults[iterIndex][classifierName][exampleIndex, classifierNegative] += 1
 
             for multiclassIndex, exampleIndex in enumerate(testMulticlassIndices):
-                label = classifierResult[4][multiclassIndex]
+                label = classifierResult.test_labels_multiclass[multiclassIndex]
                 if label == 1:
                     multiclassResults[iterIndex][classifierName][exampleIndex, classifierPositive] += 1
                 else:
-- 
GitLab
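
Note (not part of the patch): a minimal usage sketch of how the new MultiviewResult object replaces positional tuple indexing in downstream code such as ResultAnalysis.py. The import path follows the file location added by the patch, but the classifier name, configuration, and score values below are illustrative assumptions, not taken from the repository.

    # Hypothetical illustration; classifier name, config, and scores are made up.
    from multiview_platform.MonoMultiViewClassifiers.Multiview.MultiviewUtils import MultiviewResult

    result = MultiviewResult("Fusion",                          # classifier_name (CL_type)
                             {"fusionType": "LateFusion"},      # classifier_config (classificationKWARGS)
                             {"accuracy_score": [0.93, 0.88]},  # metrics_scores: {metric: [train, test]}
                             [1, 0, 1, 1],                      # full_labels (predicted labels)
                             [1, 0, 1, 0])                      # test_labels_multiclass

    # Before this patch, callers indexed the returned tuple positionally:
    #     test_accuracy = classifierResult[2]["accuracy_score"][1]
    # With the patch applied, the same value is read through a named attribute:
    test_accuracy = result.metrics_scores["accuracy_score"][1]
    print(result.classifier_name, test_accuracy)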