Commit a45ff5c2 authored by Baptiste Bauvin
Simplified result analysis

parent fe9ff80e
...
@@ -358,7 +358,7 @@ def execOneBenchmarkMonoCore(DATASET=None, LABELS_DICTIONARY=None, directory=None,
                                      metrics=metrics, nIter=args.CL_HPS_iter, **arguments)]
     logging.debug("Done:\t Multiview benchmark")
 
-    return [flag, resultsMonoview, resultsMultiview]
+    return [flag, resultsMonoview + resultsMultiview]
 
 
 def execBenchmark(nbCores, statsIter, nbMulticlass, benchmarkArgumentsDictionaries, classificationIndices, directories,
...
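The hunk above merges the monoview and multiview result lists into one list before returning, so the analysis code can loop over a single collection. A minimal sketch of the idea, using made-up stand-in values rather than the project's actual result objects:

# Sketch with made-up stand-ins: the merged list lets a consumer unpack the
# benchmark output once and iterate over every result, monoview or multiview.
flag = (0, (0, 1))                                       # (iteration index, (positive, negative) labels)
resultsMonoview = ["DecisionTree-View0", "SVM-View1"]    # stand-ins for MonoviewResult objects
resultsMultiview = ["Fusion"]                            # stand-in for a MultiviewResult object

benchmark_output = [flag, resultsMonoview + resultsMultiview]

flag, results = benchmark_output
for classifier_result in results:
    print(classifier_result)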
@@ -187,6 +187,8 @@ class MonoviewResult(object):
         self.y_test_multiclass_pred = y_test_multiclass_pred
         self.test_folds_preds = test_folds_preds
 
+    def get_classifier_name(self):
+        return self.classifier_name+"-"+self.view_name
...
+from .. import MultiviewClassifiers
 
 
 class MultiviewResult(object):
     def __init__(self, classifier_name, classifier_config,
                  metrics_scores, full_labels, test_labels_multiclass):
         self.classifier_name = classifier_name
         self.classifier_config = classifier_config
         self.metrics_scores = metrics_scores
-        self.full_labels = full_labels
-        self.test_labels_multiclass = test_labels_multiclass
\ No newline at end of file
+        self.full_labels_pred = full_labels
+        self.y_test_multiclass_pred = test_labels_multiclass
+
+    def get_classifier_name(self):
+        multiviewClassifierPackage = getattr(MultiviewClassifiers, self.classifier_name)
+        multiviewClassifierModule = getattr(multiviewClassifierPackage, self.classifier_name + "Module")
+        return multiviewClassifierModule.genName(self.classifier_config)
\ No newline at end of file
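The commit gives both result classes the same get_classifier_name() accessor, which is what lets the analysis functions further down iterate over one mixed list instead of two. A self-contained sketch of that shared interface (the class bodies below are simplified stand-ins; the real MultiviewResult resolves its name through the MultiviewClassifiers package as shown in the diff):

# Simplified stand-ins for MonoviewResult / MultiviewResult, showing only the
# shared get_classifier_name() interface introduced by this commit.
class MonoviewResultSketch(object):
    def __init__(self, classifier_name, view_name):
        self.classifier_name = classifier_name
        self.view_name = view_name

    def get_classifier_name(self):
        # Monoview classifiers are identified by "<classifier>-<view>".
        return self.classifier_name + "-" + self.view_name


class MultiviewResultSketch(object):
    def __init__(self, classifier_name):
        self.classifier_name = classifier_name

    def get_classifier_name(self):
        # The real class looks the name up via getattr on the MultiviewClassifiers
        # package and calls genName(); a plain attribute keeps this sketch self-contained.
        return self.classifier_name


results = [MonoviewResultSketch("DecisionTree", "View0"), MultiviewResultSketch("Fusion")]
print([res.get_classifier_name() for res in results])   # ['DecisionTree-View0', 'Fusion']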
@@ -59,65 +59,56 @@ def autolabel(rects, ax, set=1, std=None):
                 ha='center', va='bottom', size="small")
 
 
-def getMetricsScoresBiclass(metrics, monoviewResults, multiviewResults):
+def getMetricsScoresBiclass(metrics, results):
     r"""Used to extract metrics scores in case of biclass classification
 
     Parameters
     ----------
     metrics : list of lists
         The metrics names with configuration metrics[i][0] = name of metric i
-    monoviewResults : list of MonoviewResult objects
+    results : list of MonoviewResult and MultiviewResults objects
         A list containing all the resluts for all the monoview experimentations.
-    set : integer
-        1 means the test scores, anything else means the train score
-    std: None or array
-        The standard deviations in the case of statsIter results.
 
     Returns
     -------
+    metricsScores : dict of dict of list
+        Regroups all the scores for each metrics for each classifier and for the train and test sets.
+        organized as :
+        -`metricScores[metric_name]["classifiersNames"]` is a list of all the classifiers available for this metric,
+        -`metricScores[metric_name]["trainScores"]` is a list of all the available classifiers scores on the train set,
+        -`metricScores[metric_name]["testScores"]` is a list of all the available classifiers scores on the test set.
     """
     metricsScores = {}
 
     for metric in metrics:
         classifiersNames = []
         trainScores = []
         testScores = []
-        for classifierResult in monoviewResults:
-            trainScores.append(classifierResult.metrics_scores[metric[0]][0])
-            testScores.append(classifierResult.metrics_scores[metric[0]][1])
-            classifiersNames.append(classifierResult.classifier_name+"-"+classifierResult.view_name)
-        for classifierResult in multiviewResults:
+
+        for classifierResult in results:
             trainScores.append(classifierResult.metrics_scores[metric[0]][0])
             testScores.append(classifierResult.metrics_scores[metric[0]][1])
-            multiviewClassifierPackage = getattr(MultiviewClassifiers, classifierResult.classifier_name)
-            multiviewClassifierModule = getattr(multiviewClassifierPackage, classifierResult.classifier_name+"Module")
-            classifiersNames.append(multiviewClassifierModule.genName(classifierResult.classifier_config))
+            classifiersNames.append(classifierResult.get_classifier_name())
 
         metricsScores[metric[0]] = {"classifiersNames": classifiersNames,
                                     "trainScores": trainScores,
                                     "testScores": testScores}
     return metricsScores
 
 
-def getExampleErrorsBiclass(usedBenchmarkArgumentDictionary, monoviewResults, multiviewResults):
+def getExampleErrorsBiclass(usedBenchmarkArgumentDictionary, results):
     exampleErrors = {}
     trueLabels = usedBenchmarkArgumentDictionary["labels"]
-    for classifierResult in monoviewResults:
-        classifierName = classifierResult.classifier_name+"-"+classifierResult.view_name
+
+    for classifierResult in results:
+        classifierName = classifierResult.get_classifier_name()
         predictedLabels = classifierResult.full_labels_pred
         errorOnExamples = predictedLabels==trueLabels
         errorOnExamples = errorOnExamples.astype(int)
         unseenExamples = np.where(trueLabels==-100)[0]
         errorOnExamples[unseenExamples]=-100
         exampleErrors[classifierName] = errorOnExamples
-    for classifierResult in multiviewResults:
-        multiviewClassifierPackage = getattr(MultiviewClassifiers, classifierResult.classifier_name)
-        multiviewClassifierModule = getattr(multiviewClassifierPackage, classifierResult.classifier_name+"Module")
-        classifierName = multiviewClassifierModule.genName(classifierResult.classifier_config)
-        predictedLabels = classifierResult.full_labels
-        errorOnExamples = predictedLabels==trueLabels
-        errorOnExamples = errorOnExamples.astype(int)
-        unseenExamples = np.where(trueLabels==-100)[0]
-        errorOnExamples[unseenExamples]=-100
-        exampleErrors[classifierName] = errorOnExamples
     return exampleErrors
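With the shared accessor in place, getMetricsScoresBiclass and getExampleErrorsBiclass each collapse their two per-type loops into a single pass over the merged results list. A sketch of the resulting pattern for the metric scores, using plain dictionaries as stand-ins for the real result objects (names and numbers are made up):

# Sketch with made-up data: one loop over the merged results builds, per metric,
# the classifier names and their train/test scores, as in getMetricsScoresBiclass.
metrics = [["accuracy_score"], ["f1_score"]]
results = [
    {"name": "DecisionTree-View0", "scores": {"accuracy_score": (0.90, 0.80), "f1_score": (0.88, 0.79)}},
    {"name": "Fusion", "scores": {"accuracy_score": (0.95, 0.85), "f1_score": (0.93, 0.84)}},
]

metricsScores = {}
for metric in metrics:
    metricsScores[metric[0]] = {
        "classifiersNames": [res["name"] for res in results],
        "trainScores": [res["scores"][metric[0]][0] for res in results],
        "testScores": [res["scores"][metric[0]][1] for res in results],
    }

print(metricsScores["accuracy_score"]["testScores"])   # [0.8, 0.85]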
@@ -219,8 +210,7 @@ def publishExampleErrors(exampleErrors, directory, databaseName, labelsNames, mi
 
 
 def analyzeBiclass(results, benchmarkArgumentDictionaries, statsIter, metrics):
     logging.debug("Srart:\t Analzing all biclass resuls")
     biclassResults = [{} for _ in range(statsIter)]
-    for result in results:
-        flag = result[0]
+    for flag, result in results:
         iteridex = flag[0]
         classifierPositive = flag[1][0]
         classifierNegative = flag[1][1]
@@ -229,10 +219,8 @@ def analyzeBiclass(results, benchmarkArgumentDictionaries, statsIter, metrics):
         for benchmarkArgumentDictionary in benchmarkArgumentDictionaries:
             if benchmarkArgumentDictionary["flag"]==flag:
                 usedBenchmarkArgumentDictionary = benchmarkArgumentDictionary
-        monoviewResults = result[1]
-        multiviewResults = result[2]
-        metricsScores = getMetricsScoresBiclass(metrics, monoviewResults, multiviewResults)
-        exampleErrors = getExampleErrorsBiclass(usedBenchmarkArgumentDictionary, monoviewResults, multiviewResults)
+        metricsScores = getMetricsScoresBiclass(metrics, result)
+        exampleErrors = getExampleErrorsBiclass(usedBenchmarkArgumentDictionary, result)
         directory = usedBenchmarkArgumentDictionary["directory"]
         databaseName = usedBenchmarkArgumentDictionary["args"].name
         labelsNames = [usedBenchmarkArgumentDictionary["LABELS_DICTIONARY"][0],
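In analyzeBiclass each benchmark output is now unpacked directly as (flag, result); the flag identifies the statistical iteration and the label pair of the one-versus-one problem, and is used to retrieve the matching benchmark argument dictionary. A small sketch of that lookup, with assumed flag and dictionary contents:

# Sketch with assumed data: a flag is (iteration index, (positive label, negative label)).
results = [((0, (0, 1)), "result A"), ((1, (0, 2)), "result B")]
benchmarkArgumentDictionaries = [{"flag": (0, (0, 1)), "directory": "iter_0/0_vs_1/"},
                                 {"flag": (1, (0, 2)), "directory": "iter_1/0_vs_2/"}]

for flag, result in results:
    iterIndex = flag[0]
    classifierPositive, classifierNegative = flag[1]
    usedBenchmarkArgumentDictionary = next(
        d for d in benchmarkArgumentDictionaries if d["flag"] == flag)
    print(iterIndex, classifierPositive, classifierNegative,
          usedBenchmarkArgumentDictionary["directory"])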
@@ -380,15 +368,18 @@ def analyzeMulticlass(results, statsIter, benchmarkArgumentDictionaries, nbExamples,
                       metrics, classificationIndices, directories):
     """Used to transform one versus one results in multiclass results and to publish it"""
     multiclassResults = [{} for _ in range(statsIter)]
-    for flag, resMono, resMulti in results:
+    for flag, result in results:
         iterIndex = flag[0]
         classifierPositive = flag[1][0]
         classifierNegative = flag[1][1]
         for benchmarkArgumentDictionary in benchmarkArgumentDictionaries:
             if benchmarkArgumentDictionary["flag"] == flag:
                 trainIndices, testIndices, testMulticlassIndices = benchmarkArgumentDictionary["classificationIndices"]
-        for classifierResult in resMono:
-            classifierName = classifierResult.classifier_name+"-"+classifierResult.view_name
+
+        for classifierResult in result:
+            classifierName = classifierResult.get_classifier_name()
             if classifierName not in multiclassResults[iterIndex]:
                 multiclassResults[iterIndex][classifierName] = np.zeros((nbExamples, nbLabels),dtype=int)
             for exampleIndex in trainIndices:
@@ -404,31 +395,6 @@ def analyzeMulticlass(results, statsIter, benchmarkArgumentDictionaries, nbExamples,
                 else:
                     multiclassResults[iterIndex][classifierName][exampleIndex, classifierNegative] += 1
-        for classifierResult in resMulti:
-            multiviewClassifierPackage = getattr(MultiviewClassifiers, classifierResult.classifier_name)
-            multiviewClassifierModule = getattr(multiviewClassifierPackage, classifierResult.classifier_name+"Module")
-            classifierName = multiviewClassifierModule.genName(classifierResult.classifier_config)
-            if classifierName not in multiclassResults[iterIndex]:
-                multiclassResults[iterIndex][classifierName] = np.zeros((nbExamples,nbLabels),dtype=int)
-            for exampleIndex in trainIndices:
-                label = classifierResult.full_labels[exampleIndex]
-                if label == 1:
-                    multiclassResults[iterIndex][classifierName][exampleIndex, classifierPositive] += 1
-                else:
-                    multiclassResults[iterIndex][classifierName][exampleIndex, classifierNegative] += 1
-            for multiclassIndex, exampleIndex in enumerate(testMulticlassIndices):
-                label = classifierResult.test_labels_multiclass[multiclassIndex]
-                if label == 1:
-                    multiclassResults[iterIndex][classifierName][exampleIndex, classifierPositive] += 1
-                else:
-                    multiclassResults[iterIndex][classifierName][exampleIndex, classifierNegative] += 1
-            # for exampleIndex, label in enumerate(classifierResult[3]):
-            #     if label == 1:
-            #         multiclassResults[iterIndex][classifierName][exampleIndex, classifierPositive] += 1
-            #     else:
-            #         multiclassResults[iterIndex][classifierName][exampleIndex, classifierNegative] += 1
 
     for iterIndex, multiclassiterResult in enumerate(multiclassResults):
         for key, value in multiclassiterResult.items():
             multiclassResults[iterIndex][key] = {"labels": np.argmax(value, axis=1)}
...
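The last loop above turns the accumulated one-versus-one votes into multiclass predictions: every pairwise classifier adds one vote to the column of the label it predicted for each example, and the final label is the argmax over the label axis. A short numpy sketch of that aggregation with made-up vote counts:

import numpy as np

# Made-up vote matrix: rows are examples, columns are class labels; each entry is
# the number of one-vs-one classifiers that voted for that label on that example.
votes = np.array([[2, 0, 1],
                  [0, 3, 0],
                  [1, 1, 2]])

# The multiclass prediction keeps the most-voted label per example
# (ties are broken by np.argmax toward the lowest label index).
multiclass_labels = np.argmax(votes, axis=1)
print(multiclass_labels)   # [0 1 2]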