Commit 532d4dc8 authored by Baptiste Bauvin

Cleaned the analyzeResults files for multiview and added tests for ExecClassifMonoView

parent 07fbe9b0
Showing with 121 additions and 137 deletions
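The bulk of the deletions below removes the per-module copies of printMetricScore, getTotalMetricScores and getMetricsScores from the multiview analyzeResults files; each file now imports them from utils.MultiviewResultAnalysis instead. A minimal usage sketch of the shared helpers, assuming they keep the same signatures as the deleted copies (the example data is hypothetical and the relative import only resolves inside the package):

import numpy as np
# Relative import as added by this commit; only valid inside the package.
from ...utils.MultiviewResultAnalysis import printMetricScore, getMetricsScores

metrics = [["accuracy_score", None]]      # [metricName, metricConfig] pairs
labels = np.array([0, 1, 0, 1, 0, 1])     # ground-truth labels (hypothetical)
learningIndices = np.array([0, 1, 2, 3])  # train example indices
validationIndices = np.array([4, 5])      # test example indices
trainLabels = np.array([0, 1, 0, 0])      # predictions on the train split
testLabels = np.array([0, 1])             # predictions on the test split

# metricsScores maps each metric name to [trainScore, testScore]
metricsScores = getMetricsScores(metrics, trainLabels, testLabels,
                                 validationIndices, learningIndices, labels)
stringAnalysis = printMetricScore(metricsScores, metrics)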
@@ -51,8 +51,6 @@ def canProbas():
return False
def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs):
modelType = kwargs['0']
maxRules = int(kwargs['1'])
...
@@ -57,7 +57,7 @@ def saveResults(LABELS_DICTIONARY, stringAnalysis, views, classifierModule, clas
outputTextFile.close()
if imagesAnalysis is not None:
for imageName in imagesAnalysis:
for imageName in imagesAnalysis.keys():
if os.path.isfile(outputFileName + imageName + ".png"):
for i in range(1, 20):
testFileName = outputFileName + imageName + "-" + str(i) + ".png"
...
import numpy as np
def genName(config):
return "FatLateFusion"
def getBenchmark(benchmark, args=None):
benchmark["Multiview"]["FatLateFusion"] = ["take_everything"]
return benchmark
@@ -31,8 +29,8 @@ def getArgs(args, benchmark, views, viewsIndices, randomState, directory, result
def genParamsSets(classificationKWARGS, randomState, nIter=1):
"""Used to generate parameters sets for the random hyper parameters optimization function"""
nbMonoviewClassifiers = len(classificationKWARGS["monoviewDecisions"])
weights = [randomState.random_sample(nbMonoviewClassifiers) for _ in range(len(classificationKWARGS["monoviewDecisions"]))]
normalizedWeights = [weights/np.sum(weights)]
weights = [randomState.random_sample(nbMonoviewClassifiers) for _ in range(nIter)]
normalizedWeights = [[weightVector/np.sum(weightVector)] for weightVector in weights]
return normalizedWeights
class FatLateFusionClass:
...
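For context, the genParamsSets change above now draws one random weight vector per requested parameter set (nIter) and normalizes each vector independently so that its entries sum to 1. A standalone sketch of that behavior, assuming NumPy and arbitrary example sizes:

import numpy as np

randomState = np.random.RandomState(42)
nbMonoviewClassifiers = 3  # example: number of monoview decisions to fuse
nIter = 2                  # example: number of parameter sets to generate

# One random weight vector per parameter set, each rescaled to sum to 1.
weights = [randomState.random_sample(nbMonoviewClassifiers) for _ in range(nIter)]
normalizedWeights = [[weightVector / np.sum(weightVector)] for weightVector in weights]

for paramSet in normalizedWeights:
    assert np.isclose(np.sum(paramSet[0]), 1.0)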
from ... import Metrics
from ...utils.MultiviewResultAnalysis import printMetricScore, getMetricsScores
# Author-Info
__author__ = "Baptiste Bauvin"
@@ -39,44 +40,3 @@ def execute(classifier, trainLabels,
imagesAnalysis = {}
return stringAnalysis, imagesAnalysis, metricsScores
def printMetricScore(metricScores, metrics):
metricScoreString = "\n\n"
for metric in metrics:
metricModule = getattr(Metrics, metric[0])
if metric[1] is not None:
metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
else:
metricKWARGS = {}
metricScoreString += "\tFor " + metricModule.getConfig(**metricKWARGS) + " : "
metricScoreString += "\n\t\t- Score on train : " + str(metricScores[metric[0]][0])
metricScoreString += "\n\t\t- Score on test : " + str(metricScores[metric[0]][1])
metricScoreString += "\n\n"
return metricScoreString
def getTotalMetricScores(metric, trainLabels, testLabels, validationIndices, learningIndices, labels):
metricModule = getattr(Metrics, metric[0])
if metric[1] is not None:
metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
else:
metricKWARGS = {}
try:
trainScore = metricModule.score(labels[learningIndices], trainLabels, **metricKWARGS)
except:
print(labels[learningIndices])
print(trainLabels)
import pdb;pdb.set_trace()
testScore = metricModule.score(labels[validationIndices], testLabels, **metricKWARGS)
return [trainScore, testScore]
def getMetricsScores(metrics, trainLabels, testLabels,
validationIndices, learningIndices, labels):
metricsScores = {}
for metric in metrics:
metricsScores[metric[0]] = getTotalMetricScores(metric, trainLabels, testLabels,
validationIndices, learningIndices, labels)
return metricsScores
from sklearn.metrics import precision_recall_fscore_support, accuracy_score, classification_report
import numpy as np
import matplotlib
# matplotlib.use('Agg')
import operator
from .Methods import LateFusion
from ... import Metrics
from ...utils.MultiviewResultAnalysis import printMetricScore, getMetricsScores
# Author-Info
__author__ = "Baptiste Bauvin"
__status__ = "Prototype" # Production, Development, Prototype
def printMetricScore(metricScores, metrics):
metricScoreString = "\n\n"
for metric in metrics:
metricModule = getattr(Metrics, metric[0])
if metric[1] is not None:
metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
else:
metricKWARGS = {}
metricScoreString += "\tFor " + metricModule.getConfig(**metricKWARGS) + " : "
metricScoreString += "\n\t\t- Score on train : " + str(metricScores[metric[0]][0])
metricScoreString += "\n\t\t- Score on test : " + str(metricScores[metric[0]][1])
metricScoreString += "\n\n"
return metricScoreString
def getTotalMetricScores(metric, trainLabels, testLabels, validationIndices, learningIndices, labels):
metricModule = getattr(Metrics, metric[0])
if metric[1] is not None:
metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
else:
metricKWARGS = {}
try:
trainScore = metricModule.score(labels[learningIndices], trainLabels, **metricKWARGS)
except:
print(labels[learningIndices])
print(trainLabels)
import pdb;pdb.set_trace()
testScore = metricModule.score(labels[validationIndices], testLabels, **metricKWARGS)
return [trainScore, testScore]
def getMetricsScores(metrics, trainLabels, testLabels,
validationIndices, learningIndices, labels):
metricsScores = {}
for metric in metrics:
metricsScores[metric[0]] = getTotalMetricScores(metric, trainLabels, testLabels,
validationIndices, learningIndices, labels)
return metricsScores
def execute(classifier, trainLabels,
testLabels, DATASET,
classificationKWARGS, classificationIndices,
...
@@ -7,6 +7,7 @@ import numpy as np
from ... import Metrics
from ...utils.Dataset import getV, getShape
from . import Classifiers
from ...utils.MultiviewResultAnalysis import printMetricScore, getMetricsScores
# Author-Info
__author__ = "Baptiste Bauvin"
@@ -109,9 +110,6 @@ def getAlgoConfig(classifier, classificationKWARGS, nbCores, viewNames, hyperPar
algoString += "\n\n"
algoString += "\n\nComputation time on " + str(nbCores) + " cores : \n\tDatabase extraction time : " + str(
hms(seconds=int(extractionTime))) + "\n\t"
row_format = "{:>15}" * 3
algoString += row_format.format("", *['Learn', 'Prediction'])
algoString += '\n\t'
algoString += "\n\tSo a total classification time of " + str(hms(seconds=int(classificationTime))) + ".\n\n"
algoString += "\n\n"
return algoString, classifierAnalysis
@@ -121,7 +119,7 @@ def getReport(classifier, CLASS_LABELS, classificationIndices, DATASET, trainLab
testLabels, viewIndices, metric):
learningIndices, validationIndices, multiviewTestIndices = classificationIndices
nbView = len(viewIndices)
NB_CLASS = len(set(CLASS_LABELS)) # DATASET.get("Metadata").attrs["nbClass"]
NB_CLASS = len(set(CLASS_LABELS))
metricModule = getattr(Metrics, metric[0])
fakeViewsIndicesDict = dict(
(viewIndex, fakeViewIndex) for viewIndex, fakeViewIndex in zip(viewIndices, range(nbView)))
@@ -178,43 +176,6 @@ def modifiedMean(surplusAccuracies):
return meanAccuracies
def printMetricScore(metricScores, metrics):
metricScoreString = "\n\n"
for metric in metrics:
metricModule = getattr(Metrics, metric[0])
if metric[1] is not None:
metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
else:
metricKWARGS = {}
metricScoreString += "\tFor " + metricModule.getConfig(**metricKWARGS) + " : "
metricScoreString += "\n\t\t- Score on train : " + str(metricScores[metric[0]][0])
metricScoreString += "\n\t\t- Score on test : " + str(metricScores[metric[0]][1])
metricScoreString += "\n\n"
return metricScoreString
def getTotalMetricScores(metric, trainLabels, testLabels,
validationIndices, learningIndices, labels):
metricModule = getattr(Metrics, metric[0])
if metric[1] is not None:
metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
else:
metricKWARGS = {}
validationIndices = validationIndices
trainScore = metricModule.score(labels[learningIndices], trainLabels, **metricKWARGS)
testScore = metricModule.score(labels[validationIndices], testLabels, **metricKWARGS)
return [trainScore, testScore]
def getMetricsScores(metrics, trainLabels, testLabels,
validationIndices, learningIndices, labels):
metricsScores = {}
for metric in metrics:
metricsScores[metric[0]] = getTotalMetricScores(metric, trainLabels, testLabels,
validationIndices, learningIndices, labels)
return metricsScores
def getMeanIterations(kFoldClassifierStats, foldIndex):
iterations = np.array([kFoldClassifier[foldIndex].iterIndex + 1 for kFoldClassifier in kFoldClassifierStats])
return np.mean(iterations)
@@ -227,6 +188,7 @@ def execute(classifier, trainLabels,
databaseName, KFolds,
hyperParamSearch, nIter, metrics,
viewsIndices, randomState, labels):
learningIndices, validationIndices, testIndicesMulticlass = classificationIndices
if classifier.classifiersConfigs is None:
metricsScores = getMetricsScores(metrics, trainLabels, testLabels,
...
from .. import Metrics
# Author-Info
__author__ = "Baptiste Bauvin"
__status__ = "Prototype" # Production, Development, Prototype
def printMetricScore(metricScores, metrics):
metricScoreString = "\n\n"
for metric in metrics:
metricModule = getattr(Metrics, metric[0])
if metric[1] is not None:
metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
else:
metricKWARGS = {}
metricScoreString += "\tFor " + metricModule.getConfig(**metricKWARGS) + " : "
metricScoreString += "\n\t\t- Score on train : " + str(metricScores[metric[0]][0])
metricScoreString += "\n\t\t- Score on test : " + str(metricScores[metric[0]][1])
metricScoreString += "\n\n"
return metricScoreString
def getTotalMetricScores(metric, trainLabels, testLabels, validationIndices, learningIndices, labels):
metricModule = getattr(Metrics, metric[0])
if metric[1] is not None:
metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
else:
metricKWARGS = {}
try:
trainScore = metricModule.score(labels[learningIndices], trainLabels, **metricKWARGS)
except:
print(labels[learningIndices])
print(trainLabels)
import pdb;pdb.set_trace()
testScore = metricModule.score(labels[validationIndices], testLabels, **metricKWARGS)
return [trainScore, testScore]
def getMetricsScores(metrics, trainLabels, testLabels,
validationIndices, learningIndices, labels):
metricsScores = {}
for metric in metrics:
metricsScores[metric[0]] = getTotalMetricScores(metric, trainLabels, testLabels,
validationIndices, learningIndices, labels)
return metricsScores
@@ -3,6 +3,9 @@ import os
from ...MonoMultiViewClassifiers.Metrics import accuracy_score
# Check that each metric exposes the expected functions, returning the right output types for the right input types
# Do the same for the different monoview and multiview classifiers
class Test_accuracy_score(unittest.TestCase):
...
@@ -77,3 +77,66 @@ class Test_initTrainTest(unittest.TestCase):
np.array([270,189,445,174,445])]))
np.testing.assert_array_equal(y_train, np.array([0,0,1,0,0]))
np.testing.assert_array_equal(y_test, np.array([1,1,0,0,0]))
class Test_getKWARGS(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.classifierModule = None
cls.hyperParamSearch = None
cls.nIter = 2
cls.CL_type = "string"
cls.X_train = np.zeros((10,20))
cls.y_train = np.zeros((10))
cls.randomState = np.random.RandomState(42)
cls.outputFileName = "test_file"
cls.KFolds = None
cls.nbCores = 1
cls.metrics = {"accuracy_score":""}
cls.kwargs = {}
def test_simple(cls):
clKWARGS = ExecClassifMonoView.getKWARGS(cls.classifierModule,
cls.hyperParamSearch,
cls.nIter,
cls.CL_type,
cls.X_train,
cls.y_train,
cls.randomState,
cls.outputFileName,
cls.KFolds,
cls.nbCores,
cls.metrics,
cls.kwargs)
pass
class Test_saveResults(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.stringAnalysis = "string analysis"
cls.outputFileName = "test_file"
cls.full_labels_pred = np.zeros(10)
cls.y_train_pred = np.ones(5)
cls.y_train = np.zeros(5)
cls.imagesAnalysis = {}
def test_simple(cls):
ExecClassifMonoView.saveResults(cls.stringAnalysis,
cls.outputFileName,
cls.full_labels_pred,
cls.y_train_pred,
cls.y_train,
cls.imagesAnalysis)
# Test if the files are created with the right content
def test_with_image_analysis(cls):
cls.imagesAnalysis = {"test_image":"image.png"} # Image to gen
ExecClassifMonoView.saveResults(cls.stringAnalysis,
cls.outputFileName,
cls.full_labels_pred,
cls.y_train_pred,
cls.y_train,
cls.imagesAnalysis)
# Test if the files are created with the right content
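The saveResults tests above only exercise the call for now; the trailing comments point to file-content checks to add later. One possible shape for such a check, assuming (hypothetically) that saveResults writes a text report named after outputFileName plus one .png per entry of imagesAnalysis, mirroring the multiview saveResults shown earlier in this diff:

import os

def check_saveResults_outputs(outputFileName, imagesAnalysis):
    # Hypothetical helper: the exact file names written by saveResults are an
    # assumption (a text report plus one image per analysis entry).
    assert os.path.isfile(outputFileName + ".txt")
    for imageName in imagesAnalysis:
        assert os.path.isfile(outputFileName + imageName + ".png")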