Commit ece0e18a authored by bbauvin

Minor changes

parent 81c4aa49
@@ -236,6 +236,7 @@ def classifyOneIter(LABELS_DICTIONARY, argumentDictionaries, nbCores, directory,
def genMetricsScores(results, trueLabels, metrics):
    """Used to add all the metrics scores to the multiclass result structure for each clf and each iteration"""
    logging.debug("Start:\t Getting multiclass scores for each metric")
@@ -255,8 +256,10 @@ def genMetricsScores(results, trueLabels, metrics):
def getErrorOnLabels(multiclassResults, multiclassLabels):
    """Used to add all the arrays showing on which example there is an error for each clf and each iteration"""
    logging.debug("Start:\t Getting errors on each example for each classifier")
    for iterIndex, iterResults in enumerate(multiclassResults):
        for classifierName, classifierResults in iterResults.items():
            errorOnExamples = classifierResults["labels"] == multiclassLabels
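The rest of this function is collapsed. A hedged sketch of how the loop might finish, consistent with the new test_type assertion below that expects np.int64 entries; the astype cast and the return are assumptions:

import logging
import numpy as np

def getErrorOnLabels(multiclassResults, multiclassLabels):
    """Add, for each classifier and iteration, the per-example array comparing predictions to the labels."""
    logging.debug("Start:\t Getting errors on each example for each classifier")
    for iterIndex, iterResults in enumerate(multiclassResults):
        for classifierName, classifierResults in iterResults.items():
            errorOnExamples = classifierResults["labels"] == multiclassLabels
            # cast the boolean mask to np.int64 so the stored array matches the test expectations
            classifierResults["errorOnExample"] = errorOnExamples.astype(np.int64)
    logging.debug("Done:\t Getting errors on each example for each classifier")
    return multiclassResults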
@@ -268,6 +271,7 @@ def getErrorOnLabels(multiclassResults, multiclassLabels):
def publishMulticlassResults(multiclassResults):
    # TODO: figure and folder organization
    pass
@@ -294,12 +298,22 @@ def analyzeMulticlass(results, statsIter, nbExamples, nbLabels, multiclassLabels
    return multiclassResults


def getResults(results, statsIter, nbMulticlass, argumentDictionaries, multiclassLabels):
def analyzeBiclass(results):
    # TODO
    return ""


def analyzeIter(results):
    # TODO
    pass


def getResults(results, statsIter, nbMulticlass, argumentDictionaries, multiclassLabels, metrics):
    if statsIter > 1:
        if nbMulticlass > 1:
            # TODO : analyze biclass results
            multiclassResults = analyzeMulticlass(results, statsIter, argumentDictionaries, multiclassLabels)
            analyzerIter(multiclassResults)
            analyzeBiclass(results)
            multiclassResults = analyzeMulticlass(results, statsIter, argumentDictionaries, multiclassLabels, metrics)
            analyzeIter(multiclassResults)
        else:
            biclassResults = analyzeBiclass(results)
            analyzeIter(biclassResults)
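The hunk stops before any statsIter == 1 branch of getResults. A possible completion is sketched below; the single-iteration behaviour (calling analyzeMulticlass or analyzeBiclass without the per-iteration analysis) is only an assumption about the intended design, not part of this commit:

def getResults(results, statsIter, nbMulticlass, argumentDictionaries, multiclassLabels, metrics):
    if statsIter > 1:
        if nbMulticlass > 1:
            analyzeBiclass(results)
            multiclassResults = analyzeMulticlass(results, statsIter, argumentDictionaries,
                                                  multiclassLabels, metrics)
            analyzeIter(multiclassResults)
        else:
            biclassResults = analyzeBiclass(results)
            analyzeIter(biclassResults)
    else:
        if nbMulticlass > 1:
            analyzeMulticlass(results, statsIter, argumentDictionaries, multiclassLabels, metrics)
        else:
            analyzeBiclass(results)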
@@ -401,7 +415,7 @@ def execBenchmark(nbCores, statsIter, nbMulticlass, argumentsDictionaries, multi
    # Do everything with flagging
    logging.debug("Start:\t Analyzing preds")
    # getResults(results, statsIter, nbMulticlass, argumentsDictionaries, multiclassLabels)
    # getResults(results, statsIter, nbMulticlass, argumentsDictionaries, multiclassLabels, metrics)
    logging.debug("Done:\t Analyzing preds")
    return results
@@ -253,6 +253,7 @@ class Test_genMetricsScores(unittest.TestCase):
        cls.assertEqual(0, multiclassResults[1]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
        cls.assertEqual(cls.score_to_get_f1, multiclassResults[1]["cheese_is_no_disease"]["metricsScores"]["f1_score"])


class Test_getErrorOnLabels(unittest.TestCase):

    @classmethod
@@ -267,6 +268,24 @@ class Test_getErrorOnLabels(unittest.TestCase):
        multiclassResults = ExecClassif.getErrorOnLabels(cls.multiclassResults, cls.true_labels)
        np.testing.assert_array_equal(np.array([1, 0, 1, 1, 1, 1, 0, 0, 0]),
                                      multiclassResults[0]["chicken_is_heaven"]["errorOnExample"])

    def test_full(cls):
        cls.multiclassResults = [{"chicken_is_heaven": {"labels": cls.multiclass_labels},
                                  "cheese_is_no_disease": {"labels": cls.wrong_labels}},
                                 {"chicken_is_heaven": {"labels": cls.wrong_labels},
                                  "cheese_is_no_disease": {"labels": cls.wrong_labels}},
                                 ]
        multiclassResults = ExecClassif.getErrorOnLabels(cls.multiclassResults, cls.true_labels)
        np.testing.assert_array_equal(np.array([1, 0, 1, 1, 1, 1, 0, 0, 0]),
                                      multiclassResults[0]["chicken_is_heaven"]["errorOnExample"])
        np.testing.assert_array_equal(np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]),
                                      multiclassResults[1]["cheese_is_no_disease"]["errorOnExample"])

    def test_type(cls):
        multiclassResults = ExecClassif.getErrorOnLabels(cls.multiclassResults, cls.true_labels)
        cls.assertEqual(type(multiclassResults[0]["chicken_is_heaven"]["errorOnExample"][0]), np.int64)
        np.testing.assert_array_equal(np.array([1, 0, 1, 1, 1, 1, 0, 0, 0]),
                                      multiclassResults[0]["chicken_is_heaven"]["errorOnExample"])
#
# class Essai(unittest.TestCase):
#
# Mono- and Multi-view classification benchmark
[![License: GPL v3](https://img.shields.io/badge/License-GPL%20v3-blue.svg)](http://www.gnu.org/licenses/gpl-3.0)
[![Build Status](https://travis-ci.com/babau1/multiview-machine-learning-omis.svg?token=pjoowx3poApRRtwqHTpd&branch=master)](https://travis-ci.com/babau1/multiview-machine-learning-omis)
This project aims to be an easy-to-use solution for running a prior benchmark on a dataset and evaluating the capacity of mono- and multi-view algorithms to classify it correctly.