Commit 1f065059 authored by bbauvin

tests are running

parent d33b9961
@@ -223,10 +223,7 @@ def genMetricsScoresMulticlass(results, trueLabels, metrics, argumentsDictionari
resultDictionary["labels"][multiclassTestIndices],
multiclass=True)
results[iterIndex][classifierName]["metricsScores"][metric[0]] = [trainScore, testScore]
logging.debug("Done:\t Getting multiclass scores for each metric")
return results
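
For readers skimming this hunk: genMetricsScoresMulticlass fills, for each statistical iteration and classifier, a per-metric [trainScore, testScore] pair. The sketch below reproduces that nested layout under assumed names (gen_metrics_scores_sketch, trainIndices/testIndices), with sklearn's accuracy_score standing in for the project's metric modules; it is an illustration, not the project's actual function.

# Minimal sketch of the nested results layout filled in this hunk.
# Assumption: labels and indices are numpy arrays, and only accuracy_score is handled.
from sklearn.metrics import accuracy_score

def gen_metrics_scores_sketch(results, trueLabels, metrics, trainIndices, testIndices):
    # results: list (one entry per iteration) of {classifierName: resultDictionary}
    for iterIndex, iterResults in enumerate(results):
        for classifierName, resultDictionary in iterResults.items():
            predictedLabels = resultDictionary["labels"]
            for metric in metrics:
                trainScore = accuracy_score(trueLabels[trainIndices], predictedLabels[trainIndices])
                testScore = accuracy_score(trueLabels[testIndices], predictedLabels[testIndices])
                # Same [trainScore, testScore] pairing as in the diff above.
                resultDictionary.setdefault("metricsScores", {})[metric[0]] = [trainScore, testScore]
    return results
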
@@ -318,8 +315,6 @@ def publishMulticlassExmapleErrors(multiclassResults, directories, databaseName,
red_patch = mpatches.Patch(color='red', label='Classifier failed')
green_patch = mpatches.Patch(color='green', label='Classifier succeeded')
plt.legend(handles=[red_patch, green_patch], bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",mode="expand", borderaxespad=0, ncol=2)
# cbar = fig.colorbar(cax, ticks=[0, 1])
# cbar.ax.set_yticklabels(['Wrong', ' Right'])
fig.tight_layout()
fig.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-" + databaseName +"-error_analysis.png", bbox_inches="tight")
plt.close()
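
The legend here is built from proxy patches rather than the commented-out colorbar. A standalone sketch of that matplotlib pattern is shown below; the random data and output file name are purely illustrative.

# Standalone sketch of the red/green proxy-patch legend used for the error-analysis plot.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches

fig, ax = plt.subplots()
# Dummy 0/1 error matrix in place of the real per-example results.
ax.imshow(np.random.randint(0, 2, (10, 5)), cmap="RdYlGn", interpolation="none")
red_patch = mpatches.Patch(color='red', label='Classifier failed')
green_patch = mpatches.Patch(color='green', label='Classifier succeeded')
ax.legend(handles=[red_patch, green_patch], bbox_to_anchor=(0, 1.02, 1, 0.2),
          loc="lower left", mode="expand", borderaxespad=0, ncol=2)
fig.tight_layout()
fig.savefig("error_analysis_example.png", bbox_inches="tight")  # illustrative file name
plt.close(fig)
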
@@ -9,14 +9,14 @@ class Test_getMetricsScoresBiclass(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.metrics = [["accuracy_score"]]
- cls.monoViewResults = [["", ["chicken_is_heaven", "", {"accuracy_score": [0.5,0.7]}]]]
+ cls.monoViewResults = [["", ["chicken_is_heaven", ["View0"], {"accuracy_score": [0.5,0.7]}]]]
cls.multiviewResults = [["Mumbo", {"":""}, {"accuracy_score":[0.6,0.8]}]]
def test_simple(cls):
res = ResultAnalysis.getMetricsScoresBiclass(cls.metrics, cls.monoViewResults, cls.multiviewResults)
cls.assertIn("accuracy_score",res)
cls.assertEqual(type(res["accuracy_score"]), dict)
- cls.assertEqual(res["accuracy_score"]["classifiersNames"], ["chicken_is_heaven", "Mumbo"])
+ cls.assertEqual(res["accuracy_score"]["classifiersNames"], ["chicken_is_heaven-View0", "Mumbo"])
cls.assertEqual(res["accuracy_score"]["trainScores"], [0.5, 0.6])
cls.assertEqual(res["accuracy_score"]["testScores"], [0.7, 0.8])
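
The updated assertions suggest that monoview classifier names are now suffixed with their view name (e.g. "chicken_is_heaven-View0") so they can be told apart from multiview results. A hedged sketch of that naming convention, with a hypothetical helper name, could look like:

# Hypothetical sketch of the classifier-name construction implied by the test fixtures:
# monoview entries carry a list of view names, multiview entries only a classifier name.
def biclass_classifier_names(monoViewResults, multiviewResults):
    names = []
    for monoViewResult in monoViewResults:
        classifierName = monoViewResult[1][0]   # e.g. "chicken_is_heaven"
        viewName = monoViewResult[1][1][0]      # e.g. "View0"
        names.append(classifierName + "-" + viewName)
    for multiviewResult in multiviewResults:
        names.append(multiviewResult[0])        # e.g. "Mumbo"
    return names

# With the fixture data above this returns ["chicken_is_heaven-View0", "Mumbo"],
# matching the updated assertEqual.
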
@@ -35,13 +35,13 @@ class Test_getExampleErrorsBiclass(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.usedBenchmarkArgumentDictionary = {"labels": np.array([0,1,1,-100,-100,0,1,1,-100])}
- cls.monoViewResults = [["", ["chicken_is_heaven", "", {}, np.array([1,1,1,-100,-100,0,1,1,-100])]]]
+ cls.monoViewResults = [["", ["chicken_is_heaven", ["View0"], {}, np.array([1,1,1,-100,-100,0,1,1,-100])]]]
cls.multiviewResults = [["Mumbo", {"":""}, {}, np.array([0,0,1,-100,-100,0,1,1,-100])]]
def test_simple(cls):
res = ResultAnalysis.getExampleErrorsBiclass(cls.usedBenchmarkArgumentDictionary, cls.monoViewResults,
cls.multiviewResults)
- cls.assertIn("chicken_is_heaven", res)
+ cls.assertIn("chicken_is_heaven-View0", res)
cls.assertIn("Mumbo", res)
np.testing.assert_array_equal(res["Mumbo"], np.array([1,0,1,-100,-100,1,1,1,-100]))
- np.testing.assert_array_equal(res["chicken_is_heaven"], np.array([0,1,1,-100,-100,1,1,1,-100]))
\ No newline at end of file
+ np.testing.assert_array_equal(res["chicken_is_heaven-View0"], np.array([0,1,1,-100,-100,1,1,1,-100]))
\ No newline at end of file
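
For context on the expected arrays: the error encoding in these assertions appears to be 1 for a correct prediction, 0 for an error, and -100 for examples that are not part of the biclass problem. A small sketch (hypothetical helper name) that reproduces the expected Mumbo array:

# Hedged sketch of the per-example error encoding the test expects:
# 1 = correct prediction, 0 = error, -100 kept for excluded examples.
import numpy as np

def example_errors_sketch(trueLabels, predictedLabels):
    errors = np.equal(trueLabels, predictedLabels).astype(int)
    errors[trueLabels == -100] = -100
    return errors

# example_errors_sketch(np.array([0,1,1,-100,-100,0,1,1,-100]),
#                       np.array([0,0,1,-100,-100,0,1,1,-100]))
# -> array([   1,    0,    1, -100, -100,    1,    1,    1, -100])
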