Skip to content
Snippets Groups Projects
Commit 91e292f1 authored by bbauvin's avatar bbauvin
Browse files

Biclass analysis is working; need to debug multiclass

parent 0d05a153
Branches
Tags
No related merge requests found
......@@ -295,7 +295,7 @@ def execOneBenchmarkMonoCore(DATASET=None, LABELS_DICTIONARY=None, directory=Non
# NOTE(review): this span is a scraped diff hunk of execBenchmark — the pre-commit signature
# line (ending in ``getResults=getResults):``) and the post-commit one (adding the injectable
# ``delete=DB.deleteHDF5`` keyword used by the tests) both appear below, as do the old
# ``DB.deleteHDF5`` call and its new ``delete(...)`` replacement. Only the newer lines belong
# in the post-commit source — TODO confirm against the repository; the function body is
# truncated by the next hunk marker, so it cannot be reconstructed from this view.
def execBenchmark(nbCores, statsIter, nbMulticlass, benchmarkArgumentsDictionaries, classificationIndices, directories,
directory, multiClassLabels, metrics, labelsDictionary, nbLabels, DATASET,
execOneBenchmark=execOneBenchmark, execOneBenchmark_multicore=execOneBenchmark_multicore,
execOneBenchmarkMonoCore=execOneBenchmarkMonoCore, getResults=getResults):
execOneBenchmarkMonoCore=execOneBenchmarkMonoCore, getResults=getResults, delete=DB.deleteHDF5):
"""Used to execute the needed benchmark(s) on multicore or mono-core functions
The execOneBenchmark and execOneBenchmark_multicore keywords args are only used in the tests"""
# TODO : find a way to flag
......@@ -320,7 +320,7 @@ def execBenchmark(nbCores, statsIter, nbMulticlass, benchmarkArgumentsDictionari
if nbCores > 1:
logging.debug("Start:\t Deleting " + str(nbCores) + " temporary datasets for multiprocessing")
args = benchmarkArgumentsDictionaries[0]["args"]
# old call (pre-commit) followed by the new injectable ``delete`` call (post-commit):
datasetFiles = DB.deleteHDF5(args.pathF, args.name, nbCores)
datasetFiles = delete(args.pathF, args.name, nbCores)
logging.debug("Start:\t Deleting datasets for multiprocessing")
# Do everything with flagging
nbExamples = len(classificationIndices[0][0])+len(classificationIndices[0][1])
......
......@@ -2,6 +2,7 @@ import unittest
import argparse
import os
import numpy as np
import h5py
from sklearn.metrics import accuracy_score
from ..MonoMultiViewClassifiers import ExecClassif
......@@ -36,59 +37,68 @@ class Test_initMonoviewArguments(unittest.TestCase):
arguments = ExecClassif.initMonoviewExps(benchmark, {}, {}, 0, {})
def fakeBenchmarkExec(coreIndex=-1, a=7, args=1):
    """Test stub for execOneBenchmark.

    Ignores ``args`` (accepted only so execBenchmark can pass it through) and
    returns ``[coreIndex, a]`` so tests can assert which call was dispatched.
    """
    # Diff residue fixed: the stale pre-commit ``def`` line (without ``args``)
    # was duplicated above this one; only the post-commit signature is kept.
    return [coreIndex, a]
def fakeBenchmarkExec_mutlicore(nbCores=-1, a=6, args=1):
    """Test stub for execOneBenchmark_multicore.

    Ignores ``args`` and returns ``[nbCores, a]`` for the test assertions.
    (Name keeps the original's "mutlicore" spelling — callers use it as-is.)
    """
    # Diff residue fixed: the stale pre-commit ``def`` line was duplicated
    # above this one; only the post-commit signature (with ``args``) is kept.
    return [nbCores, a]
def fakeBenchmarkExec_monocore(DATASET=1, a=4, args=1):
    """Test stub for execOneBenchmarkMonoCore.

    Ignores ``DATASET`` and ``args``; returns ``[a]`` so tests can assert the
    mono-core path was taken with the expected argument dictionary.
    """
    # Diff residue fixed: both the pre-commit version (returning
    # ``[DATASET, a]``) and this post-commit version appeared interleaved;
    # the post-commit behavior (return ``[a]``) is the one kept, matching the
    # updated assertion ``cls.assertEqual(res, [[4]])`` in the tests below.
    return [a]
def fakegetResults(results, statsIter, nbMulticlass, benchmarkArgumentsDictionaries, multiClassLabels, metrics,
                   classificationIndices, directories, directory, labelsDictionary, nbExamples, nbLabels):
    """Stand-in for the real getResults: every argument is ignored and a
    fixed sentinel is handed back so tests only check it was invoked."""
    sentinel = 3
    return sentinel
def fakeDelete(a, b, c):
    """Stand-in for DB.deleteHDF5 injected via execBenchmark's ``delete``
    keyword; discards its three arguments and returns a fixed sentinel."""
    result = 9
    return result
class Test_execBenchmark(unittest.TestCase):
    """Tests for ExecClassif.execBenchmark with all heavy collaborators
    (benchmark executors, result aggregation, HDF5 cleanup) replaced by the
    fake* stubs defined in this module.

    Reconstructed from interleaved diff residue: old and new versions of
    setUpClass, the execBenchmark call lines, and the assertions all appeared
    side by side; only the post-commit versions (HDF5 dataset fixture,
    ``cls.Dataset`` positional argument, ``delete=fakeDelete``) are kept.
    """

    @classmethod
    def setUpClass(cls):
        # A real (tiny) HDF5 file stands in for the DATASET argument.
        os.mkdir("Code/Tests/tmp_tests")
        cls.Dataset = h5py.File("Code/Tests/tmp_tests/test_file.hdf5", "w")
        cls.labels = cls.Dataset.create_dataset("Labels", data=np.array([0, 1, 2]))
        cls.argumentDictionaries = [{"a": 4, "args": FakeArg()}]

    def test_simple(cls):
        # nbCores=1 -> mono-core path; fakeBenchmarkExec_monocore returns [a].
        res = ExecClassif.execBenchmark(1, 2, 3, cls.argumentDictionaries, [[[1, 2], [3, 4, 5]]],
                                        5, 6, 7, 8, 9, 10, cls.Dataset,
                                        execOneBenchmark=fakeBenchmarkExec,
                                        execOneBenchmark_multicore=fakeBenchmarkExec_mutlicore,
                                        execOneBenchmarkMonoCore=fakeBenchmarkExec_monocore,
                                        getResults=fakegetResults, delete=fakeDelete)
        cls.assertEqual(res, [[4]])

    def test_multiclass_no_iter(cls):
        cls.argumentDictionaries = [{"a": 10, "args": FakeArg()},
                                    {"a": 4, "args": FakeArg()}]
        res = ExecClassif.execBenchmark(2, 1, 2, cls.argumentDictionaries, [[[1, 2], [3, 4, 5]]],
                                        5, 6, 7, 8, 9, 10, cls.Dataset,
                                        execOneBenchmark=fakeBenchmarkExec,
                                        execOneBenchmark_multicore=fakeBenchmarkExec_mutlicore,
                                        execOneBenchmarkMonoCore=fakeBenchmarkExec_monocore,
                                        getResults=fakegetResults, delete=fakeDelete)
        cls.assertEqual(res, [[0, 10], [1, 4]])

    def test_multiclass_and_iter(cls):
        cls.argumentDictionaries = [{"a": 10, "args": FakeArg()}, {"a": 4, "args": FakeArg()},
                                    {"a": 55, "args": FakeArg()}, {"a": 24, "args": FakeArg()}]
        res = ExecClassif.execBenchmark(2, 2, 2, cls.argumentDictionaries, [[[1, 2], [3, 4, 5]]],
                                        5, 6, 7, 8, 9, 10, cls.Dataset,
                                        execOneBenchmark=fakeBenchmarkExec,
                                        execOneBenchmark_multicore=fakeBenchmarkExec_mutlicore,
                                        execOneBenchmarkMonoCore=fakeBenchmarkExec_monocore,
                                        getResults=fakegetResults, delete=fakeDelete)
        cls.assertEqual(res, [[0, 10], [1, 4], [0, 55], [1, 24]])

    def test_no_iter_biclass_multicore(cls):
        res = ExecClassif.execBenchmark(2, 1, 1, cls.argumentDictionaries, [[[1, 2], [3, 4, 5]]],
                                        5, 6, 7, 8, 9, 10, cls.Dataset,
                                        execOneBenchmark=fakeBenchmarkExec,
                                        execOneBenchmark_multicore=fakeBenchmarkExec_mutlicore,
                                        execOneBenchmarkMonoCore=fakeBenchmarkExec_monocore,
                                        getResults=fakegetResults, delete=fakeDelete)
        cls.assertEqual(res, [[2, 4]])

    @classmethod
    def tearDownClass(cls):
        # Remove the temporary HDF5 file and directory created in setUpClass.
        os.remove("Code/Tests/tmp_tests/test_file.hdf5")
        os.rmdir("Code/Tests/tmp_tests")
def fakeExecMono(directory, name, labelsNames, classificationIndices, kFolds, coreIndex, type, pathF, randomState, labels,
hyperParamSearch="try", metrics="try", nIter=1, **arguments):
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment