Commit 68869eaf authored by bbauvin

randomState and no-stats works

parent 91cbd8d3
Showing with 392 additions and 411 deletions
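The commit message is terse, so a short reading aid: the two recurring edits below are (1) the statsIter parameter is dropped from the monoview and multiview call chains, and (2) the single seeded np.random.RandomState built in the main script is threaded through every call instead. A minimal, hypothetical sketch of that pattern follows; the argument and function names are illustrative and not taken from the repository.

import argparse
import numpy as np

parser = argparse.ArgumentParser()
parser.add_argument("--randomState", type=int, default=42)  # illustrative flag name
args = parser.parse_args()

# One shared generator built once from the CLI seed; every consumer receives it.
randomState = np.random.RandomState(args.randomState)

def run_experiment(data, randomState):
    # Drawing from the shared generator keeps the whole run reproducible
    # from the single seed given on the command line.
    indices = randomState.permutation(len(data))
    return data[indices]

print(run_experiment(np.arange(10), randomState))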
@@ -404,6 +404,7 @@ groupLateFusion.add_argument('--FU_L_select_monoview', metavar='STRING', action=
                              help='Determine which method to use to select the monoview classifiers',
                              default="intersect")
+start = time.time()
 args = parser.parse_args()
 os.nice(args.nice)
@@ -412,7 +413,6 @@ statsIter = args.CL_statsiter
 randomState = np.random.RandomState(args.randomState)
 hyperParamSearch = args.CL_HPS_type
-start = time.time()
 if args.name not in ["MultiOmic", "ModifiedMultiOmic", "Caltech", "Fake", "Plausible", "KMultiOmic"]:
     getDatabase = getattr(DB, "getClassicDB" + args.type[1:])
@@ -475,7 +475,7 @@ if nbCores > 1:
     for stepIndex in range(int(math.ceil(float(nbExperiments) / nbCores))):
         resultsMonoview += (Parallel(n_jobs=nbCores)(
             delayed(ExecMonoview_multicore)(directory, args.name, labelsNames, classificationIndices, kFolds,
-                                            coreIndex, args.type, args.pathF, statsIter, randomState,
+                                            coreIndex, args.type, args.pathF, randomState,
                                             hyperParamSearch=hyperParamSearch,
                                             metrics=metrics, nIter=args.CL_GS_iter,
                                             **argumentDictionaries["Monoview"][coreIndex + stepIndex * nbCores])
@@ -490,7 +490,7 @@ if nbCores > 1:
 else:
     resultsMonoview += ([ExecMonoview(directory, DATASET.get("View" + str(arguments["viewIndex"])),
                                       DATASET.get("Labels").value, args.name, labelsNames,
-                                      classificationIndices, kFolds, 1, args.type, args.pathF, statsIter, randomState,
+                                      classificationIndices, kFolds, 1, args.type, args.pathF, randomState,
                                       hyperParamSearch=hyperParamSearch, metrics=metrics, nIter=args.CL_GS_iter,
                                       **arguments)
                         for arguments in argumentDictionaries["Monoview"]])
@@ -511,14 +511,14 @@ if nbCores > 1:
         resultsMultiview += Parallel(n_jobs=nbCores)(
             delayed(ExecMultiview_multicore)(directory, coreIndex, args.name, classificationIndices, kFolds, args.type,
                                              args.pathF,
-                                             LABELS_DICTIONARY, statsIter, randomState, hyperParamSearch=hyperParamSearch,
+                                             LABELS_DICTIONARY, randomState, hyperParamSearch=hyperParamSearch,
                                              metrics=metrics, nIter=args.CL_GS_iter,
                                              **argumentDictionaries["Multiview"][stepIndex * nbCores + coreIndex])
             for coreIndex in range(min(nbCores, nbExperiments - stepIndex * nbCores)))
 else:
     resultsMultiview = [
         ExecMultiview(directory, DATASET, args.name, classificationIndices, kFolds, 1, args.type, args.pathF,
-                      LABELS_DICTIONARY, statsIter, randomState, hyperParamSearch=hyperParamSearch,
+                      LABELS_DICTIONARY, randomState, hyperParamSearch=hyperParamSearch,
                       metrics=metrics, nIter=args.CL_GS_iter, **arguments) for arguments in
         argumentDictionaries["Multiview"]]
 multiviewTime = time.time() - monoviewTime - dataBaseTime - start
@@ -536,3 +536,10 @@ analyzeLabels(labels, trueLabels, results, directory)
 logging.debug("Start:\t Analyze Global Results")
 resultAnalysis(benchmark, results, args.name, times, metrics, directory)
 logging.debug("Done:\t Analyze Global Results")
+globalAnalysisTime = time.time() - monoviewTime - dataBaseTime - start - multiviewTime
+totalTime = time.time() - start
+logging.info("Extraction time : "+str(dataBaseTime)+
+             "s, Monoview time : "+str(monoviewTime)+
+             "s, Multiview Time : "+str(multiviewTime)+
+             "s, Global Analysis Time : "+str(globalAnalysisTime)+
+             "s, Total Duration : "+str(totalTime)+"s")
\ No newline at end of file
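The lines added at the end of the main script recover each phase's duration by subtracting the already-measured phases from one start timestamp. A small standalone sketch of that bookkeeping, with sleep() standing in for the real phases (names are illustrative, not repository code):

import time

start = time.time()

time.sleep(0.1)                                    # stand-in for database extraction
dataBaseTime = time.time() - start

time.sleep(0.1)                                    # stand-in for monoview benchmarks
monoviewTime = time.time() - dataBaseTime - start

time.sleep(0.1)                                    # stand-in for multiview benchmarks
multiviewTime = time.time() - monoviewTime - dataBaseTime - start

totalTime = time.time() - start
print("Extraction : %.2fs, Monoview : %.2fs, Multiview : %.2fs, Total : %.2fs"
      % (dataBaseTime, monoviewTime, multiviewTime, totalTime))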
@@ -218,7 +218,7 @@ def calcSURFSIFTDescriptors(dfImages, boolSIFT):
         elif(float(i)/float(len(npImages))>0.5 and bool_Progress==False):
             logging.debug(feat + "50% of images processed (Keypoints)")
             bool_Progress = None
-        elif(float(i)/float(len(npImages))>0.75 and bool_Progress==None):
+        elif float(i)/float(len(npImages))>0.75 and bool_Progress==None:
             logging.debug(feat + "75% of images processed (Keypoints)")
             bool_Progress = NotImplemented
...
@@ -30,7 +30,7 @@ __status__ = "Prototype" # Production, Development, Prototype
 __date__ = 2016-03-25
-def ExecMonoview_multicore(directory, name, labelsNames, classificationIndices, KFolds, datasetFileIndex, databaseType, path, statsIter, randomState, hyperParamSearch="randomizedSearch",
+def ExecMonoview_multicore(directory, name, labelsNames, classificationIndices, KFolds, datasetFileIndex, databaseType, path, randomState, hyperParamSearch="randomizedSearch",
                            metrics=[["accuracy_score", None]], nIter=30, **args):
     DATASET = h5py.File(path+name+str(datasetFileIndex)+".hdf5", "r")
     kwargs = args["args"]
@@ -38,11 +38,11 @@ def ExecMonoview_multicore(directory, name, labelsNames, classificationIndices,
     neededViewIndex = views.index(kwargs["feat"])
     X = DATASET.get("View"+str(neededViewIndex))
     Y = DATASET.get("Labels").value
-    return ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices, KFolds, 1, databaseType, path, statsIter, randomState, hyperParamSearch=hyperParamSearch,
+    return ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices, KFolds, 1, databaseType, path, randomState, hyperParamSearch=hyperParamSearch,
                         metrics=metrics, nIter=nIter, **args)
-def ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices, KFolds, nbCores, databaseType, path, statsIter, randomState, hyperParamSearch="randomizedSearch",
+def ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices, KFolds, nbCores, databaseType, path, randomState, hyperParamSearch="randomizedSearch",
                  metrics=[["accuracy_score", None]], nIter=30, **args):
     logging.debug("Start:\t Loading data")
     try:
@@ -115,7 +115,7 @@ def ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices, KFol
     stringAnalysis, imagesAnalysis, metricsScores = execute(name, classificationIndices, KFolds, nbCores, hyperParamSearch, metrics, nIter, feat, CL_type,
                                                             clKWARGS, labelsNames, X.shape,
-                                                            y_train, y_train_pred, y_test, y_test_pred, t_end, statsIter, randomState)
+                                                            y_train, y_train_pred, y_test, y_test_pred, t_end, randomState)
     cl_desc = [value for key, value in sorted(clKWARGS.iteritems())]
     logging.debug("Done:\t Getting Results")
     logging.info(stringAnalysis)
@@ -151,7 +151,7 @@ def ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices, KFol
     logging.info("Done:\t Result Analysis")
     viewIndex = args["viewIndex"]
-    return viewIndex, [CL_type, cl_desc+[feat], metricsScores, full_labels, cl_res]
+    return viewIndex, [CL_type, cl_desc+[feat], metricsScores, full_labels, clKWARGS]
     # # Classification Report with Precision, Recall, F1 , Support
     # logging.debug("Info:\t Classification report:")
...
@@ -43,7 +43,7 @@ def getMetricScore(metric, y_train, y_train_pred, y_test, y_test_pred):
 def execute(name, learningRate, KFolds, nbCores, gridSearch, metrics, nIter, feat, CL_type, clKWARGS, classLabelsNames,
-            shape, y_train, y_train_pred, y_test, y_test_pred, time, statsIter, randomState):
+            shape, y_train, y_train_pred, y_test, y_test_pred, time, randomState):
     metricsScores = {}
     metricModule = getattr(Metrics, metrics[0][0])
     trainScore = metricModule.score(y_train, y_train_pred)
@@ -52,7 +52,7 @@ def execute(name, learningRate, KFolds, nbCores, gridSearch, metrics, nIter, fea
     # val = np.mean(testScores)
     stdTrain = "nan" #np.std(trainScores)
     stdTest = "nan" #np.std(testScores)
-    stringAnalysis = "Classification on "+name+" database for "+feat+" with "+CL_type+", random state is "+str(randomState)+", and "+str(statsIter)+" statistical iterations\n\n"
+    stringAnalysis = "Classification on "+name+" database for "+feat+" with "+CL_type+", random state is "+str(randomState)+".\n\n"
     stringAnalysis += metrics[0][0]+" on train : "+str(trainScore)+", with STD : "+str(stdTrain)+"\n"+metrics[0][0]+" on test : "+str(testScore)+", with STD : "+str(stdTest)+"\n\n"
     stringAnalysis += getDBConfigString(name, feat, learningRate, shape, classLabelsNames, KFolds)
     stringAnalysis += getClassifierConfigString(CL_type, gridSearch, nbCores, nIter, clKWARGS)
...
@@ -21,28 +21,28 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
-def ExecMultiview_multicore(directory, coreIndex, name, learningRate, nbFolds, databaseType, path, LABELS_DICTIONARY, statsIter, randomState,
+def ExecMultiview_multicore(directory, coreIndex, name, learningRate, nbFolds, databaseType, path, LABELS_DICTIONARY, randomState,
                             hyperParamSearch=False, nbCores=1, metrics=None, nIter=30, **arguments):
     DATASET = h5py.File(path+name+str(coreIndex)+".hdf5", "r")
-    return ExecMultiview(directory, DATASET, name, learningRate, nbFolds, 1, databaseType, path, LABELS_DICTIONARY, statsIter, randomState,
+    return ExecMultiview(directory, DATASET, name, learningRate, nbFolds, 1, databaseType, path, LABELS_DICTIONARY, randomState,
                          hyperParamSearch=hyperParamSearch, metrics=metrics, nIter=nIter, **arguments)
-def ExecMultiview(directory, DATASET, name, classificationIndices, KFolds, nbCores, databaseType, path, LABELS_DICTIONARY, statsIter, randomState,
+def ExecMultiview(directory, DATASET, name, classificationIndices, KFolds, nbCores, databaseType, path, LABELS_DICTIONARY, randomState,
                   hyperParamSearch=False, metrics=None, nIter=30, **kwargs):
-    datasetLength = DATASET.get("Metadata").attrs["datasetLength"]
-    NB_VIEW = kwargs["NB_VIEW"]
+    # datasetLength = DATASET.get("Metadata").attrs["datasetLength"]
+    # NB_VIEW = kwargs["NB_VIEW"]
     views = kwargs["views"]
     viewsIndices = kwargs["viewsIndices"]
-    NB_CLASS = DATASET.get("Metadata").attrs["nbClass"]
+    # NB_CLASS = DATASET.get("Metadata").attrs["nbClass"]
     if not metrics:
         metrics = [["f1_score", None]]
-    metric = metrics[0]
+    # metric = metrics[0]
     CL_type = kwargs["CL_type"]
-    LABELS_NAMES = kwargs["LABELS_NAMES"]
+    # LABELS_NAMES = kwargs["LABELS_NAMES"]
     classificationKWARGS = kwargs[CL_type+"KWARGS"]
-    learningRate = len(classificationIndices[0])/(len(classificationIndices[0])+len(classificationIndices[1]))
+    learningRate = len(classificationIndices[0])/float((len(classificationIndices[0])+len(classificationIndices[1])))
     t_start = time.time()
     logging.info("### Main Programm for Multiview Classification")
     logging.info("### Classification - Database : " + str(name) + " ; Views : " + ", ".join(views) +
@@ -65,13 +65,13 @@ def ExecMultiview(directory, DATASET, name, classificationIndices, KFolds, nbCor
     classifierClass = getattr(classifierModule, CL_type)
     analysisModule = getattr(classifierPackage, "analyzeResults")
-    logging.info("Train ratio : " + str(learningRate))
+    # logging.info("Train ratio : " + str(learningRate))
     # iValidationIndices = [DB.splitDataset(DATASET, classificationIndices, datasetLength, randomState) for _ in range(statsIter)]
     # iLearningIndices = [[index for index in range(datasetLength) if index not in validationIndices] for validationIndices in iValidationIndices]
     # iClassificationSetLength = [len(learningIndices) for learningIndices in iLearningIndices]
     # logging.info("Done:\t Determine validation split")
-    logging.info("CV On " + str(KFolds.n_splits) + " folds")
+    # logging.info("CV On " + str(KFolds.n_splits) + " folds")
     # if KFolds != 1:
     #     iKFolds = [DB.getKFoldIndices(KFolds, DATASET.get("Labels")[...], NB_CLASS, learningIndices, randomState) for learningIndices in iLearningIndices]
     # else:
@@ -89,10 +89,15 @@ def ExecMultiview(directory, DATASET, name, classificationIndices, KFolds, nbCor
     if hyperParamSearch != "None":
         classifier = searchBestSettings(DATASET, CL_type, metrics, learningIndices, KFolds, randomState, viewsIndices=viewsIndices, searchingTool=hyperParamSearch, nIter=nIter, **classificationKWARGS)
     else:
-        classifier = classifierClass(NB_CORES=nbCores, **classificationKWARGS)
+        classifier = classifierClass(randomState, NB_CORES=nbCores, **classificationKWARGS)
     classifier.fit_hdf5(DATASET, trainIndices=learningIndices, viewsIndices=viewsIndices)
     trainLabels = classifier.predict_hdf5(DATASET, usedIndices=learningIndices, viewsIndices=viewsIndices)
+    # try:
+    #     if "MajorityVoting" == classificationKWARGS["fusionMethod"]:
+    #         import pdb; pdb.set_trace()
+    # except:
+    #     pass
     testLabels = classifier.predict_hdf5(DATASET, usedIndices=validationIndices, viewsIndices=viewsIndices)
     fullLabels = classifier.predict_hdf5(DATASET, viewsIndices=viewsIndices)
     # trainLabelsIterations.append(trainLabels)
@@ -113,7 +118,7 @@ def ExecMultiview(directory, DATASET, name, classificationIndices, KFolds, nbCor
                                    classificationKWARGS, classificationIndices,
                                    LABELS_DICTIONARY, views, nbCores, times,
                                    name, KFolds,
-                                   hyperParamSearch, nIter, metrics, statsIter,
+                                   hyperParamSearch, nIter, metrics,
                                    viewsIndices, randomState)
     labelsSet = set(LABELS_DICTIONARY.values())
     logging.info(stringAnalysis)
...
@@ -75,7 +75,7 @@ def getArgs(args, benchmark, views, viewsIndices, randomState, directory, result
         fusionTypePackage = getattr(Methods, fusionType+"Package")
         for fusionMethod in benchmark["Multiview"]["Fusion"]["Methods"][fusionType]:
             fusionMethodModule = getattr(fusionTypePackage, fusionMethod)
-            arguments = fusionMethodModule.getArgs(args, views, viewsIndices, directory, resultsMonoview)
+            arguments = fusionMethodModule.getArgs(benchmark, args, views, viewsIndices, directory, resultsMonoview)
             argumentsList+= arguments
     return argumentsList
@@ -86,7 +86,7 @@ def makeMonoviewData_hdf5(DATASET, weights=None, usedIndices=None, viewsIndices=
     if not usedIndices:
         usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
     NB_VIEW = len(viewsIndices)
-    if weights==None:
+    if weights is None:
         weights = np.array([1/NB_VIEW for i in range(NB_VIEW)])
     if sum(weights)!=1:
         weights = weights/sum(weights)
@@ -168,7 +168,7 @@ class Fusion:
     #     return fusionType, fusionMethod, classifier
     def predict_hdf5(self, DATASET, usedIndices=None, viewsIndices=None):
-        if usedIndices == None:
+        if usedIndices is None:
             usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
         if type(viewsIndices)==type(None):
             viewsIndices = np.arange(DATASET.get("Metadata").attrs["nbView"])
@@ -176,7 +176,7 @@ class Fusion:
         return predictedLabels
     def predict_probas_hdf5(self, DATASET, usedIndices=None):
-        if usedIndices == None:
+        if usedIndices is None:
             usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
         if usedIndices:
             predictedLabels = self.classifier.predict_probas_hdf5(DATASET, usedIndices=usedIndices)
...
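Several hunks in this commit replace == None comparisons with is None. For plain Python objects the two usually agree, but once weights (or the index arrays) can be NumPy arrays, array == None is an elementwise comparison and cannot be used as a truth value. A standalone illustration of the difference, not repository code:

import numpy as np

weights = np.array([0.3, 0.7])

# Elementwise comparison: yields array([False, False]); putting it in an `if`
# raises "the truth value of an array ... is ambiguous".
print(weights == None)

# Identity check: unambiguous, and behaves the same for None, lists and arrays.
if weights is None:
    weights = np.ones(2) / 2
print(weights)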
@@ -18,13 +18,12 @@ def genParamsSets(classificationKWARGS, randomState, nIter=1):
     return paramsSets
-def getArgs(args, views, viewsIndices, directory, resultsMonoview):
+def getArgs(benchmark, args, views, viewsIndices, directory, resultsMonoview):
     argumentsList = []
     if args.FU_E_cl_names != ['']:
         pass
     else:
-        monoviewClassifierModulesNames = [name for _, name, isPackage in pkgutil.iter_modules(['MonoviewClassifiers'])
-                                          if (not isPackage)]
+        monoviewClassifierModulesNames = benchmark["Monoview"]
         args.FU_E_cl_names = monoviewClassifierModulesNames
         args.FU_E_cl_config = [None for _ in monoviewClassifierModulesNames]
     for classifierName, classifierConfig in zip(args.FU_E_cl_names, args.FU_E_cl_config):
@@ -85,7 +84,7 @@ class WeightedLinear(EarlyFusionClassifier):
     def __init__(self, randomState, NB_CORES=1, **kwargs):
         EarlyFusionClassifier.__init__(self, randomState, kwargs['classifiersNames'], kwargs['classifiersConfigs'],
                                        NB_CORES=NB_CORES)
-        if kwargs['fusionMethodConfig']==None:
+        if kwargs['fusionMethodConfig'] is None:
            self.weights = np.ones(len(kwargs["classifiersNames"]), dtype=float)
         elif kwargs['fusionMethodConfig']==['']:
             self.weights = np.ones(len(kwargs["classifiersNames"]), dtype=float)
@@ -112,7 +111,7 @@ class WeightedLinear(EarlyFusionClassifier):
         if type(viewsIndices)==type(None):
             viewsIndices = np.arange(DATASET.get("Metadata").attrs["nbView"])
         self.weights = self.weights/float(np.sum(self.weights))
-        if usedIndices == None:
+        if usedIndices is None:
             usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
         self.makeMonoviewData_hdf5(DATASET, weights=self.weights, usedIndices=usedIndices, viewsIndices=viewsIndices)
         predictedLabels = self.monoviewClassifier.predict(self.monoviewData)
...
@@ -23,7 +23,7 @@ def canProbasClassifier(classifierConfig):
 def fitMonoviewClassifier(classifierName, data, labels, classifierConfig, needProbas, randomState):
-    if type(classifierConfig[0])==dict:
+    if type(classifierConfig) == dict:
         monoviewClassifier = getattr(MonoviewClassifiers, classifierName)
         if needProbas and not monoviewClassifier.canProbas():
             monoviewClassifier = getattr(MonoviewClassifiers, "DecisionTree")
@@ -35,14 +35,14 @@ def fitMonoviewClassifier(classifierName, data, labels, classifierConfig, needPr
                                                  enumerate(classifierConfig
                                                            )))
         return classifier
-    else:
-        if needProbas and not canProbasClassifier(classifierConfig):
-            monoviewClassifier = getattr(MonoviewClassifiers, "DecisionTree")
-            DTConfig = {"0":300, "1":"entropy", "2":"random"}
-            classifier = monoviewClassifier.fit(data,labels, randomState,DTConfig)
-            return classifier
-        else:
-            return classifierConfig
+    # else:
+    #     if needProbas and not canProbasClassifier(classifierConfig):
+    #         monoviewClassifier = getattr(MonoviewClassifiers, "DecisionTree")
+    #         DTConfig = {"0":300, "1":"entropy", "2":"random"}
+    #         classifier = monoviewClassifier.fit(data,labels, randomState,DTConfig)
+    #         return classifier
+    #     else:
+    #         return classifierConfig
@@ -122,12 +122,12 @@ def getClassifiers(selectionMethodName, allClassifiersNames, directory, viewsInd
 def getConfig(classifiersNames, resultsMonoview):
-    classifiers = [0 for _ in range(len(classifiersNames))]
+    classifiersConfigs = [0 for _ in range(len(classifiersNames))]
     for viewIndex, classifierName in enumerate(classifiersNames):
         for resultMonoview in resultsMonoview:
             if resultMonoview[0]==viewIndex and resultMonoview[1][0]==classifierName:
-                classifiers[viewIndex]=resultMonoview[1][4]
-    return classifiers
+                classifiersConfigs[viewIndex]=resultMonoview[1][4]
+    return classifiersConfigs
 def jambon(fromage):
     pass
@@ -149,7 +149,7 @@ class LateFusionClassifier(object):
     def fit_hdf5(self, DATASET, trainIndices=None, viewsIndices=None):
         if type(viewsIndices)==type(None):
             viewsIndices = np.arange(DATASET.get("Metadata").attrs["nbView"])
-        if trainIndices == None:
+        if trainIndices is None:
             trainIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
         # monoviewSelectionMethod = locals()[self.monoviewSelection]
         # self.monoviewClassifiers = monoviewSelectionMethod()
...
@@ -25,24 +25,23 @@ def genParamsSets(classificationKWARGS, randomState, nIter=1):
     # fusionMethodConfig = args.FU_method_config
     # return classifiersNames, classifiersConfig, fusionMethodConfig
-def getArgs(args, views, viewsIndices, directory, resultsMonoview):
+def getArgs(benchmark, args, views, viewsIndices, directory, resultsMonoview):
     if args.FU_L_cl_names!=['']:
         args.FU_L_select_monoview = "user_defined"
     else:
-        monoviewClassifierModulesNames = [name for _, name, isPackage in pkgutil.iter_modules(['MonoviewClassifiers'])
-                                          if (not isPackage)]
+        monoviewClassifierModulesNames = benchmark["Monoview"]
         args.FU_L_cl_names = getClassifiers(args.FU_L_select_monoview, monoviewClassifierModulesNames, directory, viewsIndices)
     monoviewClassifierModules = [getattr(MonoviewClassifiers, classifierName)
                                  for classifierName in args.FU_L_cl_names]
-    if args.FU_L_cl_names==[""] and args.CL_type == ["Multiview"]:
-        raise AttributeError("You must perform Monoview classification or specify "
-                             "which monoview classifier to use Late Fusion")
     if args.FU_L_cl_config != ['']:
         classifiersConfigs = [monoviewClassifierModule.getKWARGS([arg.split(":") for arg in classifierConfig.split(",")])
                               for monoviewClassifierModule,classifierConfig
                               in zip(monoviewClassifierModules,args.FU_L_cl_config)]
     else:
         classifiersConfigs = getConfig(args.FU_L_cl_names, resultsMonoview)
+    if args.FU_L_cl_names==[""] and args.CL_type == ["Multiview"]:
+        raise AttributeError("You must perform Monoview classification or specify "
+                             "which monoview classifier to use Late Fusion")
     arguments = {"CL_type": "Fusion",
                  "views": views,
                  "NB_VIEW": len(views),
@@ -85,8 +84,8 @@ class BayesianInference(LateFusionClassifier):
                                       NB_CORES=NB_CORES)
         # self.weights = np.array(map(float, kwargs['fusionMethodConfig'][0]))
-        if kwargs['fusionMethodConfig'][0]==None or kwargs['fusionMethodConfig']==['']:
-            self.weights = [1.0 for classifier in kwargs['classifiersNames']]
+        if kwargs['fusionMethodConfig'][0] is None or kwargs['fusionMethodConfig']==['']:
+            self.weights = np.array([1.0 for classifier in kwargs['classifiersNames']])
         else:
             self.weights = np.array(map(float, kwargs['fusionMethodConfig'][0]))
         self.needProbas = True
@@ -95,13 +94,14 @@ class BayesianInference(LateFusionClassifier):
         self.weights = paramsSet[0]
     def predict_hdf5(self, DATASET, usedIndices=None, viewsIndices=None):
-        if type(viewsIndices)==type(None):
+        if viewsIndices is None:
             viewsIndices = np.arange(DATASET.get("Metadata").attrs["nbView"])
-        self.weights = self.weights/float(max(self.weights))
+        # self.weights /= float(max(self.weights))
         nbView = len(viewsIndices)
-        if usedIndices == None:
+        if usedIndices is None:
             usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
         if sum(self.weights)!=1.0:
+            print self.weights
             self.weights = self.weights/sum(self.weights)
         viewScores = np.zeros((nbView, len(usedIndices), DATASET.get("Metadata").attrs["nbClass"]))
...
@@ -16,12 +16,11 @@ def genParamsSets(classificationKWARGS, randomState, nIter=1):
     return paramsSets
-def getArgs(args, views, viewsIndices, directory, resultsMonoview):
+def getArgs(benchmark, args, views, viewsIndices, directory, resultsMonoview):
     if args.FU_L_cl_names!=['']:
         pass
     else:
-        monoviewClassifierModulesNames = [name for _, name, isPackage in pkgutil.iter_modules(['MonoviewClassifiers'])
-                                          if (not isPackage)]
+        monoviewClassifierModulesNames = benchmark["Monoview"]
         args.FU_L_cl_names = getClassifiers(args.FU_L_select_monoview, monoviewClassifierModulesNames, directory, viewsIndices)
     monoviewClassifierModules = [getattr(MonoviewClassifiers, classifierName)
                                  for classifierName in args.FU_L_cl_names]
@@ -71,24 +70,24 @@ class MajorityVoting(LateFusionClassifier):
     def __init__(self, randomState, NB_CORES=1, **kwargs):
         LateFusionClassifier.__init__(self, randomState, kwargs['classifiersNames'], kwargs['classifiersConfigs'], kwargs["monoviewSelection"],
                                       NB_CORES=NB_CORES)
-        if kwargs['fusionMethodConfig'][0]==None or kwargs['fusionMethodConfig']==['']:
+        if kwargs['fusionMethodConfig'][0] is None or kwargs['fusionMethodConfig']==['']:
             self.weights = np.ones(len(kwargs["classifiersNames"]), dtype=float)
         else:
             self.weights = np.array(map(float, kwargs['fusionMethodConfig'][0]))
     def setParams(self, paramsSet):
-        self.weights = paramsSet[0]
+        self.weights = np.array(paramsSet[0])
     def predict_hdf5(self, DATASET, usedIndices=None, viewsIndices=None):
         if type(viewsIndices)==type(None):
             viewsIndices = np.arange(DATASET.get("Metadata").attrs["nbView"])
         nbView = len(viewsIndices)
-        self.weights = self.weights/float(max(self.weights))
-        if usedIndices == None:
+        self.weights /= float(sum(self.weights))
+        if usedIndices is None:
             usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
         datasetLength = len(usedIndices)
-        votes = np.zeros((datasetLength, DATASET.get("Metadata").attrs["nbClass"]), dtype=int)
+        votes = np.zeros((datasetLength, DATASET.get("Metadata").attrs["nbClass"]), dtype=float)
         monoViewDecisions = np.zeros((len(usedIndices),nbView), dtype=int)
         for index, viewIndex in enumerate(viewsIndices):
             monoViewDecisions[:, index] = self.monoviewClassifiers[index].predict(
@@ -112,7 +111,7 @@ class MajorityVoting(LateFusionClassifier):
         return predictedLabels
     def getConfig(self, fusionMethodConfig, monoviewClassifiersNames,monoviewClassifiersConfigs):
-        configString = "with Majority Voting \n\t-With monoview classifiers : "
+        configString = "with Majority Voting \n\t-With weights : "+str(self.weights)+"\n\t-With monoview classifiers : "
         for monoviewClassifierConfig, monoviewClassifierName in zip(monoviewClassifiersConfigs, monoviewClassifiersNames):
             monoviewClassifierModule = getattr(MonoviewClassifiers, monoviewClassifierName)
             configString += monoviewClassifierModule.getConfig(monoviewClassifierConfig)
...
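The MajorityVoting changes above switch the weight normalisation from the maximum to the sum, so each example's votes add up to 1 and the votes array has to hold floats. A standalone sketch of weighted voting under that convention, with made-up decisions rather than repository data:

import numpy as np

# Made-up per-view predictions for 3 examples over 3 views, 2 classes.
monoViewDecisions = np.array([[0, 0, 1],
                              [1, 1, 0],
                              [1, 0, 0]])
weights = np.array([1.0, 2.0, 1.0])
weights /= float(sum(weights))            # sum-normalisation: weights now add to 1

votes = np.zeros((3, 2), dtype=float)     # float, since each vote is a fraction
for exampleIndex, decisions in enumerate(monoViewDecisions):
    for viewIndex, label in enumerate(decisions):
        votes[exampleIndex, label] += weights[viewIndex]

predicted = np.argmax(votes, axis=1)
print(votes)
print(predicted)                          # weighted-majority label per example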
@@ -27,12 +27,11 @@ def genParamsSets(classificationKWARGS, randomState, nIter=1):
     return paramsSets
-def getArgs(args, views, viewsIndices, directory, resultsMonoview):
+def getArgs(benchmark, args, views, viewsIndices, directory, resultsMonoview):
     if args.FU_L_cl_names!=['']:
         pass
     else:
-        monoviewClassifierModulesNames = [name for _, name, isPackage in pkgutil.iter_modules(['MonoviewClassifiers'])
-                                          if (not isPackage)]
+        monoviewClassifierModulesNames = benchmark["Monoview"]
         args.FU_L_cl_names = getClassifiers(args.FU_L_select_monoview, monoviewClassifierModulesNames, directory, viewsIndices)
     monoviewClassifierModules = [getattr(MonoviewClassifiers, classifierName)
                                  for classifierName in args.FU_L_cl_names]
@@ -87,7 +86,7 @@ class SCMForLinear(LateFusionClassifier):
                                       NB_CORES=NB_CORES)
         self.SCMClassifier = None
         # self.config = kwargs['fusionMethodConfig'][0]
-        if kwargs['fusionMethodConfig'][0]==None or kwargs['fusionMethodConfig']==['']:
+        if kwargs['fusionMethodConfig'][0] is None or kwargs['fusionMethodConfig']==['']:
             self.p = 1
             self.maxAttributes = 5
             self.order = 1
@@ -105,28 +104,27 @@ class SCMForLinear(LateFusionClassifier):
         self.modelType = paramsSet[2]
     def fit_hdf5(self, DATASET, trainIndices=None, viewsIndices=None):
-        if type(viewsIndices)==type(None):
+        if viewsIndices is None:
             viewsIndices = np.arange(DATASET.get("Metadata").attrs["nbView"])
-        if trainIndices == None:
+        if trainIndices is None:
             trainIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
-        if type(self.monoviewClassifiersConfigs[0])==dict:
+        # if type(self.monoviewClassifiersConfigs[0])==dict:
         for index, viewIndex in enumerate(viewsIndices):
             monoviewClassifier = getattr(MonoviewClassifiers, self.monoviewClassifiersNames[index])
             self.monoviewClassifiers.append(
                 monoviewClassifier.fit(getV(DATASET, viewIndex, trainIndices),
-                                       DATASET.get("Labels")[trainIndices],
+                                       DATASET.get("Labels").value[trainIndices], self.randomState,
                                        NB_CORES=self.nbCores,
-                                       **dict((str(configIndex), config) for configIndex, config in
-                                              enumerate(self.monoviewClassifiersConfigs[index]))))
-        else:
-            self.monoviewClassifiers = self.monoviewClassifiersConfigs
+                                       **self.monoviewClassifiersConfigs[index]))
+        # else:
+        #     self.monoviewClassifiers = self.monoviewClassifiersConfigs
         self.SCMForLinearFusionFit(DATASET, usedIndices=trainIndices, viewsIndices=viewsIndices)
     def predict_hdf5(self, DATASET, usedIndices=None, viewsIndices=None):
-        if type(viewsIndices)==type(None):
+        if viewsIndices is None:
             viewsIndices = np.arange(DATASET.get("Metadata").attrs["nbView"])
         nbView = len(viewsIndices)
-        if usedIndices == None:
+        if usedIndices is None:
             usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
         monoviewDecisions = np.zeros((len(usedIndices), nbView), dtype=int)
         accus=[]
...
@@ -17,12 +17,11 @@ def genParamsSets(classificationKWARGS, randomState, nIter=1):
 # def gridSearch(DATASET, classificationKWARGS, trainIndices, nIter=30, viewsIndices=None):
 #     return None
-def getArgs(args, views, viewsIndices, directory, resultsMonoview):
+def getArgs(benchmark, args, views, viewsIndices, directory, resultsMonoview):
     if args.FU_L_cl_names!=['']:
         pass
     else:
-        monoviewClassifierModulesNames = [name for _, name, isPackage in pkgutil.iter_modules(['MonoviewClassifiers'])
-                                          if (not isPackage)]
+        monoviewClassifierModulesNames = benchmark["Monoview"]
         args.FU_L_cl_names = getClassifiers(args.FU_L_select_monoview, monoviewClassifierModulesNames, directory, viewsIndices)
     monoviewClassifierModules = [getattr(MonoviewClassifiers, classifierName)
                                  for classifierName in args.FU_L_cl_names]
@@ -54,16 +53,16 @@ class SVMForLinear(LateFusionClassifier):
         self.SVMClassifier = None
     def fit_hdf5(self, DATASET, trainIndices=None, viewsIndices=None):
-        if type(viewsIndices)==type(None):
+        if viewsIndices is None:
             viewsIndices = np.arange(DATASET.get("Metadata").attrs["nbView"])
-        if trainIndices == None:
+        if trainIndices is None:
             trainIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
         if type(self.monoviewClassifiersConfigs[0])==dict:
             for index, viewIndex in enumerate(viewsIndices):
                 monoviewClassifier = getattr(MonoviewClassifiers, self.monoviewClassifiersNames[index])
                 self.monoviewClassifiers.append(
                     monoviewClassifier.fit(getV(DATASET, viewIndex, trainIndices),
-                                           DATASET.get("Labels")[trainIndices],
+                                           DATASET.get("Labels").value[trainIndices], self.randomState,
                                            NB_CORES=self.nbCores,
                                            **dict((str(configIndex), config) for configIndex, config in
                                                   enumerate(self.monoviewClassifiersConfigs[index]))))
@@ -75,10 +74,10 @@ class SVMForLinear(LateFusionClassifier):
         pass
     def predict_hdf5(self, DATASET, usedIndices=None, viewsIndices=None):
-        if type(viewsIndices)==type(None):
+        if viewsIndices is None:
             viewsIndices = np.arange(DATASET.get("Metadata").attrs["nbView"])
         nbView = len(viewsIndices)
-        if usedIndices == None:
+        if usedIndices is None:
             usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
         monoviewDecisions = np.zeros((len(usedIndices), nbView), dtype=int)
         for index, viewIndex in enumerate(viewsIndices):
...
@@ -16,12 +16,11 @@ def genParamsSets(classificationKWARGS, randomState, nIter=1):
     return paramsSets
-def getArgs(args, views, viewsIndices, directory, resultsMonoview):
+def getArgs(benchmark, args, views, viewsIndices, directory, resultsMonoview):
     if args.FU_L_cl_names!=['']:
         pass
     else:
-        monoviewClassifierModulesNames = [name for _, name, isPackage in pkgutil.iter_modules(['MonoviewClassifiers'])
-                                          if (not isPackage)]
+        monoviewClassifierModulesNames = benchmark["Monoview"]
         args.FU_L_cl_names = getClassifiers(args.FU_L_select_monoview, monoviewClassifierModulesNames, directory, viewsIndices)
     monoviewClassifierModules = [getattr(MonoviewClassifiers, classifierName)
                                  for classifierName in args.FU_L_cl_names]
@@ -51,7 +50,7 @@ class WeightedLinear(LateFusionClassifier):
     def __init__(self, randomState, NB_CORES=1, **kwargs):
         LateFusionClassifier.__init__(self, randomState, kwargs['classifiersNames'], kwargs['classifiersConfigs'], kwargs["monoviewSelection"],
                                       NB_CORES=NB_CORES)
-        if kwargs['fusionMethodConfig'][0]==None or kwargs['fusionMethodConfig']==['']:
+        if kwargs['fusionMethodConfig'][0] is None or kwargs['fusionMethodConfig']==['']:
             self.weights = np.ones(len(kwargs["classifiersNames"]), dtype=float)
         else:
             self.weights = np.array(map(float, kwargs['fusionMethodConfig'][0]))
@@ -61,11 +60,11 @@ class WeightedLinear(LateFusionClassifier):
         self.weights = paramsSet[0]
     def predict_hdf5(self, DATASET, usedIndices=None, viewsIndices=None):
-        if type(viewsIndices)==type(None):
+        if viewsIndices is None:
             viewsIndices = np.arange(DATASET.get("Metadata").attrs["nbView"])
         nbView = len(viewsIndices)
         self.weights = self.weights/float(sum(self.weights))
-        if usedIndices == None:
+        if usedIndices is None:
             usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
         viewScores = np.zeros((nbView, len(usedIndices), DATASET.get("Metadata").attrs["nbClass"]))
         for index, viewIndex in enumerate(viewsIndices):
...
@@ -37,27 +37,25 @@ def printMetricScore(metricScores, metrics):
     return metricScoreString
-def getTotalMetricScores(metric, trainLabels, testLabels, DATASET, validationIndices):
+def getTotalMetricScores(metric, trainLabels, testLabels, DATASET, validationIndices, learningIndices):
     labels = DATASET.get("Labels").value
-    DATASET_LENGTH = DATASET.get("Metadata").attrs["datasetLength"]
     metricModule = getattr(Metrics, metric[0])
     if metric[1]!=None:
         metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
     else:
         metricKWARGS = {}
-    learningIndices = [index for index in range(DATASET_LENGTH) if index not in validationIndices]
     trainScore = metricModule.score(labels[learningIndices], trainLabels, **metricKWARGS)
     testScore = metricModule.score(labels[validationIndices], testLabels, **metricKWARGS)
     return [trainScore, testScore]
 def getMetricsScores(metrics, trainLabels, testLabels,
-                     DATASET, validationIndices):
+                     DATASET, validationIndices, learningIndices):
     metricsScores = {}
     for metric in metrics:
         metricsScores[metric[0]] = getTotalMetricScores(metric, trainLabels, testLabels,
-                                                        DATASET, validationIndices)
+                                                        DATASET, validationIndices, learningIndices)
     return metricsScores
@@ -66,7 +64,7 @@ def execute(classifier, trainLabels,
             classificationKWARGS, classificationIndices,
             LABELS_DICTIONARY, views, nbCores, times,
             name, KFolds,
-            hyperParamSearch, nIter, metrics, statsIter,
+            hyperParamSearch, nIter, metrics,
             viewsIndices, randomState):
     CLASS_LABELS = DATASET.get("Labels").value
@@ -77,8 +75,8 @@ def execute(classifier, trainLabels,
     monoviewClassifiersConfigs = classificationKWARGS["classifiersConfigs"]
     fusionMethodConfig = classificationKWARGS["fusionMethodConfig"]
-    DATASET_LENGTH = DATASET.get("Metadata").attrs["datasetLength"]
-    NB_CLASS = DATASET.get("Metadata").attrs["nbClass"]
+    # DATASET_LENGTH = DATASET.get("Metadata").attrs["datasetLength"]
+    # NB_CLASS = DATASET.get("Metadata").attrs["nbClass"]
     # kFoldAccuracyOnTrain = np.zeros((nbFolds, statsIter))
     # kFoldAccuracyOnTest = np.zeros((nbFolds, statsIter))
     # kFoldAccuracyOnValidation = np.zeros((nbFolds, statsIter))
@@ -108,7 +106,7 @@ def execute(classifier, trainLabels,
         metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metrics[0][1]))
     else:
         metricKWARGS = {}
-    scoreOnTrain = metricModule.score(CLASS_LABELS[learningIndices], trainLabels, **metricKWARGS)
+    scoreOnTrain = metricModule.score(CLASS_LABELS[learningIndices], CLASS_LABELS[learningIndices], **metricKWARGS)
     scoreOnTest = metricModule.score(CLASS_LABELS[validationIndices], testLabels, **metricKWARGS)
     fusionConfiguration = classifier.classifier.getConfig(fusionMethodConfig,monoviewClassifiersNames, monoviewClassifiersConfigs)
     stringAnalysis = "\t\tResult for Multiview classification with "+ fusionType + " and random state : "+str(randomState)+ \
@@ -120,9 +118,12 @@ def execute(classifier, trainLabels,
     if fusionType=="LateFusion":
         stringAnalysis+=Methods.LateFusion.getScores(classifier)
     metricsScores = getMetricsScores(metrics, trainLabels, testLabels,
-                                     DATASET, validationIndices)
-    stringAnalysis+=printMetricScore(metricsScores, metrics)
+                                     DATASET, validationIndices, learningIndices)
+    # if fusionMethod=="MajorityVoting":
+    #     print CLASS_LABELS[learningIndices]==CLASS_LABELS[learningIndices]
+    #     import pdb;pdb.set_trace()
     # stringAnalysis += "\n\nComputation time on " + str(nbCores) + " cores : \n\tDatabase extraction time : " + str(
+    stringAnalysis+=printMetricScore(metricsScores, metrics)
     #     hms(seconds=int(extractionTime))) + "\n\t"
     # row_format = "{:>15}" * 3
     # stringAnalysis += row_format.format("", *['Learn', 'Prediction'])
...
...@@ -204,9 +204,9 @@ class Mumbo: ...@@ -204,9 +204,9 @@ class Mumbo:
def predict_hdf5(self, DATASET, usedIndices=None, viewsIndices=None): def predict_hdf5(self, DATASET, usedIndices=None, viewsIndices=None):
NB_CLASS = DATASET.get("Metadata").attrs["nbClass"] NB_CLASS = DATASET.get("Metadata").attrs["nbClass"]
if usedIndices == None: if usedIndices is None:
usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"]) usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
if type(viewsIndices)==type(None): if viewsIndices is None:
viewsIndices = range(DATASET.get("Metadata").attrs["nbView"]) viewsIndices = range(DATASET.get("Metadata").attrs["nbView"])
viewDict = dict((viewIndex, index) for index, viewIndex in enumerate(viewsIndices)) viewDict = dict((viewIndex, index) for index, viewIndex in enumerate(viewsIndices))
@@ -229,9 +229,8 @@ class Mumbo:
     def predict_proba_hdf5(self, DATASET, usedIndices=None):
         NB_CLASS = DATASET.get("Metadata").attrs["nbClass"]
-        if usedIndices == None:
+        if usedIndices is None:
             usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
-        if usedIndices is not None:
         DATASET_LENGTH = len(usedIndices)
         predictedProbas = np.zeros((DATASET_LENGTH, NB_CLASS))
@@ -240,8 +239,6 @@ class Mumbo:
                 data = getV(DATASET, int(view), exampleIndex)
                 predictedProbas[labelIndex, int(classifier.predict(np.array([data])))] += alpha[view]
             predictedProbas[labelIndex,:] = predictedProbas[labelIndex,:]/np.sum(predictedProbas[labelIndex,:])
-        else:
-            predictedProbas = []
         return predictedProbas

     def trainWeakClassifiers(self, DATASET, CLASS_LABELS, NB_CLASS, DATASET_LENGTH, NB_VIEW):
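The per-example loop above normalises accumulated, alpha-weighted votes into a probability-like row. The same idea, shown vectorised on a hypothetical vote matrix (a sketch, not Mumbo's internals):

import numpy as np

# Hypothetical vote matrix: rows are examples, columns are classes,
# entries are accumulated alpha-weighted votes.
votes = np.array([[2.0, 1.0, 1.0],
                  [0.5, 0.0, 1.5]])

# Normalise each row so it sums to 1, like predictedProbas above,
# but in one vectorised step instead of a per-example loop.
predicted_probas = votes / votes.sum(axis=1, keepdims=True)
print(predicted_probas)              # [[0.5 0.25 0.25], [0.25 0. 0.75]]
print(predicted_probas.sum(axis=1))  # all ones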
@@ -482,9 +479,9 @@ class Mumbo:
         return np.transpose(predictedLabels)

     def classifyMumbobyIter_hdf5(self, DATASET, fakeViewsIndicesDict, usedIndices=None, NB_CLASS=2):
-        if usedIndices == None:
+        if usedIndices is None:
             usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
-        if usedIndices:
+        # if usedIndices is not None:
         DATASET_LENGTH = len(usedIndices)
         predictedLabels = np.zeros((DATASET_LENGTH, self.maxIter))
         votes = np.zeros((DATASET_LENGTH, NB_CLASS))
@@ -493,13 +490,13 @@ class Mumbo:
             votesByIter = np.zeros((DATASET_LENGTH, NB_CLASS))
             for usedExampleIndex, exampleIndex in enumerate(usedIndices):
-                data = np.array([np.array(getV(DATASET,int(view), exampleIndex))])
+                data = np.array([np.array(getV(DATASET,int(view), int(exampleIndex)))])
                 votesByIter[usedExampleIndex, int(classifier.predict(data))] += alpha[fakeViewsIndicesDict[view]]
                 votes[usedExampleIndex] = votes[usedExampleIndex] + np.array(votesByIter[usedExampleIndex])
                 predictedLabels[usedExampleIndex, iterIndex] = np.argmax(votes[usedExampleIndex])
-        else:
-            predictedLabels = []
-            for i in range(self.maxIter):
-                predictedLabels.append([])
+        # else:
+        #     predictedLabels = []
+        #     for i in range(self.maxIter):
+        #         predictedLabels.append([])
         return np.transpose(predictedLabels)
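classifyMumbobyIter_hdf5 keeps a running vote total and records the argmax after every boosting iteration, which is what lets the later analysis track error per iteration. A compact, self-contained sketch of that accumulation scheme with made-up per-iteration votes:

import numpy as np

n_examples, n_classes, n_iterations = 3, 2, 4
rng = np.random.RandomState(42)

# Hypothetical per-iteration, alpha-weighted votes: (iterations, examples, classes).
votes_by_iter = rng.rand(n_iterations, n_examples, n_classes)

votes = np.zeros((n_examples, n_classes))
predicted_labels = np.zeros((n_examples, n_iterations), dtype=int)
for iter_index in range(n_iterations):
    votes += votes_by_iter[iter_index]                           # accumulate votes
    predicted_labels[:, iter_index] = np.argmax(votes, axis=1)   # decision so far

# One row per example, one column per iteration, as in predictedLabels above
# (the original returns the transpose: iterations x examples).
print(predicted_labels)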
@@ -32,7 +32,7 @@ def genFusionName(type_, a, b, c):
     if type_ == "Fusion" and a["fusionType"] != "EarlyFusion":
         return "Late-"+str(a["fusionMethod"])
     elif type_ == "Fusion" and a["fusionType"] != "LateFusion":
-        return "Early-"+a["fusionMethod"]+"-"+a["classifiersNames"][0]
+        return "Early-"+a["fusionMethod"]+"-"+a["classifiersNames"]

 def genNamesFromRes(mono, multi):
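With the new line, the early-fusion name concatenates the whole classifiersNames value instead of its first element, which reads as if classifiersNames is now a single string. A tiny illustration of the naming rule under that assumption (simplified logic, hypothetical dictionaries):

def gen_fusion_name(a):
    # Simplified copy of the naming rule above, for illustration only.
    if a["fusionType"] == "LateFusion":
        return "Late-" + str(a["fusionMethod"])
    return "Early-" + a["fusionMethod"] + "-" + a["classifiersNames"]

print(gen_fusion_name({"fusionType": "LateFusion", "fusionMethod": "MajorityVoting"}))
# Late-MajorityVoting
print(gen_fusion_name({"fusionType": "EarlyFusion", "fusionMethod": "WeightedLinear",
                       "classifiersNames": "DecisionTree"}))
# Early-WeightedLinear-DecisionTree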
@@ -48,17 +48,17 @@ def resultAnalysis(benchmark, results, name, times, metrics, directory):
         nbResults = len(mono)+len(multi)
         validationScores = [float(res[1][2][metric[0]][1]) for res in mono]
         validationScores += [float(scores[metric[0]][1]) for a, b, scores, c in multi]
-        validationSTD = [float(res[1][2][metric[0]][3]) for res in mono]
-        validationSTD += [float(scores[metric[0]][3]) for a, b, scores, c in multi]
+        # validationSTD = [float(res[1][2][metric[0]][3]) for res in mono]
+        # validationSTD += [float(scores[metric[0]][3]) for a, b, scores, c in multi]
         trainScores = [float(res[1][2][metric[0]][0]) for res in mono]
         trainScores += [float(scores[metric[0]][0]) for a, b, scores, c in multi]
-        trainSTD = [float(res[1][2][metric[0]][2]) for res in mono]
-        trainSTD += [float(scores[metric[0]][2]) for a, b, scores, c in multi]
+        # trainSTD = [float(res[1][2][metric[0]][2]) for res in mono]
+        # trainSTD += [float(scores[metric[0]][2]) for a, b, scores, c in multi]
         validationScores = np.array(validationScores)
-        validationSTD = np.array(validationSTD)
+        # validationSTD = np.array(validationSTD)
         trainScores = np.array(trainScores)
-        trainSTD = np.array(trainSTD)
+        # trainSTD = np.array(trainSTD)
         names = np.array(names)
         f = pylab.figure(figsize=(40, 30))
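The lists above flatten the per-classifier results into parallel arrays (monoview entries first, then multiview), which the plotting code then sorts and draws together. A toy version of that pattern with hypothetical result tuples (the real res[...] indexing is more involved):

import numpy as np

# Hypothetical flattened results: (classifier name, train score, validation score).
results = [("DecisionTree-View0", 0.92, 0.71),
           ("SVM-View1", 0.88, 0.74),
           ("Late-MajorityVoting", 0.90, 0.78)]

names = np.array([name for name, _, _ in results])
train_scores = np.array([train for _, train, _ in results])
validation_scores = np.array([val for _, _, val in results])

# Keep the three arrays aligned by sorting them with the same index permutation.
order = np.argsort(validation_scores)
names, train_scores, validation_scores = names[order], train_scores[order], validation_scores[order]
print(names, validation_scores)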
@@ -72,14 +72,14 @@ def resultAnalysis(benchmark, results, name, times, metrics, directory):
             metricKWARGS = {}
         sorted_indices = np.argsort(validationScores)
         validationScores = validationScores[sorted_indices]
-        validationSTD = validationSTD[sorted_indices]
+        # validationSTD = validationSTD[sorted_indices]
         trainScores = trainScores[sorted_indices]
-        trainSTD = trainSTD[sorted_indices]
+        # trainSTD = trainSTD[sorted_indices]
         names = names[sorted_indices]
         ax.set_title(getattr(Metrics, metric[0]).getConfig(**metricKWARGS)+" on validation set for each classifier")
-        rects = ax.bar(range(nbResults), validationScores, width, color="r", yerr=validationSTD)
+        rects = ax.bar(range(nbResults), validationScores, width, color="r", )#yerr=validationSTD)
-        rect2 = ax.bar(np.arange(nbResults)+width, trainScores, width, color="0.7", yerr=trainSTD)
+        rect2 = ax.bar(np.arange(nbResults)+width, trainScores, width, color="0.7",)# yerr=trainSTD)
         autolabel(rects, ax)
         autolabel(rect2, ax)
         ax.legend((rects[0], rect2[0]), ('Test', 'Train'))
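The plot is a grouped bar chart of validation and train scores, with the error bars (yerr) disabled now that the standard deviations are no longer collected. A minimal, self-contained matplotlib sketch of the same layout (illustrative names and scores):

import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless backend, as in a batch benchmark run
import matplotlib.pyplot as plt

names = ["DecisionTree-View0", "SVM-View1", "Late-MajorityVoting"]
validation_scores = np.array([0.71, 0.74, 0.78])
train_scores = np.array([0.92, 0.88, 0.90])
width = 0.35

fig, ax = plt.subplots(figsize=(8, 6))
# Two bar series side by side; pass yerr=... here to restore error bars.
rects = ax.bar(np.arange(len(names)), validation_scores, width, color="r")
rects2 = ax.bar(np.arange(len(names)) + width, train_scores, width, color="0.7")
ax.legend((rects[0], rects2[0]), ("Test", "Train"))
ax.set_xticks(np.arange(len(names)) + width / 2)
ax.set_xticklabels(names, rotation="vertical")
fig.savefig("scores.png")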
@@ -87,7 +87,7 @@ def resultAnalysis(benchmark, results, name, times, metrics, directory):
         ax.set_xticklabels(names, rotation="vertical")
         f.savefig(directory+time.strftime("%Y%m%d-%H%M%S")+"-"+name+"-"+metric[0]+".png")
     logging.info("Extraction time : "+str(times[0])+"s, Monoview time : "+str(times[1])+"s, Multiview Time : "+str(times[2])+"s")

 def analyzeLabels(labelsArrays, realLabels, results, directory):
...
@@ -27,11 +27,11 @@ def testVersions():
         print "Please install pyscm"
         raise
-    try:
-        import cv2
-        # print("OpenCV2-V.: " + cv2.__version__)
-    except:
-        print "Please install cv2 module"
+    # try:
+    #     import cv2
+    #     # print("OpenCV2-V.: " + cv2.__version__)
+    # except:
+    #     print "Please install cv2 module"
     try:
         import pandas
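Commenting the block out drops the OpenCV check entirely. If the dependency should stay optional but still be reported, one alternative pattern is to collect the missing modules and print them once; a sketch (not the project's testVersions):

# Sketch of an optional-dependency check (illustrative, not the project's code).
def check_optional_modules(module_names):
    missing = []
    for name in module_names:
        try:
            __import__(name)
        except ImportError:
            missing.append(name)
    return missing

missing = check_optional_modules(["pandas", "cv2", "h5py"])
if missing:
    print("Optional modules not found: " + ", ".join(missing))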
...
@@ -3,7 +3,7 @@ import numpy as np
 def getV(DATASET, viewIndex, usedIndices=None):
-    if usedIndices==None:
+    if usedIndices is None:
         usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
     if type(usedIndices) is int:
         return DATASET.get("View"+str(viewIndex))[usedIndices, :]
...