diff --git a/Code/MonoMultiViewClassifiers/ExecClassif.py b/Code/MonoMultiViewClassifiers/ExecClassif.py
index 4283c7003a1ddb65da641bf5bcecd2b6524de22c..df869b6806a0403f36de7d3379a62c8ce7c9a57e 100644
--- a/Code/MonoMultiViewClassifiers/ExecClassif.py
+++ b/Code/MonoMultiViewClassifiers/ExecClassif.py
@@ -845,7 +845,7 @@ def execOneBenchmark(coreIndex=-1, LABELS_DICTIONARY=None, directory=None, class
     except OSError as exc:
         if exc.errno != errno.EEXIST:
             raise
-    trainIndices, testIndices = classificationIndices
+    trainIndices = classificationIndices[0]
     trainLabels = labels[trainIndices]
     np.savetxt(directory + "train_labels.csv", trainLabels, delimiter=",")
     resultsMonoview = []
@@ -883,7 +883,7 @@ def execOneBenchmark_multicore(nbCores=-1, LABELS_DICTIONARY=None, directory=Non
     except OSError as exc:
         if exc.errno != errno.EEXIST:
             raise
-    trainIndices, testIndices = classificationIndices
+    trainIndices = classificationIndices[0]
     trainLabels = labels[trainIndices]
     np.savetxt(directory + "train_labels.csv", trainLabels, delimiter=",")
     np.savetxt(directory + "train_indices.csv", classificationIndices[0], delimiter=",")
@@ -929,7 +929,7 @@ def execOneBenchmarkMonoCore(DATASET=None, LABELS_DICTIONARY=None, directory=Non
     except OSError as exc:
         if exc.errno != errno.EEXIST:
             raise
-    trainIndices, testIndices = classificationIndices
+    trainIndices = classificationIndices[0]
     trainLabels = labels[trainIndices]
     np.savetxt(directory + "train_labels.csv", trainLabels, delimiter=",")
     resultsMonoview = []
@@ -1023,7 +1023,7 @@ def execClassif(arguments):
 
     classificationIndices = execution.genSplits(DATASET.get("Labels").value, args.CL_split, statsIterRandomStates)
 
-    multiclassLabels, labelsCombinations, oldIndicesMulticlass = Multiclass.genMulticlassLabels(DATASET.get("Labels").value, multiclassMethod, classificationIndices)
+    multiclassLabels, labelsCombinations, indicesMulticlass = Multiclass.genMulticlassLabels(DATASET.get("Labels").value, multiclassMethod, classificationIndices)
 
     kFolds = execution.genKFolds(statsIter, args.CL_nbFolds, statsIterRandomStates)
 
@@ -1060,7 +1060,7 @@ def execClassif(arguments):
                                            initKWARGS)
     directories = execution.genDirecortiesNames(directory, statsIter)
     benchmarkArgumentDictionaries = execution.genArgumentDictionaries(LABELS_DICTIONARY, directories, multiclassLabels,
-                                                                      labelsCombinations, oldIndicesMulticlass,
+                                                                      labelsCombinations, indicesMulticlass,
                                                                       hyperParamSearch, args, kFolds,
                                                                       statsIterRandomStates, metrics,
                                                                       argumentDictionaries, benchmark, nbViews, views)
diff --git a/Code/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py b/Code/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py
index fa0a4eba0a6cfd0055d45a3ab35bc121cccfe00e..0926a3e313e36131f81eef1023964d257d56e715 100644
--- a/Code/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py
+++ b/Code/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py
@@ -49,12 +49,13 @@ def initConstants(args, X, classificationIndices, labelsNames, name, directory):
 
 
 def initTrainTest(X, Y, classificationIndices):
-    trainIndices, testIndices = classificationIndices
+    trainIndices, testIndices, testIndicesMulticlass = classificationIndices
     X_train = extractSubset(X, trainIndices)
     X_test = extractSubset(X, testIndices)
+    X_test_multiclass = extractSubset(X, testIndicesMulticlass)
     y_train = Y[trainIndices]
     y_test = Y[testIndices]
-    return X_train, y_train, X_test, y_test
+    return X_train, y_train, X_test, y_test, X_test_multiclass
 
 
 def getKWARGS(classifierModule, hyperParamSearch, nIter, CL_type, X_train, y_train, randomState,
@@ -127,7 +128,7 @@ def ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices, KFol
                   + str(nbCores) + ", algorithm : " + CL_type)
     logging.debug("Start:\t Determine Train/Test split")
-    X_train, y_train, X_test, y_test = initTrainTest(X, Y, classificationIndices)
+    X_train, y_train, X_test, y_test, X_test_multiclass = initTrainTest(X, Y, classificationIndices)
     logging.debug("Info:\t Shape X_train:" + str(X_train.shape)
                   + ", Length of y_train:" + str(len(y_train)))
     logging.debug("Info:\t Shape X_test:" + str(X_test.shape) + ", Length of y_test:" + str(len(y_test)))
     logging.debug("Done:\t Determine Train/Test split")
@@ -145,13 +146,14 @@ def ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices, KFol
     logging.debug("Done:\t Training")
 
     logging.debug("Start:\t Predicting")
+    y_train_pred = cl_res.predict(X_train)
+    y_test_pred = cl_res.predict(X_test)
     full_labels_pred = np.zeros(Y.shape, dtype=int)-100
-    y_train_pred = cl_res.predict(X[classificationIndices[0]])
-    y_test_pred = cl_res.predict(X[classificationIndices[1]])
     for trainIndex, index in enumerate(classificationIndices[0]):
         full_labels_pred[index] = y_train_pred[trainIndex]
     for testIndex, index in enumerate(classificationIndices[1]):
         full_labels_pred[index] = y_test_pred[testIndex]
+    y_test_multiclass_pred = cl_res.predict(X_test_multiclass)
     logging.debug("Done:\t Predicting")
 
 
@@ -174,7 +176,7 @@ def ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices, KFol
     logging.info("Done:\t Saving Results")
 
     viewIndex = args["viewIndex"]
-    return viewIndex, [CL_type, cl_desc + [feat], metricsScores, full_labels_pred, clKWARGS]
+    return viewIndex, [CL_type, cl_desc + [feat], metricsScores, full_labels_pred, clKWARGS, y_test_multiclass_pred]
 
 
 if __name__ == '__main__':
diff --git a/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py b/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
index d8fbd7a0e4fab0fa56b75ddddc48afb63a8da776..388ebc70ef9768251c047ed4250bfd5eb3e2c0de 100644
--- a/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
+++ b/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
@@ -95,7 +95,7 @@ def ExecMultiview(directory, DATASET, name, classificationIndices, KFolds, nbCor
     logging.info("Info:\t Extraction duration "+str(extractionTime)+"s")
 
     logging.debug("Start:\t Getting train/test split")
-    learningIndices, validationIndices = classificationIndices
+    learningIndices, validationIndices, testIndicesMulticlass = classificationIndices
     logging.debug("Done:\t Getting train/test split")
 
     logging.debug("Start:\t Getting classifiers modules")
@@ -129,6 +129,7 @@ def ExecMultiview(directory, DATASET, name, classificationIndices, KFolds, nbCor
         fullLabels[index] = trainLabels[trainIndex]
     for testIndex, index in enumerate(validationIndices):
         fullLabels[index] = testLabels[testIndex]
+    testLabelsMulticlass = classifier.predict_hdf5(DATASET, usedIndices=testIndicesMulticlass, viewsIndices=viewsIndices)
     logging.info("Done:\t Pertidcting")
 
     classificationTime = time.time() - t_start
@@ -157,4 +158,4 @@ def ExecMultiview(directory, DATASET, name, classificationIndices, KFolds, nbCor
                       learningRate, name, imagesAnalysis)
     logging.debug("Start:\t Saving preds")
 
-    return CL_type, classificationKWARGS, metricsScores, fullLabels
+    return CL_type, classificationKWARGS, metricsScores, fullLabels, testLabelsMulticlass
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/analyzeResults.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/analyzeResults.py
index 80dafb3e9bc860d26fb7d107983106b5d8ef1b2c..abc5ecb46ccbc647eca2cffa4b70443eb0e0a8f8 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/analyzeResults.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/analyzeResults.py
@@ -66,7 +66,7 @@ def execute(classifier, trainLabels,
     monoviewClassifiersConfigs = classificationKWARGS["classifiersConfigs"]
     fusionMethodConfig = classificationKWARGS["fusionMethodConfig"]
 
-    learningIndices, validationIndices = classificationIndices
+    learningIndices, validationIndices, testIndicesMulticlass = classificationIndices
     metricModule = getattr(Metrics, metrics[0][0])
     if metrics[0][1] is not None:
         metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metrics[0][1]))
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Mumbo/analyzeResults.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Mumbo/analyzeResults.py
index 269c5b1261536144341fc82cb77d65c758c5d1b2..85ac7a12e101480cd2f18c7fc950e25dd30c1d34 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Mumbo/analyzeResults.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Mumbo/analyzeResults.py
@@ -116,7 +116,7 @@ def getAlgoConfig(classifier, classificationKWARGS, nbCores, viewNames, hyperPar
 
 
 def getReport(classifier, CLASS_LABELS, classificationIndices, DATASET, trainLabels, testLabels, viewIndices, metric):
-    learningIndices, validationIndices = classificationIndices
+    learningIndices, validationIndices, multiviewTestIndices = classificationIndices
     nbView = len(viewIndices)
     NB_CLASS = DATASET.get("Metadata").attrs["nbClass"]
     metricModule = getattr(Metrics, metric[0])
@@ -224,7 +224,7 @@ def execute(classifier, trainLabels,
             databaseName, KFolds, hyperParamSearch, nIter, metrics,
             viewsIndices, randomState, labels):
-    learningIndices, validationIndices = classificationIndices
+    learningIndices, validationIndices, testIndicesMulticlass = classificationIndices
     if classifier.classifiersConfigs is None:
         metricsScores = getMetricsScores(metrics, trainLabels, testLabels,
                                          validationIndices, learningIndices, labels)
 
diff --git a/Code/MonoMultiViewClassifiers/utils/Multiclass.py b/Code/MonoMultiViewClassifiers/utils/Multiclass.py
index c10eb2707a0d8e7d15934d616c4d4818e3a7db39..9bbd793f3409f6e62d8a8173c81d5eed12b1c3ed 100644
--- a/Code/MonoMultiViewClassifiers/utils/Multiclass.py
+++ b/Code/MonoMultiViewClassifiers/utils/Multiclass.py
@@ -21,7 +21,8 @@ def genMulticlassLabels(labels, multiclassMethod, classificationIndices):
                             for iterIndices in classificationIndices]
             testIndices = [np.array([oldIndex for oldIndex in oldIndices if oldIndex in iterindices[1]])
                            for iterindices in classificationIndices]
-            indicesMulticlass.append([trainIndices, testIndices])
+            testIndicesMulticlass = [np.array(iterindices[1]) for iterindices in classificationIndices]
+            indicesMulticlass.append([trainIndices, testIndices, testIndicesMulticlass])
             newLabels = np.zeros(len(labels), dtype=int)-100
             for labelIndex, label in enumerate(labels):
                 if label == combination[0]:
diff --git a/Code/MonoMultiViewClassifiers/utils/execution.py b/Code/MonoMultiViewClassifiers/utils/execution.py
index fd28b0c3b6865194ed969869a9fb16efcd362a25..e7f55fced030dfeca8d2f898f82ebc4c2078f03c 100644
--- a/Code/MonoMultiViewClassifiers/utils/execution.py
+++ b/Code/MonoMultiViewClassifiers/utils/execution.py
@@ -296,7 +296,7 @@ def genDirecortiesNames(directory, statsIter):
     return directories
 
 
-def genArgumentDictionaries(labelsDictionary, directories, multiclassLabels, labelsCombinations, oldIndicesMulticlass, hyperParamSearch, args,
+def genArgumentDictionaries(labelsDictionary, directories, multiclassLabels, labelsCombinations, indicesMulticlass, hyperParamSearch, args,
                             kFolds, statsIterRandomStates, metrics, argumentDictionaries, benchmark, nbViews, views):
     benchmarkArgumentDictionaries = []
     for combinationIndex, labelsCombination in enumerate(labelsCombinations):
@@ -307,7 +307,9 @@ def genArgumentDictionaries(labelsDictionary, directories, multiclassLabels, lab
                                        labelsDictionary[labelsCombination[0]]+
                                        "vs"+
                                        labelsDictionary[labelsCombination[1]]+"/",
-                "classificationIndices": oldIndicesMulticlass[combinationIndex][iterIndex],
+                "classificationIndices": [indicesMulticlass[combinationIndex][0][iterIndex],
+                                          indicesMulticlass[combinationIndex][1][iterIndex],
+                                          indicesMulticlass[combinationIndex][2][iterIndex]],
                 "args": args,
                 "labels": multiclassLabels[combinationIndex],
                 "kFolds": kFolds[iterIndex],