Commit bcb38103 authored by bbauvin

Added trainIndices saving in csv, corrected error analysis fig size issue

parent 0ddb9366
@@ -284,6 +284,7 @@ def classifyOneIter_multicore(LABELS_DICTIONARY, argumentDictionaries, nbCores,
                               benchmark,
                               views):
     resultsMonoview = []
+    np.savetxt(directory+"train_indices.csv", classificationIndices[0], delimiter=",")
     labelsNames = LABELS_DICTIONARY.values()
     resultsMonoview += [ExecMonoview_multicore(directory, args.name, labelsNames, classificationIndices, kFolds,
                                                coreIndex, args.type, args.pathF, randomState,
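
For context, np.savetxt writes a 1-D index array with one value per line, and the dump can be read back with np.genfromtxt. A minimal round-trip sketch, assuming classificationIndices[0] is a 1-D array of training-example indices (the file name matches the one added above; the sample values are made up):

    import numpy as np

    # Hypothetical stand-in for classificationIndices[0]: a 1-D array of
    # training-example indices.
    train_indices = np.array([3, 7, 12, 25, 31])

    # As in the commit: one index per line (the delimiter only matters for
    # 2-D input).
    np.savetxt("train_indices.csv", train_indices, delimiter=",")

    # savetxt stores floats by default, so cast back to int before using
    # the reloaded indices to slice a dataset.
    reloaded = np.genfromtxt("train_indices.csv", delimiter=",").astype(int)
    assert np.array_equal(train_indices, reloaded)
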
@@ -327,8 +328,13 @@ def classifyOneIter_multicore(LABELS_DICTIONARY, argumentDictionaries, nbCores,
 def classifyOneIter(LABELS_DICTIONARY, argumentDictionaries, nbCores, directory, args, classificationIndices, kFolds,
                     randomState, hyperParamSearch, metrics, DATASET, viewsIndices, dataBaseTime, start,
                     benchmark, views):
+    print classificationIndices[0]
+    import pdb;pdb.set_trace()
+    np.savetxt(directory+"train_indices.csv", classificationIndices[0], delimiter=",")
     resultsMonoview = []
     labelsNames = LABELS_DICTIONARY.values()
     if nbCores > 1:
         nbExperiments = len(argumentDictionaries["Monoview"])
         for stepIndex in range(int(math.ceil(float(nbExperiments) / nbCores))):
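
The loop above dispatches the monoview experiments in batches of nbCores; the ceil ensures a final, possibly smaller batch still runs. A self-contained sketch of that batching arithmetic, with made-up counts:

    import math

    nbExperiments = 10  # made-up number of monoview experiments
    nbCores = 4         # made-up core count

    # Same batching as above: ceil(10 / 4) = 3 steps, the last one partial.
    for stepIndex in range(int(math.ceil(float(nbExperiments) / nbCores))):
        first = stepIndex * nbCores
        last = min((stepIndex + 1) * nbCores, nbExperiments)
        print("step %d runs experiments %d..%d" % (stepIndex, first, last - 1))
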
@@ -436,7 +442,7 @@ groupStandard.add_argument('--randomState', metavar='STRING', action='store',
 groupClass = parser.add_argument_group('Classification arguments')
 groupClass.add_argument('--CL_split', metavar='FLOAT', action='store',
                         help='Determine the split between learning and validation sets', type=float,
-                        default=0.3)
+                        default=0.2)
 groupClass.add_argument('--CL_nbFolds', metavar='INT', action='store', help='Number of folds in cross validation',
                         type=int, default=2)
 groupClass.add_argument('--CL_nb_class', metavar='INT', action='store', help='Number of classes, -1 for all', type=int,
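
The --CL_split default drops from 0.3 to 0.2, i.e. a smaller validation set. The platform's own splitting code is not part of this diff, so as an illustration only, here is how such a fraction is commonly applied with scikit-learn (all names and data below are placeholders):

    import numpy as np
    from sklearn.model_selection import train_test_split

    X = np.arange(40).reshape(20, 2)   # placeholder feature matrix
    y = np.array([0, 1] * 10)          # placeholder labels

    # A split of 0.2 keeps 20% of the examples for validation.
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2,
                                                      random_state=42)
    assert X_train.shape == (16, 2)
    assert X_val.shape == (4, 2)
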
...
@@ -80,7 +80,7 @@ def resultAnalysis(benchmark, results, name, times, metrics, directory, minSize=
     plt.close()
 
-def analyzeLabels(labelsArrays, realLabels, results, directory):
+def analyzeLabels(labelsArrays, realLabels, results, directory, minSize=10):
     mono, multi = results
     classifiersNames = genNamesFromRes(mono, multi)
     nbClassifiers = len(classifiersNames)
@@ -91,14 +91,16 @@ def analyzeLabels(labelsArrays, realLabels, results, directory):
     for classifierIndex in range(nbClassifiers):
         for iterIndex in range(nbIter):
             data[:, classifierIndex * nbIter + iterIndex] = tempData[classifierIndex, :]
-    figKW = {"figsize":(nbClassifiers/2, nbExamples/20)}
+    figWidth = max(nbClassifiers/2, minSize)
+    figHeight = max(nbExamples/20, minSize)
+    figKW = {"figsize":(figWidth, figHeight)}
     fig, ax = plt.subplots(nrows=1, ncols=1, **figKW)
     cmap = mpl.colors.ListedColormap(['red', 'green'])
     bounds = [-0.5, 0.5, 1.5]
     norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
     cax = plt.imshow(data, interpolation='none', cmap=cmap, norm=norm, aspect='auto')
-    plt.title('Error on examples depending on the classifier')
+    plt.title('Errors depending on the classifier')
     ticks = np.arange(0, nbClassifiers * nbIter, nbIter)
     labels = classifiersNames
     plt.xticks(ticks, labels, rotation="vertical")
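
The fix above clamps both figure dimensions to minSize so the error map stays readable on small benchmarks (with 3 classifiers the raw width would be only 1.5 inches). A standalone sketch of the sizing and the red/green colormap, with made-up data (0 = error, 1 = correct):

    import numpy as np
    import matplotlib as mpl
    import matplotlib.pyplot as plt

    nbClassifiers, nbExamples, minSize = 3, 40, 10
    data = np.random.randint(0, 2, size=(nbExamples, nbClassifiers))

    # Without the clamp this would be a 1.5 x 2 inch figure.
    figWidth = max(nbClassifiers / 2.0, minSize)
    figHeight = max(nbExamples / 20.0, minSize)
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(figWidth, figHeight))

    # Two-color map: values in [-0.5, 0.5) render red (error),
    # values in [0.5, 1.5) render green (correct).
    cmap = mpl.colors.ListedColormap(['red', 'green'])
    norm = mpl.colors.BoundaryNorm([-0.5, 0.5, 1.5], cmap.N)
    plt.imshow(data, interpolation='none', cmap=cmap, norm=norm, aspect='auto')
    plt.savefig("error_analysis_demo.png")
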
...