Commit bcb38103 authored by bbauvin

Added trainIndices saving in CSV, corrected error-analysis figure size issue

parent 0ddb9366
@@ -284,6 +284,7 @@ def classifyOneIter_multicore(LABELS_DICTIONARY, argumentDictionaries, nbCores,
                               benchmark,
                               views):
     resultsMonoview = []
+    np.savetxt(directory+"train_indices.csv", classificationIndices[0], delimiter=",")
     labelsNames = LABELS_DICTIONARY.values()
     resultsMonoview += [ExecMonoview_multicore(directory, args.name, labelsNames, classificationIndices, kFolds,
                                                coreIndex, args.type, args.pathF, randomState,
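
For context, a minimal sketch (not part of the commit) of the round trip the added np.savetxt call enables; the directory value and the index array are illustrative stand-ins, and since np.savetxt writes floats by default the indices are cast back to int on reload:

import os
import numpy as np

directory = "results/iter_1/"             # assumed: the benchmark passes directories ending in "/"
trainIndices = np.array([0, 3, 4, 7, 9])  # hypothetical classificationIndices[0]

if not os.path.isdir(directory):
    os.makedirs(directory)                # np.savetxt does not create missing directories

np.savetxt(directory + "train_indices.csv", trainIndices, delimiter=",")
reloaded = np.genfromtxt(directory + "train_indices.csv", delimiter=",").astype(int)
assert (reloaded == trainIndices).all()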
@@ -327,8 +328,9 @@ def classifyOneIter_multicore(LABELS_DICTIONARY, argumentDictionaries, nbCores,
 def classifyOneIter(LABELS_DICTIONARY, argumentDictionaries, nbCores, directory, args, classificationIndices, kFolds,
                     randomState, hyperParamSearch, metrics, DATASET, viewsIndices, dataBaseTime, start,
                     benchmark, views):
+    np.savetxt(directory+"train_indices.csv", classificationIndices[0], delimiter=",")
     resultsMonoview = []
     labelsNames = LABELS_DICTIONARY.values()
     if nbCores > 1:
         nbExperiments = len(argumentDictionaries["Monoview"])
         for stepIndex in range(int(math.ceil(float(nbExperiments) / nbCores))):
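
The context lines above show the batching pattern used for multicore execution. A standalone sketch of that arithmetic (the numbers are illustrative): ceil(nbExperiments / nbCores) steps each dispatch at most nbCores monoview jobs.

import math

nbExperiments = 10  # hypothetical len(argumentDictionaries["Monoview"])
nbCores = 4

for stepIndex in range(int(math.ceil(float(nbExperiments) / nbCores))):
    batch = range(nbCores * stepIndex, min(nbCores * (stepIndex + 1), nbExperiments))
    # each index in "batch" would go to one worker process
    print(list(batch))  # step 0: [0, 1, 2, 3], step 1: [4, 5, 6, 7], step 2: [8, 9]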
@@ -436,7 +438,7 @@ groupStandard.add_argument('--randomState', metavar='STRING', action='store',
 groupClass = parser.add_argument_group('Classification arguments')
 groupClass.add_argument('--CL_split', metavar='FLOAT', action='store',
                         help='Determine the split between learning and validation sets', type=float,
-                        default=0.3)
+                        default=0.2)
 groupClass.add_argument('--CL_nbFolds', metavar='INT', action='store', help='Number of folds in cross validation',
                         type=int, default=2)
 groupClass.add_argument('--CL_nb_class', metavar='INT', action='store', help='Number of classes, -1 for all', type=int,
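
A minimal sketch of how the changed default behaves, using only the --CL_split argument shown above (the empty argv and the comment on its meaning are illustrative assumptions):

import argparse

parser = argparse.ArgumentParser()
groupClass = parser.add_argument_group('Classification arguments')
groupClass.add_argument('--CL_split', metavar='FLOAT', action='store',
                        help='Determine the split between learning and validation sets', type=float,
                        default=0.2)

args = parser.parse_args([])  # no CLI input, so the default applies
assert args.CL_split == 0.2   # assumed to be the fraction held out for validation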
@@ -80,7 +80,7 @@ def resultAnalysis(benchmark, results, name, times, metrics, directory, minSize=
     plt.close()


-def analyzeLabels(labelsArrays, realLabels, results, directory):
+def analyzeLabels(labelsArrays, realLabels, results, directory, minSize=10):
     mono, multi = results
     classifiersNames = genNamesFromRes(mono, multi)
     nbClassifiers = len(classifiersNames)
@@ -91,14 +91,16 @@ def analyzeLabels(labelsArrays, realLabels, results, directory):
     for classifierIndex in range(nbClassifiers):
         for iterIndex in range(nbIter):
             data[:, classifierIndex * nbIter + iterIndex] = tempData[classifierIndex, :]

-    figKW = {"figsize":(nbClassifiers/2, nbExamples/20)}
+    figWidth = max(nbClassifiers/2, minSize)
+    figHeight = max(nbExamples/20, minSize)
+    figKW = {"figsize":(figWidth, figHeight)}
     fig, ax = plt.subplots(nrows=1, ncols=1, **figKW)
     cmap = mpl.colors.ListedColormap(['red', 'green'])
     bounds = [-0.5, 0.5, 1.5]
     norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
     cax = plt.imshow(data, interpolation='none', cmap=cmap, norm=norm, aspect='auto')
-    plt.title('Error on examples depending on the classifier')
+    plt.title('Errors depending on the classifier')
     ticks = np.arange(0, nbClassifiers * nbIter, nbIter)
     labels = classifiersNames
     plt.xticks(ticks, labels, rotation="vertical")
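
To see the fig-size fix in isolation, a self-contained sketch (random data, illustrative sizes): with few classifiers or examples, nbClassifiers/2 or nbExamples/20 shrinks toward zero, so both dimensions are clamped to at least minSize inches.

import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

nbClassifiers, nbExamples, nbIter, minSize = 4, 60, 1, 10
# one row per example, one column per classifier/iteration; 0 = error, 1 = correct
data = np.random.randint(0, 2, size=(nbExamples, nbClassifiers * nbIter))

figWidth = max(nbClassifiers / 2, minSize)   # unclamped, 4 classifiers would give a 2-inch-wide figure
figHeight = max(nbExamples / 20, minSize)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(figWidth, figHeight))

cmap = mpl.colors.ListedColormap(['red', 'green'])
bounds = [-0.5, 0.5, 1.5]                    # bins mapping 0 -> red and 1 -> green
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
plt.imshow(data, interpolation='none', cmap=cmap, norm=norm, aspect='auto')
plt.title('Errors depending on the classifier')
plt.xticks(np.arange(0, nbClassifiers * nbIter, nbIter),
           ['classifier%d' % i for i in range(nbClassifiers)], rotation="vertical")
plt.savefig("error_analysis_example.png")
plt.close()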