Commit 0ddb9366 authored by bbauvin

Added dynamic size for figs

parent e3e36cb1
@@ -42,7 +42,7 @@ def genNamesFromRes(mono, multi):
     return names
 
 
-def resultAnalysis(benchmark, results, name, times, metrics, directory):
+def resultAnalysis(benchmark, results, name, times, metrics, directory, minSize=10):
     mono, multi = results
     for metric in metrics:
         names = genNamesFromRes(mono, multi)
@@ -55,30 +55,27 @@ def resultAnalysis(benchmark, results, name, times, metrics, directory):
         validationScores = np.array(validationScores)
         trainScores = np.array(trainScores)
         names = np.array(names)
-        f = pylab.figure(figsize=(40, 30))
-        width = 0.35 # the width of the bars
-        fig = plt.gcf()
-        fig.subplots_adjust(bottom=105.0, top=105.01)
-        ax = f.add_axes([0.1, 0.1, 0.8, 0.8])
-        if metric[1] is not None:
-            metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
-        else:
-            metricKWARGS = {}
+        size = nbResults
+        if nbResults < minSize:
+            size = minSize
+        figKW = {"figsize" : (size, 3.0/4*size+2.0)}
+        f, ax = plt.subplots(nrows=1, ncols=1, **figKW)
+        barWidth= 0.35
         sorted_indices = np.argsort(validationScores)
         validationScores = validationScores[sorted_indices]
         trainScores = trainScores[sorted_indices]
         names = names[sorted_indices]
 
-        ax.set_title(getattr(Metrics, metric[0]).getConfig(**metricKWARGS) + " on validation set for each classifier")
-        rects = ax.bar(range(nbResults), validationScores, width, color="r", )
-        rect2 = ax.bar(np.arange(nbResults) + width, trainScores, width, color="0.7", )
+        ax.set_title(metric[0] + "\n on validation set for each classifier")
+        rects = ax.bar(range(nbResults), validationScores, barWidth, color="r", )
+        rect2 = ax.bar(np.arange(nbResults) + barWidth, trainScores, barWidth, color="0.7", )
         autolabel(rects, ax)
         autolabel(rect2, ax)
         ax.legend((rects[0], rect2[0]), ('Test', 'Train'))
-        ax.set_xticks(np.arange(nbResults) + width)
+        ax.set_ylim(-0.1, 1.1)
+        ax.set_xticks(np.arange(nbResults) + barWidth)
         ax.set_xticklabels(names, rotation="vertical")
+        plt.tight_layout()
         f.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-" + name + "-" + metric[0] + ".png")
         plt.close()
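
For context, the sizing rule this hunk introduces makes the canvas grow linearly with the number of bars instead of using a fixed 40 x 30 inch figure. A minimal standalone sketch of the idea (the function name and usage below are illustrative, not from the repository):

    import numpy as np
    import matplotlib.pyplot as plt

    def dynamicFigure(nbResults, minSize=10):
        # Width in inches tracks the bar count, floored at minSize;
        # height follows a roughly 4:3 ratio plus a constant margin
        # left for the vertical tick labels.
        size = max(nbResults, minSize)
        return plt.subplots(nrows=1, ncols=1, figsize=(size, 3.0 / 4 * size + 2.0))

    f, ax = dynamicFigure(25)
    ax.bar(np.arange(25), np.random.rand(25), 0.35, color="r")
    plt.close(f)

With 25 classifiers this yields a 25 x 20.75 inch figure, so the bar labels stay legible however many results are plotted, while small benchmarks still get at least a 10-inch-wide canvas.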
@@ -94,7 +91,8 @@ def analyzeLabels(labelsArrays, realLabels, results, directory):
     for classifierIndex in range(nbClassifiers):
         for iterIndex in range(nbIter):
             data[:, classifierIndex * nbIter + iterIndex] = tempData[classifierIndex, :]
-    fig = pylab.figure(figsize=(10, 20))
+    figKW = {"figsize":(nbClassifiers/2, nbExamples/20)}
+    fig, ax = plt.subplots(nrows=1, ncols=1, **figKW)
     cmap = mpl.colors.ListedColormap(['red', 'green'])
     bounds = [-0.5, 0.5, 1.5]
     norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
@@ -106,11 +104,12 @@ def analyzeLabels(labelsArrays, realLabels, results, directory):
     plt.xticks(ticks, labels, rotation="vertical")
     cbar = fig.colorbar(cax, ticks=[0, 1])
     cbar.ax.set_yticklabels(['Wrong', ' Right'])
+    fig.tight_layout()
     fig.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-error_analysis.png")
     plt.close()
 
 
-def genScoresNames(iterResults, metric, nbResults, names, nbMono):
+def genScoresNames(iterResults, metric, nbResults, names, nbMono, minSize=10):
     nbIter = len(iterResults)
     validationScores = np.zeros((nbIter, nbResults))
     trainScores = np.zeros((nbIter, nbResults))
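
The same idea is applied to the error-analysis matrix: the figure now scales with the data rather than the fixed (10, 20) canvas, at half an inch per classifier in width and an inch per twenty examples in height. A sketch under assumed dimensions (the 200 x 8 shape and the imshow call are assumptions; the hunk elides the actual plotting call that produces cax):

    import numpy as np
    import matplotlib as mpl
    import matplotlib.pyplot as plt

    nbExamples, nbClassifiers = 200, 8   # hypothetical dataset shape
    data = np.random.randint(0, 2, (nbExamples, nbClassifiers))

    figKW = {"figsize": (nbClassifiers / 2, nbExamples / 20)}   # as in the diff
    fig, ax = plt.subplots(nrows=1, ncols=1, **figKW)
    cmap = mpl.colors.ListedColormap(['red', 'green'])
    norm = mpl.colors.BoundaryNorm([-0.5, 0.5, 1.5], cmap.N)
    cax = ax.imshow(data, cmap=cmap, norm=norm, aspect='auto')
    cbar = fig.colorbar(cax, ticks=[0, 1])
    cbar.ax.set_yticklabels(['Wrong', ' Right'])
    fig.tight_layout()
    plt.close(fig)

Note that under Python 2 (which this pylab-era code likely targets) these divisions are integer divisions, so both dimensions are rounded down to whole inches.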
@@ -121,21 +120,16 @@ def genScoresNames(iterResults, metric, nbResults, names, nbMono):
         trainScores[iterIndex, :nbMono] = np.array([float(res[1][2][metric[0]][0]) for res in mono])
         trainScores[iterIndex, nbMono:] = np.array([float(scores[metric[0]][0]) for a, b, scores, c in multi])
 
-    # import pdb;pdb.set_trace()
     validationSTDs = np.std(validationScores, axis=0)
     trainSTDs = np.std(trainScores, axis=0)
     validationMeans = np.mean(validationScores, axis=0)
     trainMeans = np.mean(trainScores, axis=0)
-    f = pylab.figure(figsize=(40, 30))
-    width = 0.35 # the width of the bars
-    fig = plt.gcf()
-    fig.subplots_adjust(bottom=105.0, top=105.01)
-    ax = f.add_axes([0.1, 0.1, 0.8, 0.8])
-    if metric[1] is not None:
-        metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
-    else:
-        metricKWARGS = {}
+    size=nbResults
+    if nbResults<minSize:
+        size=minSize
+    figKW = {"figsize" : (size, 3.0/4*size+2.0)}
+    f, ax = plt.subplots(nrows=1, ncols=1, **figKW)
+    barWidth = 0.35 # the width of the bars
     sorted_indices = np.argsort(validationMeans)
     validationMeans = validationMeans[sorted_indices]
     validationSTDs = validationSTDs[sorted_indices]
@@ -143,14 +137,16 @@ def genScoresNames(iterResults, metric, nbResults, names, nbMono):
     trainMeans = trainMeans[sorted_indices]
     names = np.array(names)[sorted_indices]
 
-    ax.set_title(getattr(Metrics, metric[0]).getConfig(**metricKWARGS) + " for each classifier")
-    rects = ax.bar(range(nbResults), validationMeans, width, color="r", yerr=validationSTDs)
-    rect2 = ax.bar(np.arange(nbResults) + width, trainMeans, width, color="0.7", yerr=trainSTDs)
+    ax.set_title(metric[0] + " for each classifier")
+    rects = ax.bar(range(nbResults), validationMeans, barWidth, color="r", yerr=validationSTDs)
+    rect2 = ax.bar(np.arange(nbResults) + barWidth, trainMeans, barWidth, color="0.7", yerr=trainSTDs)
     autolabel(rects, ax)
    autolabel(rect2, ax)
+    ax.set_ylim(-0.1, 1.1)
     ax.legend((rects[0], rect2[0]), ('Test', 'Train'))
-    ax.set_xticks(np.arange(nbResults) + width)
+    ax.set_xticks(np.arange(nbResults) + barWidth)
     ax.set_xticklabels(names, rotation="vertical")
+    f.tight_layout()
     return f
...
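
Two smaller changes ride along in these hunks: the y-axis is pinned to (-0.1, 1.1) so figures for [0, 1]-valued metrics stay comparable across runs, and tight_layout() replaces the old subplots_adjust(bottom=105.0, top=105.01) call (those parameters are fractions of the figure height, so values above 1 never produced sensible margins). A sketch of the resulting grouped error-bar chart, with made-up classifier names and scores:

    import numpy as np
    import matplotlib.pyplot as plt

    names = np.array(["SVM", "DT", "RF", "Fusion"])       # hypothetical classifiers
    validationMeans = np.array([0.61, 0.72, 0.78, 0.83])  # hypothetical scores
    validationSTDs = np.array([0.05, 0.04, 0.03, 0.02])
    trainMeans = np.array([0.70, 0.88, 0.95, 0.91])
    trainSTDs = np.array([0.04, 0.02, 0.01, 0.02])

    nbResults = len(names)
    barWidth = 0.35
    size = max(nbResults, 10)                  # the minSize floor from the diff
    f, ax = plt.subplots(figsize=(size, 3.0 / 4 * size + 2.0))

    order = np.argsort(validationMeans)        # sort every array by validation mean
    rects = ax.bar(np.arange(nbResults), validationMeans[order], barWidth,
                   color="r", yerr=validationSTDs[order])
    rect2 = ax.bar(np.arange(nbResults) + barWidth, trainMeans[order], barWidth,
                   color="0.7", yerr=trainSTDs[order])
    ax.set_ylim(-0.1, 1.1)                     # fixed scale for [0, 1] metrics
    ax.legend((rects[0], rect2[0]), ('Test', 'Train'))
    ax.set_xticks(np.arange(nbResults) + barWidth)
    ax.set_xticklabels(names[order], rotation="vertical")
    f.tight_layout()
    plt.close(f)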