Commit 71d49e95 authored by bbauvin

Cleaned a bit

parent 5d02446e

@@ -90,14 +90,7 @@ def getExampleErrorsBiclass(usedBenchmarkArgumentDictionary, monoviewResults, mu
     return exampleErrors


-def publishMetricsGraphs(metricsScores, directory, databaseName, labelsNames,minSize=10):
-    for metricName, metricScores in metricsScores.items():
-        logging.debug("Start:\t Biclass score graph generation for "+metricName)
-        trainScores = metricScores["trainScores"]
-        testScores = metricScores["testScores"]
-        names = metricScores["classifiersNames"]
-        nbResults = len(testScores)
+def plotMetricOneIter(trainScores, testScores, names, nbResults, metricName, fileName, minSize=10):
     testScores = np.array(testScores)
     trainScores = np.array(trainScores)
     names = np.array(names)
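
The refactor in this hunk and the next one splits the old publishMetricsGraphs into a per-metric loop and a drawing helper, plotMetricOneIter, that only receives precomputed scores and a target file name. A self-contained approximation of that helper, reconstructed from the fragments visible in this commit (the sort order, the figure sizing and the dropped autolabel annotations are assumptions, and the function name here is my own), is:

    import numpy as np
    import matplotlib
    matplotlib.use("Agg")  # render to files, no display needed
    import matplotlib.pyplot as plt

    def plot_metric_sketch(trainScores, testScores, names, metricName, fileName, minSize=10):
        # Convert to arrays and co-sort everything by test score (assumed ordering).
        testScores, trainScores, names = map(np.array, (testScores, trainScores, names))
        order = np.argsort(testScores)
        testScores, trainScores, names = testScores[order], trainScores[order], names[order]
        nbResults = len(testScores)
        size = max(nbResults, minSize)
        f, ax = plt.subplots(figsize=(size, 3.0 / 4 * size + 2.0))
        barWidth = 0.35
        # Paired bars: test scores in red, train scores in grey, one pair per classifier.
        rects = ax.bar(range(nbResults), testScores, barWidth, color="r")
        rect2 = ax.bar(np.arange(nbResults) + barWidth, trainScores, barWidth, color="0.7")
        ax.legend((rects[0], rect2[0]), ("Test", "Train"))
        ax.set_title(metricName)
        ax.set_xticks(np.arange(nbResults) + barWidth)
        ax.set_xticklabels(names, rotation="vertical")
        plt.tight_layout()
        f.savefig(fileName)
        plt.close()

    plot_metric_sketch([0.92, 0.88], [0.81, 0.84], ["DecisionTree-View0", "Adaboost-View1"],
                       "accuracy_score", "accuracy_score.png")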

@@ -122,8 +115,19 @@ def publishMetricsGraphs(metricsScores, directory, databaseName, labelsNames,min
     ax.set_xticks(np.arange(nbResults) + barWidth)
     ax.set_xticklabels(names, rotation="vertical")
     plt.tight_layout()
-    f.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-" + databaseName +"-"+"vs".join(labelsNames)+ "-" + metricName + ".png")
+    f.savefig(fileName)
     plt.close()


+def publishMetricsGraphs(metricsScores, directory, databaseName, labelsNames):
+    for metricName, metricScores in metricsScores.items():
+        logging.debug("Start:\t Biclass score graph generation for "+metricName)
+        trainScores = metricScores["trainScores"]
+        testScores = metricScores["testScores"]
+        names = metricScores["classifiersNames"]
+        nbResults = len(testScores)
+        fileName = directory + databaseName +"-"+"vs".join(labelsNames)+ "-" + metricName + ".png"
+        plotMetricOneIter(trainScores, testScores, names, nbResults, metricName, fileName)
         logging.debug("Done:\t Biclass score graph generation for " + metricName)

@@ -139,6 +143,7 @@ def publishExampleErrors(exampleErrors, directory, databaseName, labelsNames,min
         for iterIndex in range(nbIter):
             data[:, classifierIndex * nbIter + iterIndex] = errorOnExamples
         temp_data[:,classifierIndex] = errorOnExamples
+
     figWidth = max(nbClassifiers/2, minSize)
     figHeight = max(nbExamples/20, minSize)
     figKW = {"figsize":(figWidth, figHeight)}

@@ -147,7 +152,7 @@ def publishExampleErrors(exampleErrors, directory, databaseName, labelsNames,min
     bounds = [-100.5,-0.5, 0.5, 1.5]
     norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
-    cax = plt.imshow(data, interpolation='none', cmap=cmap, norm=norm, aspect='auto')
+    plt.imshow(data, interpolation='none', cmap=cmap, norm=norm, aspect='auto')
     plt.title('Errors depending on the classifier')
     ticks = np.arange(nbIter/2-0.5, nbClassifiers * nbIter, nbIter)
     labels = classifiersNames

@@ -155,22 +160,27 @@ def publishExampleErrors(exampleErrors, directory, databaseName, labelsNames,min
     red_patch = mpatches.Patch(color='red', label='Classifier failed')
     green_patch = mpatches.Patch(color='green', label='Classifier succeded')
     black_patch = mpatches.Patch(color='black', label='Unseen data')
-    plt.legend(handles=[red_patch, green_patch, black_patch], bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",mode="expand", borderaxespad=0, ncol=3)
+    plt.legend(handles=[red_patch, green_patch, black_patch],
+               bbox_to_anchor=(0,1.02,1,0.2),
+               loc="lower left",
+               mode="expand",
+               borderaxespad=0,
+               ncol=3)
     fig.tight_layout()
-    fig.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-" + databaseName +"-"+"vs".join(labelsNames)+ "-error_analysis.png", bbox_inches="tight")
+    fig.savefig(directory + databaseName +"-"+"vs".join(labelsNames)+ "-error_analysis.png", bbox_inches="tight")
     plt.close()
     logging.debug("Done:\t Biclass Label analysis figure generation")
     logging.debug("Start:\t Biclass Error by example figure generation")
     errorOnExamples = -1*np.sum(data, axis=1)/nbIter+nbClassifiers
-    np.savetxt(directory + time.strftime("%Y%m%d-%H%M%S") + "-clf_errors_doubled.csv", data, delimiter=",")
-    np.savetxt(directory + time.strftime("%Y%m%d-%H%M%S") + "-example_errors.csv", temp_data, delimiter=",")
+    np.savetxt(directory + "-clf_errors_doubled.csv", data, delimiter=",")
+    np.savetxt(directory + "-example_errors.csv", temp_data, delimiter=",")
     fig, ax = plt.subplots()
     x = np.arange(nbExamples)
     plt.bar(x, errorOnExamples)
     plt.ylim([0,nbClassifiers])
     plt.title("Number of classifiers that failed to classify each example")
-    fig.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-" + databaseName +"-"+"vs".join(labelsNames)+ "-example_errors.png")
+    fig.savefig(directory + databaseName +"-"+"vs".join(labelsNames)+ "-example_errors.png")
     plt.close()
     logging.debug("Done:\t Biclass Error by example figure generation")

@@ -660,199 +670,200 @@ def getResults(results, statsIter, nbMulticlass, benchmarkArgumentDictionaries,
(In the new version of the file, the entire block below is commented out: every removed line reappears verbatim behind a leading "# ".)
-def genFusionName(type_, a, b, c):
-    """Used to generate fusion classifiers names"""
-    if type_ == "Fusion" and a["fusionType"] != "EarlyFusion":
-        return "Late-" + str(a["fusionMethod"])
-    elif type_ == "Fusion" and a["fusionType"] != "LateFusion":
-        return "Early-" + a["fusionMethod"] + "-" + a["classifiersNames"]
-
-
-def genNamesFromRes(mono, multi):
-    """Used to generate classifiers names list (inthe right order) from mono- and multi-view preds"""
-    names = [res[1][0] + "-" + res[1][1][-1] for res in mono]
-    names += [type_ if type_ != "Fusion" else genFusionName(type_, a, b, c) for type_, a, b, c in multi]
-    return names
-
-
-def resultAnalysis(benchmark, results, name, times, metrics, directory, minSize=10):
-    """Used to generate bar graphs of all the classifiers scores for each metric """
-    mono, multi = results
-    for metric in metrics:
-        logging.debug("Start:\t Score graph generation for "+metric[0])
-        names = genNamesFromRes(mono, multi)
-        nbResults = len(mono) + len(multi)
-        validationScores = [float(res[1][2][metric[0]][1]) for res in mono]
-        validationScores += [float(scores[metric[0]][1]) for a, b, scores, c in multi]
-        trainScores = [float(res[1][2][metric[0]][0]) for res in mono]
-        trainScores += [float(scores[metric[0]][0]) for a, b, scores, c in multi]
-
-        validationScores = np.array(validationScores)
-        trainScores = np.array(trainScores)
-        names = np.array(names)
-        sorted_indices = np.argsort(validationScores)
-        validationScores = validationScores[sorted_indices]
-        trainScores = trainScores[sorted_indices]
-        names = names[sorted_indices]
-
-        size = nbResults
-        if nbResults < minSize:
-            size = minSize
-        figKW = {"figsize" : (size, 3.0/4*size+2.0)}
-        f, ax = plt.subplots(nrows=1, ncols=1, **figKW)
-        barWidth= 0.35
-        ax.set_title(metric[0] + "\n on validation set for each classifier")
-        rects = ax.bar(range(nbResults), validationScores, barWidth, color="r", )
-        rect2 = ax.bar(np.arange(nbResults) + barWidth, trainScores, barWidth, color="0.7", )
-        autolabel(rects, ax)
-        autolabel(rect2, ax)
-        ax.legend((rects[0], rect2[0]), ('Test', 'Train'))
-        ax.set_ylim(-0.1, 1.1)
-        ax.set_xticks(np.arange(nbResults) + barWidth)
-        ax.set_xticklabels(names, rotation="vertical")
-        plt.tight_layout()
-        f.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-" + name + "-" + metric[0] + ".png")
-        plt.close()
-        logging.debug("Done:\t Score graph generation for " + metric[0])
-
-
-def analyzeLabels(labelsArrays, realLabels, results, directory, minSize = 10):
-    """Used to generate a graph showing errors on each example depending on classifier"""
-    logging.debug("Start:\t Label analysis figure generation")
-    mono, multi = results
-    classifiersNames = genNamesFromRes(mono, multi)
-    nbClassifiers = len(classifiersNames)
-    nbExamples = realLabels.shape[0]
-    nbIter = 2
-    data = np.zeros((nbExamples, nbClassifiers * nbIter))
-    tempData = np.array([labelsArray == realLabels for labelsArray in np.transpose(labelsArrays)]).astype(int)
-    for classifierIndex in range(nbClassifiers):
-        for iterIndex in range(nbIter):
-            data[:, classifierIndex * nbIter + iterIndex] = tempData[classifierIndex, :]
-    figWidth = max(nbClassifiers/2, minSize)
-    figHeight = max(nbExamples/20, minSize)
-    figKW = {"figsize":(figWidth, figHeight)}
-    fig, ax = plt.subplots(nrows=1, ncols=1, **figKW)
-    cmap = mpl.colors.ListedColormap(['red', 'green'])
-    bounds = [-0.5, 0.5, 1.5]
-    norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
-
-    cax = plt.imshow(data, interpolation='none', cmap=cmap, norm=norm, aspect='auto')
-    plt.title('Errors depending on the classifier')
-    ticks = np.arange(nbIter/2-0.5, nbClassifiers * nbIter, nbIter)
-    labels = classifiersNames
-    plt.xticks(ticks, labels, rotation="vertical")
-    cbar = fig.colorbar(cax, ticks=[0, 1])
-    cbar.ax.set_yticklabels(['Wrong', ' Right'])
-    fig.tight_layout()
-    fig.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-error_analysis.png")
-    plt.close()
-    logging.debug("Done:\t Label analysis figure generation")
-
-    logging.debug("Start:\t Error by example figure generation")
-    errorOnExamples = -1*np.sum(data, axis=1)/nbIter+nbClassifiers
-    np.savetxt(directory + time.strftime("%Y%m%d-%H%M%S") + "-clf_errors.csv", data, delimiter=",")
-    np.savetxt(directory + time.strftime("%Y%m%d-%H%M%S") + "-example_errors.csv", errorOnExamples, delimiter=",")
-    fig, ax = plt.subplots()
-    x = np.arange(nbExamples)
-    plt.bar(x, errorOnExamples)
-    plt.ylim([0,nbClassifiers])
-    plt.title("Number of classifiers that failed to classify each example")
-    fig.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-example_errors.png")
-    plt.close()
-    logging.debug("Done:\t Error by example figure generation")
-    return data
-
-
-def analyzeIterLabels(labelsAnalysisList, directory, classifiersNames, minSize=10):
-    """Used to generate a graph showing errors on each example depending on classifierusing a score
-    if multiple iterations"""
-    logging.debug("Start:\t Global label analysis figure generation")
-    nbExamples = labelsAnalysisList[0].shape[0]
-    nbClassifiers = len(classifiersNames)
-    nbIter = 2
-
-    figWidth = max(nbClassifiers / 2, minSize)
-    figHeight = max(nbExamples / 20, minSize)
-    figKW = {"figsize": (figWidth, figHeight)}
-    fig, ax = plt.subplots(nrows=1, ncols=1, **figKW)
-    data = sum(labelsAnalysisList)
-    cax = plt.imshow(-data, interpolation='none', cmap="Greys", aspect='auto')
-    plt.title('Errors depending on the classifier')
-    ticks = np.arange(nbIter/2-0.5, nbClassifiers * nbIter, nbIter)
-    plt.xticks(ticks, classifiersNames, rotation="vertical")
-    cbar = fig.colorbar(cax, ticks=[0, -len(labelsAnalysisList)])
-    cbar.ax.set_yticklabels(['Always Wrong', 'Always Right'])
-    fig.tight_layout()
-    fig.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-error_analysis.png")
-    plt.close()
-    logging.debug("Done:\t Global label analysis figure generation")
-    logging.debug("Start:\t Global error by example figure generation")
-    errorOnExamples = -1 * np.sum(data, axis=1) / nbIter + (nbClassifiers*len(labelsAnalysisList))
-    np.savetxt(directory + time.strftime("%Y%m%d-%H%M%S") + "-clf_errors.csv", data, delimiter=",")
-    np.savetxt(directory + time.strftime("%Y%m%d-%H%M%S") + "-example_errors.csv", errorOnExamples, delimiter=",")
-    fig, ax = plt.subplots()
-    x = np.arange(nbExamples)
-    plt.bar(x, errorOnExamples)
-    plt.ylim([0,nbClassifiers*len(labelsAnalysisList)])
-    plt.title("Number of classifiers that failed to classify each example")
-    fig.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-example_errors.png")
-    plt.close()
-    logging.debug("Done:\t Global error by example figure generation")
-
-
-def genFig(iterResults, metric, nbResults, names, nbMono, minSize=10):
-    """Used to generate the bar graph representing the mean scores of each classifiers if multiple iteration
-    with different random states"""
-    nbIter = len(iterResults)
-    validationScores = np.zeros((nbIter, nbResults))
-    trainScores = np.zeros((nbIter, nbResults))
-    for iterIndex, iterResult in enumerate(iterResults):
-        mono, multi = iterResult
-        validationScores[iterIndex, :nbMono] = np.array([float(res[1][2][metric[0]][1]) for res in mono])
-        validationScores[iterIndex, nbMono:] = np.array([float(scores[metric[0]][1]) for a, b, scores, c in multi])
-        trainScores[iterIndex, :nbMono] = np.array([float(res[1][2][metric[0]][0]) for res in mono])
-        trainScores[iterIndex, nbMono:] = np.array([float(scores[metric[0]][0]) for a, b, scores, c in multi])
-
-    validationSTDs = np.std(validationScores, axis=0)
-    trainSTDs = np.std(trainScores, axis=0)
-    validationMeans = np.mean(validationScores, axis=0)
-    trainMeans = np.mean(trainScores, axis=0)
-    size=nbResults
-    if nbResults<minSize:
-        size=minSize
-    figKW = {"figsize" : (size, 3.0/4*size+2.0)}
-    f, ax = plt.subplots(nrows=1, ncols=1, **figKW)
-    barWidth = 0.35 # the width of the bars
-    sorted_indices = np.argsort(validationMeans)
-    validationMeans = validationMeans[sorted_indices]
-    validationSTDs = validationSTDs[sorted_indices]
-    trainSTDs = trainSTDs[sorted_indices]
-    trainMeans = trainMeans[sorted_indices]
-    names = np.array(names)[sorted_indices]
-
-    ax.set_title(metric[0] + " for each classifier")
-    rects = ax.bar(range(nbResults), validationMeans, barWidth, color="r", yerr=validationSTDs)
-    rect2 = ax.bar(np.arange(nbResults) + barWidth, trainMeans, barWidth, color="0.7", yerr=trainSTDs)
-    autolabel(rects, ax)
-    autolabel(rect2, ax)
-    ax.set_ylim(-0.1, 1.1)
-    ax.legend((rects[0], rect2[0]), ('Test', 'Train'))
-    ax.set_xticks(np.arange(nbResults) + barWidth)
-    ax.set_xticklabels(names, rotation="vertical")
-    f.tight_layout()
-
-    return f
-
-
-def analyzeIterResults(iterResults, name, metrics, directory):
-    nbResults = len(iterResults[0][0]) + len(iterResults[0][1])
-    nbMono = len(iterResults[0][0])
-    nbIter = len(iterResults)
-    names = genNamesFromRes(iterResults[0][0], iterResults[0][1])
-    for metric in metrics:
-        logging.debug("Start:\t Global score graph generation for " + metric[0])
-        figure = genFig(iterResults, metric, nbResults, names, nbMono)
-        figure.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-" + name + "-Mean_on_"
-                       + str(nbIter) + "_iter-" + metric[0] + ".png")
-        logging.debug("Done:\t Global score graph generation for " + metric[0])