Commit 4b090294 authored by bbauvin

Everything seems to be working

parent 89207d22
@@ -447,7 +447,7 @@ groupClass.add_argument('--CL_algos_multiview', metavar='STRING', action='store'
 groupClass.add_argument('--CL_cores', metavar='INT', action='store', help='Number of cores, -1 for all', type=int,
                         default=2)
 groupClass.add_argument('--CL_statsiter', metavar='INT', action='store',
-                        help='Number of iteration for each algorithm to mean results', type=int,
+                        help="Number of iterations for each algorithm to mean results; if using multiple cores, it's highly recommended to use statsiter mod(nbCores) = 0", type=int,
                         default=2)
 groupClass.add_argument('--CL_metrics', metavar='STRING', action='store', nargs="+",
                         help='Determine which metrics to use, separate metric and configuration with ":".'
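The new help text recommends picking --CL_statsiter so that statsiter mod(nbCores) = 0. A minimal sketch of that constraint in Python, assuming (this rationale is not shown in the commit) that the statistical iterations are dispatched to the cores in batches of nbCores; the helper name is hypothetical:

    def round_stats_iter(statsIter, nbCores):
        # Smallest multiple of nbCores >= statsIter, so statsIter % nbCores == 0
        # and no core sits idle on the last batch of iterations.
        if nbCores <= 1:
            return statsIter
        return ((statsIter + nbCores - 1) // nbCores) * nbCores

    assert round_stats_iter(5, 4) == 8   # 5 iterations on 4 cores round up to 8
    assert round_stats_iter(8, 4) == 8   # already a multiple, unchanged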
@@ -106,22 +106,18 @@ def analyzeLabels(labelsArrays, realLabels, results, directory):
     fig.savefig(directory+time.strftime("%Y%m%d-%H%M%S")+"-error_analysis.png")
 
-def genScoresNames(iterResults, metric, nbResults, names):
-    validationScores = []
-    trainScores = []
-    for iterindex, iterResult in enumerate(iterResults):
+def genScoresNames(iterResults, metric, nbResults, names, nbMono):
+    nbIter = len(iterResults)
+    validationScores = np.zeros((nbIter, nbResults))
+    trainScores = np.zeros((nbIter, nbResults))
+    for iterIndex, iterResult in enumerate(iterResults):
         mono, multi = iterResult
-        import pdb;pdb.set_trace()
-        validationScores[iterindex] = [float(res[1][2][metric[0]][1]) for res in mono]
-        validationScores[iterindex] += [float(scores[metric[0]][1]) for a, b, scores, c in multi]
-        trainScores[iterindex] = [float(res[1][1][2][metric[0]][0]) for res in mono]
-        trainScores[iterindex] += [float(scores[metric[0]][0]) for a, b, scores, c in multi]
-        validationScores[iterindex] = np.array(validationScores)
-        trainScores[iterindex] = np.array(trainScores)
-    validationScores = np.array(validationScores)
-    trainScores = np.array(trainScores)
+        validationScores[iterIndex, :nbMono] = np.array([float(res[1][2][metric[0]][1]) for res in mono])
+        validationScores[iterIndex, nbMono:] = np.array([float(scores[metric[0]][1]) for a, b, scores, c in multi])
+        trainScores[iterIndex, :nbMono] = np.array([float(res[1][2][metric[0]][0]) for res in mono])
+        trainScores[iterIndex, nbMono:] = np.array([float(scores[metric[0]][0]) for a, b, scores, c in multi])
+    # import pdb;pdb.set_trace()
     validationSTDs = np.std(validationScores, axis=0)
     trainSTDs = np.std(trainScores, axis=0)
     validationMeans = np.mean(validationScores, axis=0)
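This rewrite fixes the previous version, which indexed into empty lists (validationScores[iterindex] = ... raises IndexError on a fresh list) and left a live pdb breakpoint inside the loop. The new code preallocates (nbIter, nbResults) matrices, fills columns [:nbMono] with monoview scores and [nbMono:] with multiview scores, then aggregates over the iteration axis. A standalone sketch of the pattern, with fabricated placeholder scores:

    import numpy as np

    nbIter, nbMono, nbMulti = 3, 2, 2
    nbResults = nbMono + nbMulti
    scores = np.zeros((nbIter, nbResults))
    for iterIndex in range(nbIter):
        monoScores = [0.7, 0.8]        # placeholder monoview scores
        multiScores = [0.75, 0.85]     # placeholder multiview scores
        scores[iterIndex, :nbMono] = monoScores
        scores[iterIndex, nbMono:] = multiScores
    means = np.mean(scores, axis=0)    # one mean per classifier, as in the diff
    stds = np.std(scores, axis=0)      # one std per classifier, used as error bars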
@@ -138,8 +134,10 @@ def genScoresNames(iterResults, metric, nbResults, names):
     metricKWARGS = {}
     sorted_indices = np.argsort(validationMeans)
     validationMeans = validationMeans[sorted_indices]
+    validationSTDs = validationSTDs[sorted_indices]
+    trainSTDs = trainSTDs[sorted_indices]
     trainMeans = trainMeans[sorted_indices]
-    names = names[sorted_indices]
+    names = np.array(names)[sorted_indices]
     ax.set_title(getattr(Metrics, metric[0]).getConfig(**metricKWARGS)+" for each classifier")
     rects = ax.bar(range(nbResults), validationMeans, width, color="r", yerr=validationSTDs)
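The two added lines fix a plotting bug: validationMeans was sorted before being drawn, but the standard deviations passed to ax.bar as yerr were not, so each error bar was attached to the wrong bar. A small sketch with placeholder values showing why every companion array must be permuted with the same sorted_indices:

    import numpy as np

    validationMeans = np.array([0.9, 0.6, 0.8])
    validationSTDs = np.array([0.02, 0.05, 0.01])
    sorted_indices = np.argsort(validationMeans)        # [1, 2, 0]
    validationMeans = validationMeans[sorted_indices]   # [0.6, 0.8, 0.9]
    validationSTDs = validationSTDs[sorted_indices]     # [0.05, 0.01, 0.02] stays paired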
@@ -155,9 +153,11 @@ def genScoresNames(iterResults, metric, nbResults, names):
 def analyzeIterResults(iterResults, name, metrics, directory):
     nbResults = len(iterResults[0][0])+len(iterResults[0][1])
+    nbMono = len(iterResults[0][0])
+    nbMulti = len(iterResults[0][1])
     nbIter = len(iterResults)
     names = genNamesFromRes(iterResults[0][0], iterResults[0][1])
     for metric in metrics:
-        figure = genScoresNames(iterResults, metric, nbResults, names)
+        figure = genScoresNames(iterResults, metric, nbResults, names, nbMono)
         figure.savefig(directory+time.strftime("%Y%m%d-%H%M%S")+"-"+name+"-Mean_on_"
                        +str(nbIter)+"_iter-"+metric[0]+".png")
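analyzeIterResults now derives nbMono from the first iteration's (monoviewResults, multiviewResults) pair and forwards it to genScoresNames so the score matrix can be split by classifier kind (nbMulti is computed but not yet used in this hunk). A sketch of the iterResults shape this assumes, with fabricated contents:

    iterResults = [
        (["monoRes1", "monoRes2"], ["multiRes1"]),   # iteration 0
        (["monoRes1", "monoRes2"], ["multiRes1"]),   # iteration 1
    ]
    nbMono = len(iterResults[0][0])     # 2 monoview classifiers
    nbMulti = len(iterResults[0][1])    # 1 multiview classifier
    nbResults = nbMono + nbMulti        # bars per metric in the mean plot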