Baptiste Bauvin / Supervised MultiModal Integration Tool / Commits

Commit 89207d22
Authored 7 years ago by bbauvin
Working on global analysis
Parent: 4e34c21a
No related merge requests found.
Showing 2 changed files with 81 additions and 19 deletions:

Code/MonoMutliViewClassifiers/ExecClassif.py     +23  −18
Code/MonoMutliViewClassifiers/ResultAnalysis.py  +58  −1
Code/MonoMutliViewClassifiers/ExecClassif.py  (+23, −18)
@@ -24,7 +24,7 @@ from Multiview.ExecMultiview import ExecMultiview, ExecMultiview_multicore
 from Monoview.ExecClassifMonoView import ExecMonoview, ExecMonoview_multicore
 import Multiview.GetMultiviewDb as DB
 from Versions import testVersions
-from ResultAnalysis import resultAnalysis, analyzeLabels
+from ResultAnalysis import resultAnalysis, analyzeLabels, analyzeIterResults

 # Author-Info
 __author__ = "Baptiste Bauvin"
@@ -278,8 +278,9 @@ def genDirecortiesNames(directory, statsIter):
 def classifyOneIter_multicore(LABELS_DICTIONARY, argumentDictionaries, nbCores, directory, args, classificationIndices, kFolds, randomState, hyperParamSearch, metrics, coreIndex, viewsIndices, dataBaseTime, start, benchmark, views):
+    resultsMonoview = []
     labelsNames = LABELS_DICTIONARY.values()
-    resultsMonoview = [ExecMonoview_multicore(directory, args.name, labelsNames, classificationIndices, kFolds,
+    resultsMonoview += [ExecMonoview_multicore(directory, args.name, labelsNames, classificationIndices, kFolds,
                                               coreIndex, args.type, args.pathF, randomState, hyperParamSearch=hyperParamSearch, metrics=metrics, nIter=args.CL_GS_iter,
@@ -289,7 +290,8 @@ def classifyOneIter_multicore(LABELS_DICTIONARY, argumentDictionaries, nbCores,
     argumentDictionaries = initMultiviewArguments(args, benchmark, views, viewsIndices, argumentDictionaries, randomState, directory, resultsMonoview)
-    resultsMultiview = [
+    resultsMultiview = []
+    resultsMultiview += [
         ExecMultiview_multicore(directory, coreIndex, args.name, classificationIndices, kFolds, args.type, args.pathF, LABELS_DICTIONARY, randomState, hyperParamSearch=hyperParamSearch, metrics=metrics, nIter=args.CL_GS_iter, **arguments)
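For context, both of the hunks above switch from building the result list in a single assignment to initialising an empty list and extending it with "+=", so results from several successive calls accumulate in the same list. A minimal sketch of that pattern, where run_view() is a hypothetical stand-in for ExecMonoview_multicore / ExecMultiview_multicore and is not part of the repository:

    # Minimal sketch of the accumulate-with-"+=" pattern introduced above.
    # run_view() is a hypothetical placeholder, not a function from the project.
    def run_view(view_name):
        return {"view": view_name, "score": 0.5}

    results = []                        # start from an empty list
    for view in ["MRI", "Clinical"]:
        results += [run_view(view)]     # += extends the list, earlier entries are kept
    print(len(results))                 # prints 2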
@@ -302,17 +304,17 @@ def classifyOneIter_multicore(LABELS_DICTIONARY, argumentDictionaries, nbCores,
     trueLabels = DATASET.get("Labels").value
     times = [dataBaseTime, monoviewTime, multiviewTime]
     results = (resultsMonoview, resultsMultiview)
-    analyzeLabels(labels, trueLabels, results, directory)
-    logging.debug("Start:\t Analyze Global Results")
-    resultAnalysis(benchmark, results, args.name, times, metrics, directory)
-    logging.debug("Done:\t Analyze Global Results")
-    globalAnalysisTime = time.time() - monoviewTime - dataBaseTime - start - multiviewTime
-    totalTime = time.time() - start
-    logging.info("Extraction time : "+str(dataBaseTime)+
-                 "s, Monoview time : "+str(monoviewTime)+
-                 "s, Multiview Time : "+str(multiviewTime)+
-                 "s, Global Analysis Time : "+str(globalAnalysisTime)+
-                 "s, Total Duration : "+str(totalTime)+"s")
+    # analyzeLabels(labels, trueLabels, results, directory)
+    # logging.debug("Start:\t Analyze Global Results for iteration")
+    # resultAnalysis(benchmark, results, args.name, times, metrics, directory)
+    # logging.debug("Done:\t Analyze Global Results for iteration")
+    # globalAnalysisTime = time.time() - monoviewTime - dataBaseTime - start - multiviewTime
+    # totalTime = time.time() - start
+    # logging.info("Extraction time : "+str(dataBaseTime)+
+    #              "s, Monoview time : "+str(monoviewTime)+
+    #              "s, Multiview Time : "+str(multiviewTime)+
+    #              "s, Global Analysis Time : "+str(globalAnalysisTime)+
+    #              "s, Total Duration : "+str(totalTime)+"s")
     return results
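The timing lines in this hunk subtract the earlier stage durations from the total elapsed time to isolate the analysis stage. A small, self-contained sketch of that bookkeeping, where the sleep() calls are invented stand-ins for the real extraction and classification work:

    import time

    # Hypothetical stage timings; sleep() stands in for the real work of each stage.
    start = time.time()
    time.sleep(0.1)
    dataBaseTime = time.time() - start
    time.sleep(0.1)
    monoviewTime = time.time() - start - dataBaseTime
    time.sleep(0.1)
    multiviewTime = time.time() - start - dataBaseTime - monoviewTime
    # Whatever remains once the three stage durations are removed is the analysis time,
    # which is the same subtraction (in a different order) as in classifyOneIter_multicore.
    globalAnalysisTime = time.time() - monoviewTime - dataBaseTime - start - multiviewTime
    totalTime = time.time() - start
    print("%.2fs total = %.2f + %.2f + %.2f + %.2f" %
          (totalTime, dataBaseTime, monoviewTime, multiviewTime, globalAnalysisTime))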
@@ -382,7 +384,6 @@ def classifyOneIter(LABELS_DICTIONARY, argumentDictionaries, nbCores, directory,
                  "s, Multiview Time : "+str(multiviewTime)+
                  "s, Global Analysis Time : "+str(globalAnalysisTime)+
                  "s, Total Duration : "+str(totalTime)+"s")
     return results

 def initRandomState(randomStateArg, directory):
@@ -642,8 +643,12 @@ if statsIter>1:
     logging.debug("Start:\t Deleting "+str(nbCores)+" temporary datasets for multiprocessing")
     datasetFiles = DB.deleteHDF5(args.pathF, args.name, nbCores)
     logging.debug("Start:\t Deleting datasets for multiprocessing")
+    analyzeIterResults(iterResults, args.name, metrics, directory)
 else:
-    res = classifyOneIter(LABELS_DICTIONARY, argumentDictionaries, nbCores, directories, args, classificationIndices, kFolds,
+    classifyOneIter(LABELS_DICTIONARY, argumentDictionaries, nbCores, directories, args, classificationIndices, kFolds,
                     statsIterRandomStates, hyperParamSearch, metrics, DATASET, viewsIndices, dataBaseTime, start, benchmark, views)
+if statsIter > 1:
+    pass
\ No newline at end of file
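The tail of the script now dispatches on statsIter: several statistical iterations are run and then summarised once by analyzeIterResults, while a single iteration calls classifyOneIter directly. A simplified, self-contained sketch of that control flow, in which run_one_iteration() and analyse_iterations() are hypothetical placeholders for classifyOneIter / classifyOneIter_multicore and analyzeIterResults:

    # Simplified sketch of the statsIter dispatch at the end of ExecClassif.py.
    # run_one_iteration() and analyse_iterations() are hypothetical placeholders.
    def run_one_iteration(seed):
        return {"seed": seed, "accuracy": 0.8}

    def analyse_iterations(iter_results):
        print("global analysis over %d iterations" % len(iter_results))

    statsIter = 3
    if statsIter > 1:
        iterResults = [run_one_iteration(seed) for seed in range(statsIter)]
        analyse_iterations(iterResults)      # one global analysis once every iteration is done
    else:
        result = run_one_iteration(0)        # single run, analysed on its own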
Code/MonoMutliViewClassifiers/ResultAnalysis.py  (+58, −1)
@@ -104,3 +104,60 @@ def analyzeLabels(labelsArrays, realLabels, results, directory):
     cbar = fig.colorbar(cax, ticks=[0, 1])
     cbar.ax.set_yticklabels(['Wrong', 'Right'])
     fig.savefig(directory+time.strftime("%Y%m%d-%H%M%S")+"-error_analysis.png")
+
+def genScoresNames(iterResults, metric, nbResults, names):
+    validationScores = []
+    trainScores = []
+    for iterindex, iterResult in enumerate(iterResults):
+        mono, multi = iterResult
+        import pdb;pdb.set_trace()
+        validationScores[iterindex] = [float(res[1][2][metric[0]][1]) for res in mono]
+        validationScores[iterindex] += [float(scores[metric[0]][1]) for a, b, scores, c in multi]
+        trainScores[iterindex] = [float(res[1][1][2][metric[0]][0]) for res in mono]
+        trainScores[iterindex] += [float(scores[metric[0]][0]) for a, b, scores, c in multi]
+        validationScores[iterindex] = np.array(validationScores)
+        trainScores[iterindex] = np.array(trainScores)
+    validationScores = np.array(validationScores)
+    trainScores = np.array(trainScores)
+    validationSTDs = np.std(validationScores, axis=0)
+    trainSTDs = np.std(trainScores, axis=0)
+    validationMeans = np.mean(validationScores, axis=0)
+    trainMeans = np.mean(trainScores, axis=0)
+    f = pylab.figure(figsize=(40, 30))
+    width = 0.35  # the width of the bars
+    fig = plt.gcf()
+    fig.subplots_adjust(bottom=105.0, top=105.01)
+    ax = f.add_axes([0.1, 0.1, 0.8, 0.8])
+    if metric[1] != None:
+        metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
+    else:
+        metricKWARGS = {}
+    sorted_indices = np.argsort(validationMeans)
+    validationMeans = validationMeans[sorted_indices]
+    trainMeans = trainMeans[sorted_indices]
+    names = names[sorted_indices]
+    ax.set_title(getattr(Metrics, metric[0]).getConfig(**metricKWARGS)+" for each classifier")
+    rects = ax.bar(range(nbResults), validationMeans, width, color="r", yerr=validationSTDs)
+    rect2 = ax.bar(np.arange(nbResults)+width, trainMeans, width, color="0.7", yerr=trainSTDs)
+    autolabel(rects, ax)
+    autolabel(rect2, ax)
+    ax.legend((rects[0], rect2[0]), ('Test', 'Train'))
+    ax.set_xticks(np.arange(nbResults)+width)
+    ax.set_xticklabels(names, rotation="vertical")
+    return f
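As committed, genScoresNames still contains a pdb.set_trace() breakpoint and assigns to validationScores[iterindex] while validationScores is an empty list, which raises an IndexError on the first iteration. Below is a minimal sketch of the same per-iteration aggregation done with append; the score values are invented for illustration and do not reflect real benchmark output:

    import numpy as np

    # Invented per-iteration validation scores: one row per statistical iteration,
    # one column per classifier (three classifiers, two iterations here).
    iter_scores = [[0.80, 0.75, 0.90],
                   [0.82, 0.73, 0.88]]

    validationScores = []
    for scores in iter_scores:
        validationScores.append(np.array(scores))   # append instead of indexing an empty list

    validationScores = np.array(validationScores)    # shape (nbIter, nbResults)
    validationMeans = np.mean(validationScores, axis=0)
    validationSTDs = np.std(validationScores, axis=0)
    print(validationMeans, validationSTDs)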
+
+def analyzeIterResults(iterResults, name, metrics, directory):
+    nbResults = len(iterResults[0][0]) + len(iterResults[0][1])
+    nbIter = len(iterResults)
+    names = genNamesFromRes(iterResults[0][0], iterResults[0][1])
+    for metric in metrics:
+        figure = genScoresNames(iterResults, metric, nbResults, names)
+        figure.savefig(directory+time.strftime("%Y%m%d-%H%M%S")+"-"+name+"-Mean_on_"+str(nbIter)+"_iter-"+metric[0]+".png")
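genScoresNames also calls an autolabel helper that is not part of this diff and is presumably defined earlier in ResultAnalysis.py. The sketch below shows one common way such a helper is written and used; it is an assumption for illustration, not the project's actual implementation:

    import matplotlib
    matplotlib.use("Agg")                 # headless backend; the project only saves figures to disk
    import matplotlib.pyplot as plt
    import numpy as np

    def autolabel(rects, ax):
        # Assumed behaviour: write each bar's height just above it. The project's own
        # autolabel may differ in detail.
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x() + rect.get_width() / 2., 1.01 * height,
                    "%.2f" % height, ha="center", va="bottom")

    # Tiny usage example with invented means and standard deviations for two classifiers.
    fig, ax = plt.subplots()
    rects = ax.bar(np.arange(2), [0.80, 0.60], 0.35, yerr=[0.05, 0.04], color="r")
    autolabel(rects, ax)
    fig.savefig("example-error_bars.png")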