Supervised MultiModal Integration Tool / Commits

Commit 60b6ce49, authored 7 years ago by Baptiste Bauvin
Wrote some doc
parent a77bacdc
Changes: 3 changed files with 64 additions and 51 deletions

Code/MonoMultiViewClassifiers/ExecClassif.py       +16 −14
Code/MonoMultiViewClassifiers/ResultAnalysis.py    +47 −37
Code/Versions.py                                    +1 −0
Code/MonoMultiViewClassifiers/ExecClassif.py  (+16 −14)
# Import built-in modules
# import pdb;pdb.set_trace()
import errno
import logging
import math
import os
import pkgutil  # for TimeStamp in CSVFile
import pkgutil
import time

import matplotlib
import numpy as np
from joblib import Parallel, delayed
import h5py

matplotlib.use('Agg')  # Anti-Grain Geometry C++ library to make a raster (pixel) image of the figure
import h5py

# Import own modules
# import Metrics
# from . import Multiview
from . import MonoviewClassifiers
from . import MultiviewClassifiers
from .Multiview.ExecMultiview import ExecMultiview, ExecMultiview_multicore

...
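One note on the import block: with older matplotlib releases, matplotlib.use('Agg') only takes effect if it runs before pyplot is imported, which is why it sits among the imports here. A minimal, self-contained sketch of that pattern (the output file name is made up):

import matplotlib
matplotlib.use('Agg')  # select the headless raster backend before pyplot is imported
import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots()
ax.bar(np.arange(3), [0.70, 0.80, 0.65])
fig.savefig("scores.png")  # hypothetical output path
plt.close(fig)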
@@ -70,7 +66,8 @@ def initBenchmark(args):
    return benchmark


-def initMonoviewArguments(benchmark, argumentDictionaries, views, allViews, NB_CLASS, kwargsInit):
+def initMonoviewExps(benchmark, argumentDictionaries, views, allViews, NB_CLASS, kwargsInit):
    """Used to add each monoview experience args to the list of monoview experiences args"""
    if benchmark["Monoview"]:
        argumentDictionaries["Monoview"] = []
        for view in views:
...
@@ -83,6 +80,7 @@ def initMonoviewArguments(benchmark, argumentDictionaries, views, allViews, NB_C


def initMonoviewKWARGS(args, classifiersNames):
    """Used to init kwargs thanks to a function in each monoview classifier package"""
    monoviewKWARGS = {}
    for classifiersName in classifiersNames:
        classifierModule = getattr(MonoviewClassifiers, classifiersName)
...
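initMonoviewKWARGS resolves each classifier package by name with getattr. A minimal sketch of that runtime lookup idiom, using the standard math module as a stand-in so the example stays self-contained:

import math

def resolve_by_name(module, attribute_name):
    # Same idiom as getattr(MonoviewClassifiers, classifiersName):
    # look an attribute up on a module from a runtime string.
    return getattr(module, attribute_name)

sqrt = resolve_by_name(math, "sqrt")  # stand-in for a classifier sub-module
print(sqrt(16.0))                     # 4.0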
@@ -114,6 +112,7 @@ def initKWARGSFunc(args, benchmark):


def initMultiviewArguments(args, benchmark, views, viewsIndices, argumentDictionaries, randomState, directory, resultsMonoview, classificationIndices):
    """Used to add each multiview experience args to the list of multiview experiences args"""
    multiviewArguments = []
    if "Multiview" in benchmark:
        for multiviewAlgoName in benchmark["Multiview"]:
...
@@ -126,6 +125,8 @@ def initMultiviewArguments(args, benchmark, views, viewsIndices, argumentDiction


def arangeMetrics(metrics, metricPrinc):
    """Used to get the metrics list in the right order so that the first one is the principal metric specified in args"""
    if [metricPrinc] in metrics:
        metricIndex = metrics.index([metricPrinc])
        firstMetric = metrics[0]
...
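The arangeMetrics hunk is collapsed before the reordering itself. A plausible completion of that swap, written as a standalone sketch under the assumption that metrics entries are one-element lists, not as the project's actual code:

def arange_metrics(metrics, metric_princ):
    # Move the principal metric (stored as a one-element list, e.g. ["accuracy_score"])
    # to the front of the list, swapping it with whatever was first.
    if [metric_princ] in metrics:
        metric_index = metrics.index([metric_princ])
        first_metric = metrics[0]
        metrics[0] = [metric_princ]
        metrics[metric_index] = first_metric
    return metrics

print(arange_metrics([["f1_score"], ["accuracy_score"]], "accuracy_score"))
# [['accuracy_score'], ['f1_score']]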
@@ -141,6 +142,8 @@ def classifyOneIter_multicore(LABELS_DICTIONARY, argumentDictionaries, nbCores,
                              randomState, hyperParamSearch, metrics, coreIndex, viewsIndices, dataBaseTime, start, benchmark, views):
    """Used to execute mono and multiview classification and result analysis for one random state
    using multicore classification"""
    resultsMonoview = []
    np.savetxt(directory + "train_indices.csv", classificationIndices[0], delimiter=",")
    labelsNames = LABELS_DICTIONARY.values()
...
@@ -187,7 +190,9 @@ def classifyOneIter_multicore(LABELS_DICTIONARY, argumentDictionaries, nbCores,


def classifyOneIter(LABELS_DICTIONARY, argumentDictionaries, nbCores, directory, args, classificationIndices, kFolds,
                    randomState, hyperParamSearch, metrics, DATASET, viewsIndices, dataBaseTime, start, benchmark, views):
    """Used to execute mono and multiview classification and result analysis for one random state
    classification"""
    # TODO : Clarify this one
    np.savetxt(directory + "train_indices.csv", classificationIndices[0], delimiter=",")
    resultsMonoview = []
    labelsNames = LABELS_DICTIONARY.values()
...
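Both classifyOneIter variants persist the train split with np.savetxt(directory + "train_indices.csv", ...). A small round-trip sketch of saving and reloading such indices; the path and index values are made up:

import numpy as np

train_indices = np.array([0, 2, 5, 7, 9])             # hypothetical train split
np.savetxt("train_indices.csv", train_indices, delimiter=",")

# Reload later as integers; savetxt writes floats by default.
reloaded = np.loadtxt("train_indices.csv", delimiter=",").astype(int)
print(reloaded)  # [0 2 5 7 9]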
@@ -258,11 +263,8 @@ def classifyOneIter(LABELS_DICTIONARY, argumentDictionaries, nbCores, directory,
    return results, labelAnalysis


# _______________ #
# __ EXECUTION __ #
# _______________ #


def execClassif(arguments):
    # import pdb;pdb.set_trace()
    """Main function to execute the benchmark"""
    start = time.time()
    args = execution.parseTheArgs(arguments)
...
@@ -319,7 +321,7 @@ def execClassif(arguments):
    dataBaseTime = time.time() - start
    argumentDictionaries = {"Monoview": [], "Multiview": []}
-    argumentDictionaries = initMonoviewArguments(benchmark, argumentDictionaries, views, allViews, NB_CLASS,
+    argumentDictionaries = initMonoviewExps(benchmark, argumentDictionaries, views, allViews, NB_CLASS,
                                                 initKWARGS)
    directories = execution.genDirecortiesNames(directory, statsIter)
...
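For orientation, benchmark and argumentDictionaries are plain dicts keyed by "Monoview" and "Multiview". The sketch below imitates how the renamed initMonoviewExps step might fill them; the classifier names, view names, and dictionary keys are invented for illustration:

# A minimal stand-in, assuming the structures suggested by the diff:
# benchmark maps "Monoview"/"Multiview" to lists of classifier names, and
# argumentDictionaries collects one argument dict per (classifier, view) pair.
benchmark = {"Monoview": ["DecisionTree"], "Multiview": ["Fusion"]}
argumentDictionaries = {"Monoview": [], "Multiview": []}

views = ["MRI", "RNASeq"]  # hypothetical view names
if benchmark["Monoview"]:
    argumentDictionaries["Monoview"] = []
    for view in views:
        for classifier_name in benchmark["Monoview"]:
            argumentDictionaries["Monoview"].append(
                {"CL_type": classifier_name, "view": view})  # illustrative keys only

print(len(argumentDictionaries["Monoview"]))  # 2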
Code/MonoMultiViewClassifiers/ResultAnalysis.py  (+47 −37)
@@ -22,6 +22,7 @@ __status__ = "Prototype" # Production, Development, Prototype


def autolabel(rects, ax):
    """Used to print scores on top of the bars"""
    for rect in rects:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2., 1.01 * height,
...
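autolabel writes each bar's score just above the bar. A self-contained usage sketch; the label format string and the plotted scores are assumptions, only the text position comes from the diff:

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np

def autolabel(rects, ax):
    # Annotate each bar with its height, placed just above the bar's top.
    for rect in rects:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2., 1.01 * height,
                "%.2f" % height, ha="center", va="bottom")

fig, ax = plt.subplots()
rects = ax.bar(np.arange(3), [0.71, 0.84, 0.63])
autolabel(rects, ax)
fig.savefig("bar_scores.png")  # hypothetical file name
plt.close(fig)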
@@ -30,6 +31,7 @@ def autolabel(rects, ax):


def genFusionName(type_, a, b, c):
    """Used to generate fusion classifiers names"""
    if type_ == "Fusion" and a["fusionType"] != "EarlyFusion":
        return "Late-" + str(a["fusionMethod"])
    elif type_ == "Fusion" and a["fusionType"] != "LateFusion":
...
@@ -37,12 +39,14 @@ def genFusionName(type_, a, b, c):


def genNamesFromRes(mono, multi):
    """Used to generate classifiers names list (in the right order) from mono- and multi-view results"""
    names = [res[1][0] + "-" + res[1][1][-1] for res in mono]
    names += [type_ if type_ != "Fusion" else genFusionName(type_, a, b, c) for type_, a, b, c in multi]
    return names


def resultAnalysis(benchmark, results, name, times, metrics, directory, minSize=10):
    """Used to generate bar graphs of all the classifiers scores for each metric"""
    mono, multi = results
    for metric in metrics:
        logging.debug("Start:\t Score graph generation for " + metric[0])
...
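From the comprehension in genNamesFromRes, each monoview result is expected to expose a (classifier name, view info) pair at res[1], and each multiview result is a (type_, a, b, c) tuple. A toy call with invented result tuples, just to make the name construction concrete; the reduced genFusionName below keeps only the branch visible in the diff:

def genFusionName(type_, a, b, c):
    # Reduced stand-in for the real helper: only the late-fusion branch shown above.
    if type_ == "Fusion" and a["fusionType"] != "EarlyFusion":
        return "Late-" + str(a["fusionMethod"])
    return type_

def genNamesFromRes(mono, multi):
    names = [res[1][0] + "-" + res[1][1][-1] for res in mono]
    names += [type_ if type_ != "Fusion" else genFusionName(type_, a, b, c)
              for type_, a, b, c in multi]
    return names

# Hypothetical results; only the fields read above are filled in.
mono = [(0, ("DecisionTree", ["args", "MRI"]))]
multi = [("Fusion", {"fusionType": "LateFusion", "fusionMethod": "WeightedLinear"}, None, None)]
print(genNamesFromRes(mono, multi))  # ['DecisionTree-MRI', 'Late-WeightedLinear']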
@@ -82,42 +86,8 @@ def resultAnalysis(benchmark, results, name, times, metrics, directory, minSize=
        logging.debug("Done:\t Score graph generation for " + metric[0])


-def analyzeIterLabels(labelsAnalysisList, directory, classifiersNames, minSize=10):
-    logging.debug("Start:\t Global label analysis figure generation")
-    nbExamples = labelsAnalysisList[0].shape[0]
-    nbClassifiers = len(classifiersNames)
-    nbIter = 2
-    figWidth = max(nbClassifiers / 2, minSize)
-    figHeight = max(nbExamples / 20, minSize)
-    figKW = {"figsize": (figWidth, figHeight)}
-    fig, ax = plt.subplots(nrows=1, ncols=1, **figKW)
-    data = sum(labelsAnalysisList)
-    cax = plt.imshow(-data, interpolation='none', cmap="Greys", aspect='auto')
-    plt.title('Errors depending on the classifier')
-    ticks = np.arange(nbIter / 2 - 0.5, nbClassifiers * nbIter, nbIter)
-    plt.xticks(ticks, classifiersNames, rotation="vertical")
-    cbar = fig.colorbar(cax, ticks=[0, -len(labelsAnalysisList)])
-    cbar.ax.set_yticklabels(['Always Wrong', 'Always Right'])
-    fig.tight_layout()
-    fig.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-error_analysis.png")
-    plt.close()
-    logging.debug("Done:\t Global label analysis figure generation")
-    logging.debug("Start:\t Global error by example figure generation")
-    errorOnExamples = -1 * np.sum(data, axis=1) / nbIter + (nbClassifiers * len(labelsAnalysisList))
-    np.savetxt(directory + time.strftime("%Y%m%d-%H%M%S") + "-clf_errors.csv", data, delimiter=",")
-    np.savetxt(directory + time.strftime("%Y%m%d-%H%M%S") + "-example_errors.csv", errorOnExamples, delimiter=",")
-    fig, ax = plt.subplots()
-    x = np.arange(nbExamples)
-    plt.bar(x, errorOnExamples)
-    plt.ylim([0, nbClassifiers * len(labelsAnalysisList)])
-    plt.title("Number of classifiers that failed to classify each example")
-    fig.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-example_errors.png")
-    plt.close()
-    logging.debug("Done:\t Global error by example figure generation")


def analyzeLabels(labelsArrays, realLabels, results, directory, minSize=10):
    """Used to generate a graph showing errors on each example depending on classifier"""
    logging.debug("Start:\t Label analysis figure generation")
    mono, multi = results
    classifiersNames = genNamesFromRes(mono, multi)
...
@@ -164,7 +134,47 @@ def analyzeLabels(labelsArrays, realLabels, results, directory, minSize = 10):
    return data


-def genScoresNames(iterResults, metric, nbResults, names, nbMono, minSize=10):
+def analyzeIterLabels(labelsAnalysisList, directory, classifiersNames, minSize=10):
+    """Used to generate a graph showing errors on each example depending on the classifier, using a score
+    if there are multiple iterations"""
+    logging.debug("Start:\t Global label analysis figure generation")
+    nbExamples = labelsAnalysisList[0].shape[0]
+    nbClassifiers = len(classifiersNames)
+    nbIter = 2
+    figWidth = max(nbClassifiers / 2, minSize)
+    figHeight = max(nbExamples / 20, minSize)
+    figKW = {"figsize": (figWidth, figHeight)}
+    fig, ax = plt.subplots(nrows=1, ncols=1, **figKW)
+    data = sum(labelsAnalysisList)
+    cax = plt.imshow(-data, interpolation='none', cmap="Greys", aspect='auto')
+    plt.title('Errors depending on the classifier')
+    ticks = np.arange(nbIter / 2 - 0.5, nbClassifiers * nbIter, nbIter)
+    plt.xticks(ticks, classifiersNames, rotation="vertical")
+    cbar = fig.colorbar(cax, ticks=[0, -len(labelsAnalysisList)])
+    cbar.ax.set_yticklabels(['Always Wrong', 'Always Right'])
+    fig.tight_layout()
+    fig.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-error_analysis.png")
+    plt.close()
+    logging.debug("Done:\t Global label analysis figure generation")
+    logging.debug("Start:\t Global error by example figure generation")
+    errorOnExamples = -1 * np.sum(data, axis=1) / nbIter + (nbClassifiers * len(labelsAnalysisList))
+    np.savetxt(directory + time.strftime("%Y%m%d-%H%M%S") + "-clf_errors.csv", data, delimiter=",")
+    np.savetxt(directory + time.strftime("%Y%m%d-%H%M%S") + "-example_errors.csv", errorOnExamples, delimiter=",")
+    fig, ax = plt.subplots()
+    x = np.arange(nbExamples)
+    plt.bar(x, errorOnExamples)
+    plt.ylim([0, nbClassifiers * len(labelsAnalysisList)])
+    plt.title("Number of classifiers that failed to classify each example")
+    fig.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-example_errors.png")
+    plt.close()
+    logging.debug("Done:\t Global error by example figure generation")
+
+
+def genFig(iterResults, metric, nbResults, names, nbMono, minSize=10):
    """Used to generate the bar graph representing the mean scores of each classifier if there are multiple iterations
    with different random states"""
    nbIter = len(iterResults)
    validationScores = np.zeros((nbIter, nbResults))
    trainScores = np.zeros((nbIter, nbResults))
...
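As moved, analyzeIterLabels consumes labelsAnalysisList, a list of per-iteration arrays of shape (nbExamples, nbClassifiers) that it sums and renders as a grey-scale error image. A sketch of the array bookkeeping only, under the assumption (suggested by the colorbar labels) that entries are 1 for a correct prediction and 0 for an error:

import numpy as np

rng = np.random.RandomState(42)
nbExamples, nbClassifiers, nbStatsIter = 6, 3, 2
labelsAnalysisList = [rng.randint(0, 2, size=(nbExamples, nbClassifiers))
                      for _ in range(nbStatsIter)]

data = sum(labelsAnalysisList)   # element-wise sum over iterations
nbIter = 2                       # hard-coded in the diff as well
errorOnExamples = -1 * np.sum(data, axis=1) / nbIter + (nbClassifiers * len(labelsAnalysisList))

print(data.shape)             # (6, 3)
print(errorOnExamples.shape)  # (6,)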
@@ -213,7 +223,7 @@ def analyzeIterResults(iterResults, name, metrics, directory):
    names = genNamesFromRes(iterResults[0][0], iterResults[0][1])
    for metric in metrics:
        logging.debug("Start:\t Global score graph generation for " + metric[0])
-        figure = genScoresNames(iterResults, metric, nbResults, names, nbMono)
+        figure = genFig(iterResults, metric, nbResults, names, nbMono)
        figure.savefig(directory + time.strftime("%Y%m%d-%H%M%S") + "-" + name + "-Mean_on_" + str(nbIter) + "_iter-" + metric[0] + ".png")
        logging.debug("Done:\t Global score graph generation for " + metric[0])
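The figure path is assembled by plain string concatenation from a timestamp, the database name, the iteration count and the metric. A small sketch of what that pattern yields; the directory, name, and metric values are placeholders:

import time

directory = "Results/"  # hypothetical results directory
name, nbIter, metric = "MyDataset", 5, ["accuracy_score"]

filename = directory + time.strftime("%Y%m%d-%H%M%S") + "-" + name \
           + "-Mean_on_" + str(nbIter) + "_iter-" + metric[0] + ".png"
print(filename)  # e.g. Results/20180115-143210-MyDataset-Mean_on_5_iter-accuracy_score.png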
Code/Versions.py  (+1 −0)
@@ -4,6 +4,7 @@ __status__ = "Prototype" # Production, Development, Prototype


def testVersions():
    """Used to test if all prerequisites are installed"""
    isUpToDate = True
    toInstall = []
...
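testVersions presumably tries to import each prerequisite, flipping isUpToDate and collecting missing packages in toInstall. A generic sketch of that pattern, with a package list taken from the imports in this commit rather than from the project's real requirements:

def test_versions(required=("numpy", "h5py", "matplotlib", "joblib")):
    # Try to import each prerequisite; report the ones that are missing.
    is_up_to_date = True
    to_install = []
    for package in required:
        try:
            __import__(package)
        except ImportError:
            is_up_to_date = False
            to_install.append(package)
    if not is_up_to_date:
        raise ImportError("Please install " + ", ".join(to_install))

test_versions()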