Luc Giffon / nystrom-layer / Commits / 3079463b

Commit 3079463b, authored 5 years ago by Luc Giffon

add documentation to python files
parent 429fba23

Changes: 2 changed files, with 44 additions and 5 deletions

    keras_kernel_functions.py   +25 −3
    nystrom_layer.py            +19 −2

keras_kernel_functions.py  (+25 −3)  @ 3079463b

@@ -19,7 +19,7 @@ def keras_linear_kernel(args, normalize=True, tanh_activation=False):
     :param args: list of size 2 containing x and y
     :param normalize: if True, normalize the input with l2 before computing the kernel function
     :param tanh_activation: if True apply tanh activation to the output
-    :return:
+    :return: The linear kernel between args[0] and args[1]
     """
     X = args[0]
     Y = args[1]
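
[Editor's note: the hunk shows only the docstring change and the `X`/`Y` unpacking. For readers following along, here is a minimal sketch of a body consistent with the documented parameters; the `K.l2_normalize`/`K.dot` choices are assumptions, not the repository's actual code.]

    # Sketch only: body reconstructed from the docstring, not copied from the repo.
    from keras import backend as K

    def keras_linear_kernel(args, normalize=True, tanh_activation=False):
        X = args[0]
        Y = args[1]
        if normalize:
            X = K.l2_normalize(X, axis=-1)  # l2-normalize each row before the product
            Y = K.l2_normalize(Y, axis=-1)
        result = K.dot(X, K.transpose(Y))   # <x_i, y_j> for every pair of rows
        if tanh_activation:
            result = K.tanh(result)
        return result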
@@ -35,6 +35,17 @@ def keras_linear_kernel(args, normalize=True, tanh_activation=False):

 def keras_chi_square_CPD(args, epsilon=None, tanh_activation=True, normalize=False):
+    """
+    Chi square kernel (equivalent to `additive_chi2_kernel` in scikit-learn):
+    $k(x, y) = -Sum [(x - y)^2 / (x + y)]$
+
+    :param args: list of size 2 containing x and y
+    :param epsilon: very small value to add to the denominator so that we do not have zeros here
+    :param tanh_activation: if True apply tanh activation to the output
+    :param normalize: if True, normalize the input with l2 before computing the kernel function
+    :return: The chi square kernel between args[0] and args[1]
+    """
     X = args[0]
     Y = args[1]
     if normalize:
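
[Editor's note: the body past `if normalize:` is collapsed. A hedged sketch of what the documented formula $k(x, y) = -Sum [(x - y)^2 / (x + y)]$ implies; the broadcasting details below are assumed, not taken from the commit.]

    # Sketch only: pairwise additive chi-square, reconstructed from the docstring.
    from keras import backend as K

    def keras_chi_square_CPD(args, epsilon=None, tanh_activation=True, normalize=False):
        X = args[0]
        Y = args[1]
        if normalize:
            X = K.l2_normalize(X, axis=-1)
            Y = K.l2_normalize(Y, axis=-1)
        X = K.expand_dims(X, axis=1)          # (n, 1, d)
        Y = K.expand_dims(Y, axis=0)          # (1, m, d)
        num = K.square(X - Y)                 # (x - y)^2, broadcast to (n, m, d)
        den = X + Y
        if epsilon is not None:
            den = den + epsilon               # keep the denominator away from zero
        result = -K.sum(num / den, axis=-1)   # -Sum [(x - y)^2 / (x + y)]
        if tanh_activation:
            result = K.tanh(result)
        return result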
@@ -59,6 +70,17 @@ def keras_chi_square_CPD(args, epsilon=None, tanh_activation=True, normalize=Fal

 def keras_chi_square_CPD_exp(args, gamma, epsilon=None, tanh_activation=False, normalize=True):
+    """
+    Exponential chi square kernel (equivalent to `chi2_kernel` in scikit-learn):
+    $k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])$
+
+    :param args: list of size 2 containing x and y
+    :param epsilon: very small value to add to the denominator so that we do not have zeros here
+    :param tanh_activation: if True apply tanh activation to the output
+    :param normalize: if True, normalize the input with l2 before computing the kernel function
+    :return: The exponential chi square kernel between args[0] and args[1]
+    """
     result = keras_chi_square_CPD(args, epsilon, tanh_activation, normalize)
     result *= gamma
     return K.exp(result)
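
[Editor's note: since `keras_chi_square_CPD` returns the negative sum -Sum[(x - y)^2 / (x + y)], scaling by `gamma` and exponentiating yields exp(-gamma * Sum[...]), matching the docstring. One plausible way to call the function inside a model; the input shapes below are illustrative assumptions, not from the repository.]

    # Illustrative usage with a Lambda layer (shapes are made up for the example):
    from keras.layers import Input, Lambda

    x = Input(shape=(64,))      # batch of observations
    sub = Input(shape=(64,))    # batch of subsample points
    k_vec = Lambda(keras_chi_square_CPD_exp, arguments={"gamma": 0.1})([x, sub])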
@@ -68,12 +90,12 @@ def keras_rbf_kernel(args, gamma, normalize=True, tanh_activation=False):
     """
     Compute the rbf kernel between each entry of X and each line of Y.
-    tf_rbf_kernel(x, y, gamma) = exp(- (||x - y||^2 * gamma))
+    $(x, y, gamma) = exp(- (||x - y||^2 * gamma))$

     :param X: A tensor of size n times d
     :param Y: A tensor of size m times d
     :param gamma: The bandwith of the kernel
-    :return:
+    :return: The RBF kernel between args[0] and args[1]
     """
     X = args[0]
     Y = args[1]
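
[Editor's note: again only the docstring and the `X`/`Y` unpacking are visible. A sketch consistent with exp(-gamma * ||x - y||^2), using the standard squared-distance expansion; this expansion is an assumption, not the commit's code.]

    # Sketch only: ||x - y||^2 = ||x||^2 - 2<x, y> + ||y||^2 for every pair of rows.
    from keras import backend as K

    def keras_rbf_kernel(args, gamma, normalize=True, tanh_activation=False):
        X = args[0]
        Y = args[1]
        if normalize:
            X = K.l2_normalize(X, axis=-1)
            Y = K.l2_normalize(Y, axis=-1)
        sq_X = K.sum(K.square(X), axis=-1, keepdims=True)   # (n, 1)
        sq_Y = K.sum(K.square(Y), axis=-1, keepdims=True)   # (m, 1)
        dist = sq_X - 2 * K.dot(X, K.transpose(Y)) + K.transpose(sq_Y)
        result = K.exp(-gamma * dist)
        if tanh_activation:
            result = K.tanh(result)
        return result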

nystrom_layer.py  (+19 −2)  @ 3079463b

@@ -10,7 +10,18 @@ from keras.preprocessing.image import ImageDataGenerator
 from keras_kernel_functions import keras_linear_kernel

-def datagen_fixed_batch_size(x, y, x_sub=None, p_datagen=ImageDataGenerator()):
+def datagen_fixed_batch_size(x, y, batch_size, x_sub=None, p_datagen=ImageDataGenerator()):
+    """
+    Wrap a data generator so that:
+     - it always output batches of the same size
+     - it gives a subsample along with each batch
+
+    :param x: observation data
+    :param y: label data
+    :param x_sub: list of base of subsample (each base must be of size batch_size)
+    :param p_datagen: the initial data generator to wrap
+    :return:
+    """
     if x_sub is None:
         x_sub = []
     for x_batch, y_batch in p_datagen.flow(x, y, batch_size=batch_size):
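
[Editor's note: the hunk stops at the `for` loop, so the generator's body is hidden. The continuation below is a guess at the intent implied by the new docstring; skipping short batches and yielding the subsample bases alongside each batch are assumptions, not the repository's code.]

    # Assumed continuation -- not shown in the hunk:
    from keras.preprocessing.image import ImageDataGenerator

    def datagen_fixed_batch_size(x, y, batch_size, x_sub=None, p_datagen=ImageDataGenerator()):
        if x_sub is None:
            x_sub = []
        for x_batch, y_batch in p_datagen.flow(x, y, batch_size=batch_size):
            if x_batch.shape[0] != batch_size:
                continue  # drop the trailing short batch: every batch keeps a fixed size
            yield [x_batch] + x_sub, y_batch  # subsample bases ride along as extra inputs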
@@ -59,6 +70,8 @@ def init_number_subsample_bases(nys_size, batch_size):
     return quotient + 1, batch_size - remaining

 if __name__ == "__main__":
+    # model meta parameters
+    # ---------------------
     batch_size = 128
     epochs = 1
     num_classes = 10
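
[Editor's note: only the return line of `init_number_subsample_bases` is visible. Assuming `quotient` and `remaining` come from `divmod(nys_size, batch_size)`, here is a worked example of what the pair `(quotient + 1, batch_size - remaining)` means; the `divmod` is a guess about the hidden body.]

    # Hypothetical walk-through of the visible return value:
    nys_size, batch_size = 300, 128
    quotient, remaining = divmod(nys_size, batch_size)         # 2, 44
    n_bases, n_padding = quotient + 1, batch_size - remaining  # 3, 84
    # 3 bases of 128 rows cover the 300 subsample points, leaving 84 padding slots.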
@@ -126,11 +139,15 @@ if __name__ == "__main__":
     # weight matrix of the nystrom layer
     input_classifier = Dense(nys_size, use_bias=False, activation='linear')(kernel_vector)  # metric matrix of the Nyström layer
+    # final softmax classification layer
+    # ----------------------------------
     classif = Dense(num_classes, activation="softmax")(input_classifier)
+    # finalization of model, compilation and training
+    # -----------------------------------------------
     model = Model([input_x] + input_repr_subsample, [classif])
     adam = Adam(lr=.1)
     model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
-    model.fit_generator(datagen_fixed_batch_size(x_train, y_train, list_subsample_bases, datagen),
+    model.fit_generator(datagen_fixed_batch_size(x_train, y_train, batch_size, list_subsample_bases, datagen),
                         steps_per_epoch=int(x_train.shape[0] / batch_size),
                         epochs=epochs)
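
[Editor's note: `steps_per_epoch=int(x_train.shape[0] / batch_size)` floors the ratio. If the wrapped generator only ever yields full batches, as the fixed-batch-size wrapper suggests, the leftover `x_train.shape[0] % batch_size` samples are simply not seen within an epoch.]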