Commit 5207f119 authored by Luc Giffon

example of keras + tensorflow model with the nystrom layer

parent cad5791a
"""
Example of how vgg and deepstrom could be co-trained. (deepstrom on top of vgg convolution layers)
"""

from time import time

import numpy as np
import tensorflow as tf
from keras.applications.vgg19 import VGG19

import skluc.mldatasets as dataset
from skluc.tensorflow_.kernel_approximation import nystrom_layer
from skluc.tensorflow_.utils import classification_cifar, batch_generator

if __name__ == '__main__':
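    # CIFAR-10 preprocessing: images as normalized float32 arrays with one-hot labels,
    # upscaled by a factor of 2 (presumably so the inputs stay large enough for VGG19's
    # five pooling stages).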
    cifar = dataset.Cifar10Dataset(validation_size=1000)
    cifar.load()
    cifar.to_image()
    cifar.to_one_hot()
    cifar.normalize()
    cifar.data_astype(np.float32)
    cifar.labels_astype(np.float32)
    cifar.resize_zoom(2)

    num_epoch = 100
    batch_size = 54
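    # The first 100 training images serve as the subsample of landmark points for the
    # Nystrom approximation.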
    subsample = cifar.train[0][:100]
    input_dim, output_dim = cifar.train[0].shape[1:], cifar.train[1].shape[1]

    with tf.Graph().as_default():
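        # Pretrained VGG19 convolutional stack used as a fixed feature extractor: every
        # layer is frozen, so only the deepstrom part on top receives gradient updates.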
        vgg_conv_model = VGG19(include_top=False, weights='imagenet', input_shape=input_dim)
        i = 0
        for layer in vgg_conv_model.layers:
            layer.trainable = False
            i += 1
            print(i, layer.name)

        x = tf.placeholder(tf.float32, shape=[None, *input_dim], name="x")
        tf.summary.image("input", x)
        x_subsample = tf.Variable(subsample, name="x_subsample", trainable=False)
        y_ = tf.placeholder(tf.float32, shape=[None, output_dim], name="labels")
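
        # The input batch and the fixed subsample go through the same convolution stack,
        # so kernel values in the Nystrom layer are computed between conv representations.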
        conv_x = vgg_conv_model(x)
        conv_subsample = vgg_conv_model(x_subsample)

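        # nystrom_layer comes from skluc; given the 100-image subsample above, the last
        # positional argument is presumably the number of landmark points (the exact
        # meaning of the positional arguments is defined by skluc's signature).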
        out_nystrom = nystrom_layer(conv_x, conv_subsample, 1, 100)
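        # Classification head from skluc: returns the logits and the dropout keep_prob
        # placeholder (fed 0.5 during training, 1.0 at evaluation time).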
        y_conv, keep_prob = classification_cifar(out_nystrom, output_dim=output_dim)

        # loss computation
        with tf.name_scope("xent"):
            cross_entropy = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv, name="xentropy"),
                name="xentropy_mean")
            tf.summary.scalar('loss-xent', cross_entropy)

        # gradient computation
        with tf.name_scope("train"):
            global_step = tf.Variable(0, name="global_step", trainable=False)
            train_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cross_entropy, global_step=global_step)

        # accuracy computation
        with tf.name_scope("accuracy"):
            predictions = tf.argmax(y_conv, 1)
            correct_prediction = tf.equal(predictions, tf.argmax(y_, 1))
            accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            tf.summary.scalar("accuracy", accuracy_op)

        init = tf.global_variables_initializer()
        merged_summary = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter("log/{}".format(time()))

        # Create a session for running Ops on the Graph.
        with tf.Session() as sess:
            summary_writer.add_graph(sess.graph)
            # Initialize all Variable objects
            sess.run(init)
            # actual learning
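            # Validation feed: the whole validation split with dropout disabled.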
            feed_dict_val = {x: cifar.validation[0], y_: cifar.validation[1], keep_prob: 1.0}
            num_batches = int(cifar.train[0].shape[0] / batch_size)
            for i in range(num_epoch):
                j = 0
                start = time()
                for X_batch, Y_batch in batch_generator(cifar.train[0], cifar.train[1], batch_size, True):
                    feed_dict = {x: X_batch, y_: Y_batch, keep_prob: 0.5}
                    _, loss = sess.run([train_optimizer, cross_entropy], feed_dict=feed_dict)
                    if j % 100 == 0:
                        global_batch = i * num_batches + j + 1
                        print('batch {}/{}, loss {} (with dropout), {:.2f}s / batch'
                              .format(global_batch, num_epoch * num_batches, loss, (time() - start) / 100))
                        r_accuracy = sess.run([accuracy_op], feed_dict=feed_dict_val)
                        summary_str = sess.run(merged_summary, feed_dict=feed_dict)
                        summary_writer.add_summary(summary_str, global_batch)
                        start = time()
                    j += 1
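
            # Final evaluation: accuracy averaged over shuffled test batches of 1000 images.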
            accuracies = []
            i = 0
            for X_batch, Y_batch in batch_generator(cifar.test[0], cifar.test[1], 1000, True):
                accuracy = sess.run([accuracy_op], feed_dict={
                    x: X_batch, y_: Y_batch, keep_prob: 1.0})
                accuracies.append(accuracy[0])
                i += 1
            print(sum(accuracies) / i)