"""
Convolutional Neural Netwok implementation in tensorflow whith multiple representations possible after the convolution:
- Fully connected layer
- Random Fourier Features layer
- Fast Food layer where Fast Hadamard Transform has been replaced by dot product with Hadamard matrix.
See:
"Deep Fried Convnets" by
Zichao Yang, Marcin Moczulski, Misha Denil, Nando de Freitas, Alex Smola, Le Song, Ziyu Wang
"""
import time as t

import numpy as np
import tensorflow as tf
from sklearn.preprocessing import LabelBinarizer

import skluc.mldatasets as dataset

tf.logging.set_verbosity(tf.logging.ERROR)

enc = LabelBinarizer()
mnist = dataset.MnistDataset()
mnist = mnist.load()
X_train, Y_train = mnist["train"]
X_train = np.array(X_train / 255)
enc.fit(Y_train)
Y_train = np.array(enc.transform(Y_train))
X_test, Y_test = mnist["test"]
X_test = np.array(X_test / 255)
Y_test = np.array(enc.transform(Y_test))
X_train = X_train.astype(np.float32)
X_test = X_test.astype(np.float32)
Y_train = Y_train.astype(np.float32)
Y_test = Y_test.astype(np.float32)
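
# At this point the data is assumed to be flat (n_samples, 784) float arrays in
# [0, 1] for X_train/X_test and one-hot (n_samples, 10) arrays for
# Y_train/Y_test; the reshape to a square image and the output layer in
# __main__ below rely on these shapes.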
# --- Usual functions --- #
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name="weights")


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name="biases")


def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
def convolution_mnist(input):
    with tf.name_scope("conv_pool_1"):
        # 20 is the number of filters we'll use, i.e. the number of different
        # local patterns this layer is able to recognize
        W_conv1 = weight_variable([5, 5, 1, 20])
        tf.summary.histogram("weights conv1", W_conv1)
        b_conv1 = bias_variable([20])
        tf.summary.histogram("biases conv1", b_conv1)
        h_conv1 = tf.nn.relu(conv2d(input, W_conv1) + b_conv1)
        tf.summary.histogram("act conv1", h_conv1)
        h_pool1 = max_pool_2x2(h_conv1)
with tf.name_scope("conv_pool_2"):
Luc Giffon
committed
W_conv2 = weight_variable([5, 5, 20, 50])
tf.summary.histogram("weights conv2", W_conv2)
Luc Giffon
committed
b_conv2 = bias_variable([50])
tf.summary.histogram("biases conv2", b_conv2)
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
tf.summary.histogram("act conv2", h_conv2)
h_pool2 = max_pool_2x2(h_conv2)
return h_pool2
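
# For 28x28 MNIST inputs, the shapes through convolution_mnist are:
# 28x28x1 -> conv 5x5 (SAME) -> 28x28x20 -> max-pool 2x2 -> 14x14x20
#         -> conv 5x5 (SAME) -> 14x14x50 -> max-pool 2x2 -> 7x7x50
# so the flattened representation fed to the next layer has 7 * 7 * 50 = 2450 units.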
# --- Random Fourier Features --- #
def random_variable(shape, sigma):
    W = np.random.normal(size=shape, scale=sigma).astype(np.float32)
    return tf.Variable(W, name="random_weights", trainable=False)


def random_biases(shape):
    b = np.random.uniform(0, 2 * np.pi, size=shape).astype(np.float32)
    return tf.Variable(b, name="random_biases", trainable=False)
# --- Representation Layer --- #
def random_features(conv_out, sigma):
    # random Fourier features map sqrt(2/D) * cos(Wx + b), with fixed random W and b
    with tf.name_scope("random_features"):
        init_dim = np.prod([s.value for s in conv_out.shape if s.value is not None])
        conv_out2 = tf.reshape(conv_out, [-1, init_dim])
        W = random_variable((init_dim, init_dim), sigma)
        b = random_biases(init_dim)
        h1 = tf.matmul(conv_out2, W, name="Wx") + b
        h1_cos = tf.cos(h1)
        h1_final = tf.scalar_mul(np.sqrt(2.0 / init_dim).astype(np.float32), h1_cos)
        return h1_final
def fully_connected(conv_out):
    with tf.name_scope("fc_1"):
        # the convolution above produces 50 feature maps of size 7x7
        h_pool2_flat = tf.reshape(conv_out, [-1, 7 * 7 * 50])
        W_fc1 = weight_variable([7 * 7 * 50, 4096 * 2])
        b_fc1 = bias_variable([4096 * 2])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
        tf.summary.histogram("weights", W_fc1)
        tf.summary.histogram("biases", b_fc1)
        return h_fc1
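
# __main__ below calls a fast_food() layer that is not defined in this file.
# What follows is a minimal sketch of the Fastfood transform of "Deep Fried
# Convnets" (V = S H G P H B, with diagonal B, G, S, permutation P and Hadamard
# matrix H), with the Fast Hadamard Transform replaced by a dense dot product
# with the Hadamard matrix, as stated in the module docstring. The scipy
# dependency, the handling of the `diag` and `trainable` flags, and the final
# cosine map are assumptions made to match the call sites below; the original
# implementation may differ. tf.gather with axis=1 needs TensorFlow >= 1.3.
from scipy.linalg import hadamard


def fast_food(conv_out, sigma, nbr_stack=1, diag=False, trainable=False):
    with tf.name_scope("fast_food"):
        init_dim = np.prod([s.value for s in conv_out.shape if s.value is not None])
        # Fastfood needs a power-of-two dimension: zero-pad the input up to it
        final_dim = int(2 ** np.ceil(np.log2(init_dim)))
        conv_out2 = tf.reshape(conv_out, [-1, init_dim])
        conv_out2 = tf.pad(conv_out2, [[0, 0], [0, final_dim - init_dim]])
        H = hadamard(final_dim).astype(np.float32)
        outputs = []
        for i in range(nbr_stack):
            with tf.name_scope("stack_{}".format(i)):
                B = np.random.choice([-1.0, 1.0], size=final_dim).astype(np.float32)
                G = np.random.normal(size=final_dim).astype(np.float32)
                perm = np.random.permutation(final_dim)
                # S rescales the rows so their norms follow those of a Gaussian matrix
                S = (np.sqrt(np.random.chisquare(final_dim, size=final_dim))
                     / np.linalg.norm(G)).astype(np.float32)
                if trainable:
                    # adaptive variant: the diagonal matrices are learned
                    B = tf.Variable(B, name="B")
                    G = tf.Variable(G, name="G")
                    S = tf.Variable(S, name="S")
                h = conv_out2 * B
                h = tf.matmul(h, H)  # Hadamard transform as a dense dot product
                h = tf.gather(h, perm, axis=1)  # random permutation of the columns
                h = h * G
                h = tf.matmul(h, H)
                h = h * S * (1.0 / (sigma * float(np.sqrt(final_dim))))
                outputs.append(h)
        out = tf.concat(outputs, axis=1)
        if diag:
            # assumed semantics: return the linear features without the cosine map
            return out
        phase = random_biases(final_dim * nbr_stack)
        return float(np.sqrt(2.0 / (final_dim * nbr_stack))) * tf.cos(out + phase)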
def get_next_batch(full_set, batch_nbr, batch_size):
    """
    Return the next batch of a dataset.

    This function assumes that all the previous batches of this dataset have been taken with the same size.

    :param full_set: the full dataset from which the batch will be taken
    :param batch_nbr: the number of the batch
    :param batch_size: the size of the batch
    :return: the batch as a numpy array, wrapping around the end of the dataset if needed
    """
    index_start = (batch_nbr * batch_size) % full_set.shape[0]
    index_stop = ((batch_nbr + 1) * batch_size) % full_set.shape[0]
    if index_stop > index_start:
        return full_set[index_start:index_stop]
    else:
        part1 = full_set[index_start:]
        part2 = full_set[:index_stop]
        return np.vstack((part1, part2))
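
# A minimal illustration of the wrap-around behaviour on a toy array:
# get_next_batch(np.arange(10).reshape(5, 2), batch_nbr=2, batch_size=2)
# starts at row 4, wraps past the end, and stacks row 4 with row 0.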
if __name__ == '__main__':
    SIGMA = 5.0
    print("Sigma = {}".format(SIGMA))

    with tf.Graph().as_default():
        # todo parametrize dataset
        input_dim, output_dim = X_train.shape[1], Y_train.shape[1]
        x = tf.placeholder(tf.float32, shape=[None, input_dim], name="x")
        y_ = tf.placeholder(tf.float32, shape=[None, output_dim], name="labels")

        # side_size is the width (and height) of the square input images
        side_size = int(np.sqrt(input_dim))
        x_image = tf.reshape(x, [-1, side_size, side_size, 1])
        tf.summary.image("digit", x_image, max_outputs=3)

        # Representation layer
        h_conv = convolution_mnist(x_image)
        # h_conv = x
        # out_fc = fully_connected(h_conv)  # 95% accuracy
        # out_fc = tf.nn.relu(fast_food(h_conv, SIGMA, nbr_stack=1))  # 83% accuracy (conv) | 56% accuracy (noconv)
        out_fc = tf.nn.relu(fast_food(h_conv, SIGMA, nbr_stack=2))
        # out_fc = tf.nn.relu(fast_food(h_conv, SIGMA, nbr_stack=2, trainable=True))
        # out_fc = tf.nn.relu(fast_food(h_conv, SIGMA, trainable=True))  # 84% accuracy (conv) | 59% accuracy (noconv)
        # out_fc = fast_food(h_conv, SIGMA, diag=True, trainable=True)  # 84% accuracy (conv) | 59% accuracy (noconv)
        # out_fc = random_features(h_conv, SIGMA)  # 82% accuracy (conv) | 47% accuracy (noconv)

        # classification
        with tf.name_scope("fc_2"):
            keep_prob = tf.placeholder(tf.float32, name="keep_prob")
            h_fc1_drop = tf.nn.dropout(out_fc, keep_prob)
            dim = np.prod([s.value for s in h_fc1_drop.shape if s.value is not None])
            W_fc2 = weight_variable([dim, output_dim])
            b_fc2 = bias_variable([output_dim])
tf.summary.histogram("weights", W_fc2)
tf.summary.histogram("biases", b_fc2)
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# calcul de la loss
with tf.name_scope("xent"):
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv, name="xentropy"),
name="xentropy_mean")
tf.summary.scalar('loss-xent', cross_entropy)
# calcul du gradient
with tf.name_scope("train"):
global_step = tf.Variable(0, name="global_step", trainable=False)
train_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cross_entropy, global_step=global_step)
# calcul de l'accuracy
with tf.name_scope("accuracy"):
predictions = tf.argmax(y_conv, 1)
correct_prediction = tf.equal(predictions, tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("accuracy", accuracy)
merged_summary = tf.summary.merge_all()
init = tf.global_variables_initializer()
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Instantiate a SummaryWriter to output summaries and the Graph.
Luc Giffon
committed
summary_writer = tf.summary.FileWriter("results_deepfried_stacked")
summary_writer.add_graph(sess.graph)
# Initialize all Variable objects
sess.run(init)
# actual learning
started = t.time()
for i in range(1100):
X_batch = get_next_batch(X_train, i, 64)
Y_batch = get_next_batch(Y_train, i, 64)
feed_dict = {x: X_batch, y_: Y_batch, keep_prob: 0.5}
# le _ est pour capturer le retour de "train_optimizer" qu'il faut appeler
# pour calculer le gradient mais dont l'output ne nous interesse pas
_, loss = sess.run([train_optimizer, cross_entropy], feed_dict=feed_dict)
if i % 100 == 0:
print('step {}, loss {} (with dropout)'.format(i, loss))
summary_str = sess.run(merged_summary, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, i)
accuracy, preds = sess.run([accuracy, predictions], feed_dict={
x: X_test, y_: Y_test, keep_prob: 1.0})
print('test accuracy %g' % accuracy)
np.set_printoptions(threshold=np.nan)
print("Prediction sample: " + str(preds[:50]))
print("Actual values: " + str(np.argmax(Y_test[:50], axis=1)))
print("Elapsed time: %.4f s" % (stoped - started))