Commit 763e8da1 authored by Luc Giffon

Centralization of the functions for designing the layers + function...

Centralization of the functions for designing the layers + get_next_batch function + tf_op function + centralization of the functions for building the mnist network layers + example of timing a tf layer + use of the shared layer definitions in the example graphs
parent 468467ee
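This commit centralizes the layer helpers in skluc.neural_networks so every example builds its graph from the same definitions. A minimal sketch of the intended call site after the change (the input shape and class count are the usual MNIST values, assumed here rather than shown in the diff):

import tensorflow as tf
from skluc.neural_networks import inference_mnist

x_image = tf.placeholder(tf.float32, [None, 28, 28, 1], name="x_image")
y_conv, keep_prob = inference_mnist(x_image, output_dim=10)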
@@ -8,7 +8,7 @@ where the input comes from memory.
import tensorflow as tf
import numpy as np
import skluc.mldatasets as dataset
from skluc.neural_networks import bias_variable, weight_variable, conv2d, max_pool_2x2
from skluc.neural_networks import inference_mnist, get_next_batch
tf.logging.set_verbosity(tf.logging.ERROR)
@@ -43,76 +43,7 @@ Y_test = Y_test.astype(np.float32)
#################################################
def convolution_mnist(input):
with tf.name_scope("conv_pool_1"):
        # 20 is the number of filters we'll use, i.e. the number of different
        # shapes this layer is able to recognize
        W_conv1 = weight_variable([5, 5, 1, 20])
tf.summary.histogram("weights conv1", W_conv1)
b_conv1 = bias_variable([20])
tf.summary.histogram("biases conv1", b_conv1)
        h_conv1 = tf.nn.relu(conv2d(input, W_conv1) + b_conv1)
tf.summary.histogram("act conv1", h_conv1)
h_pool1 = max_pool_2x2(h_conv1)
with tf.name_scope("conv_pool_2"):
W_conv2 = weight_variable([5, 5, 20, 50])
tf.summary.histogram("weights conv2", W_conv2)
b_conv2 = bias_variable([50])
tf.summary.histogram("biases conv2", b_conv2)
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
tf.summary.histogram("act conv2", h_conv2)
h_pool2 = max_pool_2x2(h_conv2)
return h_pool2
def random_variable(shape, sigma):
W = np.random.normal(size=shape, scale=sigma).astype(np.float32)
return tf.Variable(W, name="random_Weights", trainable=False)
def random_biases(shape):
b = np.random.uniform(0, 2 * np.pi, size=shape).astype(np.float32)
    return tf.Variable(b, name="random_biases", trainable=False)
def fully_connected(conv_out):
with tf.name_scope("fc_1"):
init_dim = np.prod([s.value for s in conv_out.shape if s.value is not None])
h_pool2_flat = tf.reshape(conv_out, [-1, init_dim])
W_fc1 = weight_variable([init_dim, 4096*2])
b_fc1 = bias_variable([4096*2])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
tf.summary.histogram("weights", W_fc1)
tf.summary.histogram("biases", b_fc1)
return h_fc1
def get_next_batch(full_set, batch_nbr, batch_size):
"""
Return the next batch of a dataset.
This function assumes that all the previous batches of this dataset have been taken with the same size.
:param full_set: the full dataset from which the batch will be taken
:param batch_nbr: the number of the batch
:param batch_size: the size of the batch
    :return: the batch, of size batch_size
"""
index_start = (batch_nbr * batch_size) % full_set.shape[0]
index_stop = ((batch_nbr + 1) * batch_size) % full_set.shape[0]
if index_stop > index_start:
return full_set[index_start:index_stop]
else:
part1 = full_set[index_start:]
part2 = full_set[:index_stop]
return np.vstack((part1, part2))
if __name__ == '__main__':
def main():
SIGMA = 5.0
print("Sigma = {}".format(SIGMA))
@@ -130,21 +61,7 @@ if __name__ == '__main__':
x_image_distorded = tf.image.random_brightness(x_image, max_delta=30)
tf.summary.image("digit_distorded", x_image_distorded, max_outputs=3)
# Representation layer
h_conv = convolution_mnist(x_image)
out_fc = fully_connected(h_conv) # 95% accuracy
# classification
with tf.name_scope("fc_2"):
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
h_fc1_drop = tf.nn.dropout(out_fc, keep_prob)
dim = np.prod([s.value for s in h_fc1_drop.shape if s.value is not None])
W_fc2 = weight_variable([dim, output_dim])
b_fc2 = bias_variable([output_dim])
tf.summary.histogram("weights", W_fc2)
tf.summary.histogram("biases", b_fc2)
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
y_conv, keep_prob = inference_mnist(x_image, output_dim)
    # compute the loss
with tf.name_scope("xent"):
@@ -200,3 +117,7 @@
print("Prediction sample: " + str(preds[:50]))
print("Actual values: " + str(np.argmax(Y_test[:50], axis=1)))
print("Elapsed time: %.4f s" % (stoped - started))
if __name__ == '__main__':
main()
\ No newline at end of file
@@ -11,7 +11,7 @@ import tensorflow as tf
import numpy as np
import skluc.mldatasets as dataset
from skluc.convert_image_to_records import convert_to
from skluc.neural_networks import conv2d, max_pool_2x2
from skluc.neural_networks import inference_mnist
tf.logging.set_verbosity(tf.logging.ERROR)
@@ -64,90 +64,6 @@ val.labels = Y_val
val.num_examples = X_val.shape[0]
def conv_relu_pool(input_, kernel_shape, bias_shape):
"""
Generic function for defining a convolutional layer.
    :param input_: The input tensor to be convolved
:param kernel_shape: The shape of the kernels/filters
:param bias_shape: The shape of the bias
:return: The output tensor of the convolution
"""
weights = tf.get_variable("weights", kernel_shape, initializer=tf.random_normal_initializer(stddev=0.1))
biases = tf.get_variable("biases", bias_shape, initializer=tf.constant_initializer(0.0))
tf.summary.histogram("weights", weights)
tf.summary.histogram("biases", biases)
conv = conv2d(input_, weights)
relu = tf.nn.relu(conv + biases)
tf.summary.histogram("act", relu)
pool = max_pool_2x2(relu)
return pool
def fully_connected(input_, output_dim, act=tf.nn.relu):
"""
Build a fully connected layer using input_ as input and output_dim as output size.
If the input_ is not the correct shape, it is reshaped.
"""
input_dim = np.prod([s.value for s in input_.shape if s.value is not None])
    if len(input_.shape) != 2:
        print("[WARNING] {} input of fully_connected has rank != 2 (got {}). Reshaping."
              .format(input_.name, len(input_.shape)))
input_flat = tf.reshape(input_, [-1, input_dim])
else:
input_flat = input_
weights = tf.get_variable("weights", [input_dim, output_dim], initializer=tf.random_normal_initializer(stddev=0.1))
biases = tf.get_variable("biases", [output_dim], initializer=tf.constant_initializer(0.0))
fc = tf.matmul(input_flat, weights) + biases
    if act is not None:
        fc = act(fc)
tf.summary.histogram("weights", weights)
tf.summary.histogram("biases", biases)
return fc
def convolution_mnist(input_):
"""
    Define the two convolutional layers used for mnist.
    :param input_: The input images to be convolved
    :return: The output of the second convolution/pooling block
"""
with tf.variable_scope("conv_pool_1"):
conv1 = conv_relu_pool(input_, [5, 5, 1, 20], [20])
with tf.variable_scope("conv_pool_2"):
conv2 = conv_relu_pool(conv1, [5, 5, 20, 50], [50])
return conv2
def fc_mnist(input_):
"""
Define the fully connected layers used for mnist.
:param input_: The input tensor
    :return: The output of the fully connected layer
"""
with tf.variable_scope("fc_relu_1"):
fc = fully_connected(input_, 4096*2)
return fc
def classification_mnist(input_, output_dim):
"""
    Define the classification layer used for mnist.
    :param input_: The input to classify
    :param output_dim: The returned output dimension
    :return: The classification output and the keep_prob placeholder
"""
with tf.variable_scope("fc_relu_2"):
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
input_drop = tf.nn.dropout(input_, keep_prob)
y_ = fully_connected(input_drop, output_dim)
return y_, keep_prob
def decode(serialized_example):
features = tf.parse_single_example(
serialized_example,
@@ -204,21 +120,7 @@ def get_tf_record(record_filename, num_epochs, batch_size, distord=True):
return iterator
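Only the tail of get_tf_record is visible in this hunk; assuming the returned iterator is a tf.data iterator, a hedged consumption sketch looks like this (the record path is hypothetical):

iterator = get_tf_record("/tmp/mnist.tfrecords", num_epochs=10, batch_size=64)
image_batch, label_batch = iterator.get_next()  # tensors feeding the rest of the graph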
def inference_mnist(x_image, output_dim):
"""
    Compute the class predictions for the given image tensor.
    :param x_image: Input tensor of images
    :param output_dim: The number of classes
:return: The predicted classes
"""
h_conv = convolution_mnist(x_image)
out_fc = fc_mnist(h_conv)
y_out, keep_prob = classification_mnist(out_fc, output_dim)
return y_out, keep_prob
def train():
def training():
SIGMA = 5.0
num_epochs = 10000
batch_size = 64
@@ -280,7 +182,7 @@ def train():
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Instantiate a SummaryWriter to output summaries and the Graph.
summary_writer = tf.summary.FileWriter("results_tfrecord_nn")
summary_writer = tf.summary.FileWriter("/tmp/results_tfrecord_nn")
summary_writer.add_graph(sess.graph)
# Initialize all Variable objects
sess.run(init)
@@ -323,7 +225,7 @@ def create_records():
if __name__ == '__main__':
create_records()
train()
    training()
import tensorflow as tf
import time as t
import numpy as np
# --- Usual functions --- #
# TODO: maybe rename this module to "tensorflow utils"
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name="weights")
def weight_variable(shape, trainable=True):
# initial = tf.truncated_normal(shape, stddev=0.1)
# return tf.Variable(initial, name="weights", trainable=trainable)
raise NotImplementedError("This function shouldn't be used anymore, use tf.get_variable instead")
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name="biases")
def bias_variable(shape, trainable=True):
# initial = tf.constant(0.1, shape=shape)
# return tf.Variable(initial, name="biases", trainable=trainable)
raise NotImplementedError("This function shouldn't be used anymore, use tf.get_variable instead")
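Both deprecated helpers point at tf.get_variable; a minimal sketch of the replacement pattern (the scope name and shapes are illustrative, using the same initializers as elsewhere in this commit):

with tf.variable_scope("example_layer"):
    weights = tf.get_variable("weights", [784, 500], initializer=tf.random_normal_initializer(stddev=0.1))
    biases = tf.get_variable("biases", [500], initializer=tf.constant_initializer(0.0))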
def conv2d(x, W):
@@ -19,3 +24,139 @@ def conv2d(x, W):
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def get_next_batch(full_set, batch_nbr, batch_size):
"""
Return the next batch of a dataset.
This function assumes that all the previous batches of this dataset have been taken with the same size.
:param full_set: the full dataset from which the batch will be taken
:param batch_nbr: the number of the batch
:param batch_size: the size of the batch
    :return: the batch, of size batch_size
"""
index_start = (batch_nbr * batch_size) % full_set.shape[0]
index_stop = ((batch_nbr + 1) * batch_size) % full_set.shape[0]
if index_stop > index_start:
return full_set[index_start:index_stop]
else:
part1 = full_set[index_start:]
part2 = full_set[:index_stop]
return np.vstack((part1, part2))
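For illustration, a small usage check of the wrap-around behaviour (the toy array is made up): with 100 rows and batch_size=32, batch number 3 starts at row 96, wraps around, and stacks rows 96..99 on top of rows 0..27.

X = np.arange(400).reshape(100, 4)
batch = get_next_batch(X, batch_nbr=3, batch_size=32)
assert batch.shape == (32, 4)  # rows 96..99 followed by rows 0..27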
def fully_connected(input_op, output_dim, act=None, name_scope="fully_connected"):
"""
    Implement a fully connected layer mapping input_op to output_dim units.
    :param input_op: The input tensor, reshaped to rank 2 if needed
    :param output_dim: The number of output units
    :param act: The activation function to apply (None for a linear layer)
    :param name_scope: The name scope wrapping the layer ops
    :return: The output tensor of the layer
"""
with tf.name_scope(name_scope):
        init_dim = np.prod([s.value for s in input_op.shape if s.value is not None])
        input_flat = tf.reshape(input_op, [-1, init_dim])
        weights = tf.get_variable("weights", [init_dim, output_dim], initializer=tf.random_normal_initializer(stddev=0.1))
        biases = tf.get_variable("biases", [output_dim], initializer=tf.constant_initializer(0.0))
        dense = tf.matmul(input_flat, weights) + biases
        if act is None:
            result = dense
        else:
            result = act(dense)
        tf.summary.histogram("weights", weights)
        tf.summary.histogram("biases", biases)
    return result
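Since the weights come from tf.get_variable, two calls to fully_connected must live in distinct variable scopes (as fc_mnist and classification_mnist below do); a hedged usage sketch:

x = tf.placeholder(tf.float32, [None, 256])  # illustrative input size
with tf.variable_scope("fc_demo"):
    out = fully_connected(x, 128, act=tf.nn.relu)  # out has shape (?, 128)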
def conv_relu_pool(input_, kernel_shape, bias_shape, trainable=True):
"""
Generic function for defining a convolutional layer.
    :param input_: The input tensor to be convolved
    :param kernel_shape: The shape of the kernels/filters
    :param bias_shape: The shape of the bias
    :param trainable: Whether the layer variables should be trainable
    :return: The output tensor of the convolution
"""
with tf.name_scope("convolution"):
weights = tf.get_variable("weights", kernel_shape, initializer=tf.random_normal_initializer(stddev=0.1), trainable=trainable)
biases = tf.get_variable("biases", bias_shape, initializer=tf.constant_initializer(0.0), trainable=trainable)
tf.summary.histogram("weights", weights)
tf.summary.histogram("biases", biases)
conv = conv2d(input_, weights)
relu = tf.nn.relu(conv + biases)
tf.summary.histogram("act", relu)
pool = max_pool_2x2(relu)
return pool
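As a quick shape sanity check, a sketch assuming conv2d uses stride 1 with SAME padding, as in the standard MNIST tutorial (the conv2d body is outside this hunk):

with tf.variable_scope("shape_check"):
    x = tf.placeholder(tf.float32, [None, 28, 28, 1])
    pool = conv_relu_pool(x, [5, 5, 1, 20], [20])
# A SAME convolution keeps 28x28 and max_pool_2x2 halves it: pool has shape (?, 14, 14, 20)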
def tf_op(d_feed_dict, ops):
"""
Simple fct for running tensorflow op.
:param d_feed_dict:
:param ops:
:return:
"""
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run([init])
sess.run(ops, feed_dict=d_feed_dict)
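The commit message mentions an example of timing a tf layer; a minimal sketch of that use of tf_op (the batch size and the layer being timed are assumptions):

x = tf.placeholder(tf.float32, [None, 28, 28, 1], name="x")
with tf.variable_scope("timed_conv"):
    pool = conv_relu_pool(x, [5, 5, 1, 20], [20])
fake_batch = np.random.rand(64, 28, 28, 1).astype(np.float32)  # random stand-in images
start = t.time()
tf_op({x: fake_batch}, [pool])  # builds a session, initializes variables, runs the layer
print("conv_relu_pool forward pass: %.4f s" % (t.time() - start))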
def convolution_mnist(input_, trainable=True):
"""
    Define the two convolutional layers used for mnist.
    :param input_: The input images to be convolved
    :param trainable: Whether these convolutional layers should be trained
    :return: The output of the second convolution/pooling block
"""
with tf.variable_scope("conv_pool_1"):
conv1 = conv_relu_pool(input_, [5, 5, 1, 20], [20], trainable=trainable)
with tf.variable_scope("conv_pool_2"):
conv2 = conv_relu_pool(conv1, [5, 5, 20, 50], [50], trainable=trainable)
return conv2
def fc_mnist(input_):
"""
Define the fully connected layers used for mnist.
:param input_: The input tensor
    :return: The output of the fully connected layer
"""
with tf.variable_scope("fc_relu_1"):
fc = fully_connected(input_, 4096*2, act=tf.nn.relu)
return fc
def classification_mnist(input_, output_dim):
"""
    Define the classification layer used for mnist.
    :param input_: The input to classify
    :param output_dim: The returned output dimension
    :return: The classification output and the keep_prob placeholder
"""
with tf.variable_scope("fc_relu_2"):
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
input_drop = tf.nn.dropout(input_, keep_prob)
y_ = fully_connected(input_drop, output_dim)
return y_, keep_prob
def inference_mnist(x_image, output_dim):
"""
    Compute the class predictions for the given image tensor.
    :param x_image: Input tensor of images
    :param output_dim: The number of classes
:return: The predicted classes
"""
h_conv = convolution_mnist(x_image)
out_fc = fc_mnist(h_conv)
y_out, keep_prob = classification_mnist(out_fc, output_dim)
return y_out, keep_prob
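Putting the centralized pieces together, a hedged end-to-end sketch (the loss and optimizer are the usual choices from the examples above, not part of this module):

x = tf.placeholder(tf.float32, [None, 28, 28, 1])
y_true = tf.placeholder(tf.float32, [None, 10])
y_out, keep_prob = inference_mnist(x, output_dim=10)
xent = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_out))
train_step = tf.train.AdamOptimizer(1e-4).minimize(xent)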