diff --git a/skluc/examples/__init__.py b/skluc/examples/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/skluc/examples/fc_nn.py b/skluc/examples/fc_nn.py
new file mode 100644
index 0000000000000000000000000000000000000000..bea2ea279d49547917cb13f07e0a0ae7882872b4
--- /dev/null
+++ b/skluc/examples/fc_nn.py
@@ -0,0 +1,202 @@
+"""
+Convolutional neural network implementation in TensorFlow with fully connected layers.
+
+The neural network is run on the MNIST dataset and we show an example of input distortion in the case
+where the input comes from memory.
+"""
+
+import tensorflow as tf
+import numpy as np
+import skluc.mldatasets as dataset
+from skluc.neural_networks import bias_variable, weight_variable, conv2d, max_pool_2x2
+
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+import time as t
+
+from sklearn.preprocessing import LabelBinarizer
+
+# Preparing the dataset #########################
+
+enc = LabelBinarizer()
+mnist = dataset.MnistDataset()
+mnist = mnist.load()
+X_train, Y_train = mnist["train"]
+X_train = np.array(X_train / 255)
+enc.fit(Y_train)
+Y_train = np.array(enc.transform(Y_train))
+X_test, Y_test = mnist["test"]
+X_test = np.array(X_test / 255)
+Y_test = np.array(enc.transform(Y_test))
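+# labels are now one-hot vectors, e.g. the digit 3 becomes [0 0 0 1 0 0 0 0 0 0]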
+
+X_train = X_train.astype(np.float32)
+permut = np.random.permutation(X_train.shape[0])
+val_size = 5000
+X_val = X_train[permut[:val_size]]
+X_train = X_train[permut[val_size:]]
+Y_val = Y_train[permut[:val_size]]
+Y_train = Y_train[permut[val_size:]]
+X_test = X_test.astype(np.float32)
+Y_train = Y_train.astype(np.float32)
+Y_test = Y_test.astype(np.float32)
+
+#################################################
+
+
+def convolution_mnist(input_):
+    with tf.name_scope("conv_pool_1"):
+        # 20 is the number of filters we use, i.e. the number of different
+        # patterns this layer is able to recognize
+        W_conv1 = weight_variable([5, 5, 1, 20])
+        tf.summary.histogram("weights conv1", W_conv1)
+        b_conv1 = bias_variable([20])
+        tf.summary.histogram("biases conv1", b_conv1)
+        h_conv1 = tf.nn.relu(conv2d(input_, W_conv1) + b_conv1)
+        tf.summary.histogram("act conv1", h_conv1)
+        h_pool1 = max_pool_2x2(h_conv1)
+
+    with tf.name_scope("conv_pool_2"):
+        W_conv2 = weight_variable([5, 5, 20, 50])
+        tf.summary.histogram("weights conv2", W_conv2)
+        b_conv2 = bias_variable([50])
+        tf.summary.histogram("biases conv2", b_conv2)
+        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
+        tf.summary.histogram("act conv2", h_conv2)
+        h_pool2 = max_pool_2x2(h_conv2)
+
+    return h_pool2
+
+
+def random_variable(shape, sigma):
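+    """
+    Build a fixed (non-trainable) weight matrix drawn from a centered Gaussian of standard deviation sigma.
+
+    Presumably intended for a random-feature layer; it is not used in this example.
+    """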
+    W = np.random.normal(size=shape, scale=sigma).astype(np.float32)
+    return tf.Variable(W, name="random_Weights", trainable=False)
+
+
+def random_biases(shape):
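+    """
+    Build fixed (non-trainable) random phases drawn uniformly in [0, 2*pi).
+
+    Like random_variable above, this is not used in this example.
+    """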
+    b = np.random.uniform(0, 2 * np.pi, size=shape).astype(np.float32)
+    return tf.Variable(b, name="random_biases", trainable=False)
+
+
+def fully_connected(conv_out):
+    with tf.name_scope("fc_1"):
+        init_dim = np.prod([s.value for s in conv_out.shape if s.value is not None])
+        h_pool2_flat = tf.reshape(conv_out, [-1, init_dim])
+        W_fc1 = weight_variable([init_dim, 4096*2])
+        b_fc1 = bias_variable([4096*2])
+        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
+        tf.summary.histogram("weights", W_fc1)
+        tf.summary.histogram("biases", b_fc1)
+
+    return h_fc1
+
+
+def get_next_batch(full_set, batch_nbr, batch_size):
+    """
+    Return the next batch of a dataset.
+
+    This function assumes that all the previous batches of this dataset have been taken with the same size.
+
+    :param full_set: the full dataset from which the batch will be taken
+    :param batch_nbr: the number of the batch
+    :param batch_size: the size of the batch
+    :return: The next batch of full_set, wrapping around to the beginning when the end is reached
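+
+    Illustrative example (not executed): with a dataset of 5 rows and batch_size=2,
+    batch number 2 starts at row 4, wraps around, and returns rows 4 and 0 stacked together.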
+    """
+    index_start = (batch_nbr * batch_size) % full_set.shape[0]
+    index_stop = ((batch_nbr + 1) * batch_size) % full_set.shape[0]
+    if index_stop > index_start:
+        return full_set[index_start:index_stop]
+    else:
+        part1 = full_set[index_start:]
+        part2 = full_set[:index_stop]
+        return np.vstack((part1, part2))
+
+
+if __name__ == '__main__':
+    SIGMA = 5.0
+    print("Sigma = {}".format(SIGMA))
+
+    with tf.Graph().as_default():
+        input_dim, output_dim = X_train.shape[1], Y_train.shape[1]
+
+        x = tf.placeholder(tf.float32, shape=[None, input_dim], name="x")
+        y_ = tf.placeholder(tf.float32, shape=[None, output_dim], name="labels")
+
+        # side size is width or height of the images
+        side_size = int(np.sqrt(input_dim))
+        x_image = tf.reshape(x, [-1, side_size, side_size, 1])
+        tf.summary.image("digit", x_image, max_outputs=3)
+        # this is how a distortion would be applied; the distorted image is only written to summaries and not used afterwards
+        x_image_distorted = tf.image.random_brightness(x_image, max_delta=30)
+        tf.summary.image("digit_distorted", x_image_distorted, max_outputs=3)
+
+        # Representation layer
+        h_conv = convolution_mnist(x_image)
+        out_fc = fully_connected(h_conv)  # 95% accuracy
+
+        # classification
+        with tf.name_scope("fc_2"):
+            keep_prob = tf.placeholder(tf.float32, name="keep_prob")
+            h_fc1_drop = tf.nn.dropout(out_fc, keep_prob)
+            dim = np.prod([s.value for s in h_fc1_drop.shape if s.value is not None])
+            W_fc2 = weight_variable([dim, output_dim])
+            b_fc2 = bias_variable([output_dim])
+            tf.summary.histogram("weights", W_fc2)
+            tf.summary.histogram("biases", b_fc2)
+
+            y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
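+            # y_conv holds the raw logits; the softmax is applied inside the cross-entropy loss below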
+
+        # compute the loss
+        with tf.name_scope("xent"):
+            cross_entropy = tf.reduce_mean(
+                tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv, name="xentropy"),
+                name="xentropy_mean")
+            tf.summary.scalar('loss-xent', cross_entropy)
+
+        # compute the gradient (training step)
+        with tf.name_scope("train"):
+            global_step = tf.Variable(0, name="global_step", trainable=False)
+            train_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cross_entropy, global_step=global_step)
+
+        # compute the accuracy
+        with tf.name_scope("accuracy"):
+            predictions = tf.argmax(y_conv, 1)
+            correct_prediction = tf.equal(predictions, tf.argmax(y_, 1))
+            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
+            tf.summary.scalar("accuracy", accuracy)
+
+        merged_summary = tf.summary.merge_all()
+
+        init = tf.global_variables_initializer()
+        # Create a session for running Ops on the Graph.
+        sess = tf.Session()
+        # Instantiate a SummaryWriter to output summaries and the Graph.
+        summary_writer = tf.summary.FileWriter("results_fc_distorted")
+        summary_writer.add_graph(sess.graph)
+        # Initialize all Variable objects
+        sess.run(init)
+        # actual learning
+        started = t.time()
+        feed_dict_val = {x: X_val, y_: Y_val, keep_prob: 1.0}
+        for i in range(2):  # note: only 2 training steps in this example; increase for an actual training run
+            X_batch = get_next_batch(X_train, i, 64)
+            Y_batch = get_next_batch(Y_train, i, 64)
+            feed_dict = {x: X_batch, y_: Y_batch, keep_prob: 0.5}
+            # the _ captures the return value of "train_optimizer", which must be run
+            # to apply the gradient step but whose output is of no interest to us
+            _, loss = sess.run([train_optimizer, cross_entropy], feed_dict=feed_dict)
+            if i % 100 == 0:
+                print('step {}, loss {} (with dropout)'.format(i, loss))
+                r_accuracy = sess.run([accuracy], feed_dict=feed_dict_val)
+                print("accuracy: {} on validation set (without dropout).".format(r_accuracy))
+                summary_str = sess.run(merged_summary, feed_dict=feed_dict)
+                summary_writer.add_summary(summary_str, i)
+
+        stopped = t.time()
+        accuracy_eval, preds = sess.run([accuracy, predictions], feed_dict={
+            x: X_test, y_: Y_test, keep_prob: 1.0})
+        print('test accuracy %g' % accuracy_eval)
+        np.set_printoptions(threshold=np.inf)
+        print("Prediction sample: " + str(preds[:50]))
+        print("Actual values: " + str(np.argmax(Y_test[:50], axis=1)))
+        print("Elapsed time: %.4f s" % (stopped - started))
\ No newline at end of file
diff --git a/skluc/examples/tfrecord_nn.py b/skluc/examples/tfrecord_nn.py
new file mode 100644
index 0000000000000000000000000000000000000000..3620f287d1d396bb78846395b0028a963008f0a9
--- /dev/null
+++ b/skluc/examples/tfrecord_nn.py
@@ -0,0 +1,329 @@
+"""
+Convolutional neural network implementation in TensorFlow using TFRecords as inputs.
+
+The TFRecords are first written from the in-memory dataset, then read back.
+
+The neural network is run on the MNIST dataset and we show an example of input distortion in
+the case of a TFRecord data source.
+"""
+
+import tensorflow as tf
+import numpy as np
+import skluc.mldatasets as dataset
+from skluc.convert_image_to_records import convert_to
+from skluc.neural_networks import conv2d, max_pool_2x2
+
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+import time as t
+from collections import namedtuple
+
+dataset_mnist = dataset.MnistDataset()
+dataset_mnist.load()
+mnist = dataset_mnist.to_image()
+
+X_train, Y_train = mnist["train"]
+X_test, Y_test = mnist["test"]
+
+# normalize
+X_train = np.array(X_train / 255)
+X_test = np.array(X_test / 255)
+X_train = X_train.astype(np.float32)
+Y_train = Y_train.astype(np.float32)
+
+# define validation
+permut = np.random.permutation(X_train.shape[0])
+val_size = 5000
+
+X_val = X_train[permut[:val_size]]
+Y_val = Y_train[permut[:val_size]]
+Y_val = Y_val.astype(np.float32)
+
+X_train = X_train[permut[val_size:]]
+Y_train = Y_train[permut[val_size:]]
+
+# Define test set
+X_test = X_test.astype(np.float32)
+Y_test = Y_test.astype(np.float32)
+
+# build dataset objects
+
+Dataset = namedtuple("Dataset", ["images", "labels", "num_examples"])
+
+train = Dataset(images=X_train, labels=Y_train, num_examples=X_train.shape[0])
+test = Dataset(images=X_test, labels=Y_test, num_examples=X_test.shape[0])
+val = Dataset(images=X_val, labels=Y_val, num_examples=X_val.shape[0])
+
+
+def conv_relu_pool(input_, kernel_shape, bias_shape):
+    """
+    Generic function for defining a convolutional layer.
+
+    :param input_: The input tensor to be convoluted
+    :param kernel_shape: The shape of the kernels/filters
+    :param bias_shape: The shape of the bias
+    :return: The output tensor of the convolution
+    """
+    weights = tf.get_variable("weights", kernel_shape, initializer=tf.random_normal_initializer(stddev=0.1))
+    biases = tf.get_variable("biases", bias_shape, initializer=tf.constant_initializer(0.0))
+    tf.summary.histogram("weights", weights)
+    tf.summary.histogram("biases", biases)
+    conv = conv2d(input_, weights)
+    relu = tf.nn.relu(conv + biases)
+    tf.summary.histogram("act", relu)
+    pool = max_pool_2x2(relu)
+    return pool
+
+
+def fully_connected(input_, output_dim, act=tf.nn.relu):
+    """
+    Build a fully connected layer using input_ as input and output_dim as output size.
+
+    If the input_ is not the correct shape, it is reshaped.
+    """
+    input_dim = np.prod([s.value for s in input_.shape if s.value is not None])
+    if len(input_.shape) != 2:
+        print("[WARNING] {} input of fully_connected has rank {} instead of 2. Reshaping."
+              .format(input_.name, len(input_.shape)))
+        input_flat = tf.reshape(input_, [-1, input_dim])
+    else:
+        input_flat = input_
+    weights = tf.get_variable("weights", [input_dim, output_dim], initializer=tf.random_normal_initializer(stddev=0.1))
+    biases = tf.get_variable("biases", [output_dim], initializer=tf.constant_initializer(0.0))
+    fc = tf.matmul(input_flat, weights) + biases
+    if act is not None:
+        fc = act(fc)
+    tf.summary.histogram("weights", weights)
+    tf.summary.histogram("biases", biases)
+    return fc
+
+
+def convolution_mnist(input_):
+    """
+    Define the two convolutional layers used for MNIST.
+
+    :param input_: The input images to be convoluted
+    :return:
+    """
+    with tf.variable_scope("conv_pool_1"):
+        conv1 = conv_relu_pool(input_, [5, 5, 1, 20], [20])
+    with tf.variable_scope("conv_pool_2"):
+        conv2 = conv_relu_pool(conv1, [5, 5, 20, 50], [50])
+    return conv2
+
+
+def fc_mnist(input_):
+    """
+    Define the fully connected layer used for MNIST.
+
+    :param input_: The input tensor
+    :return:
+    """
+    with tf.variable_scope("fc_relu_1"):
+        fc = fully_connected(input_, 4096*2)
+    return fc
+
+
+def classification_mnist(input_, output_dim):
+    """
+    Define the classification layer used for MNIST.
+
+    :param input_: The input to classify
+    :param output_dim: The returned output dimension
+    :return:
+    """
+    with tf.variable_scope("fc_relu_2"):
+        keep_prob = tf.placeholder(tf.float32, name="keep_prob")
+        input_drop = tf.nn.dropout(input_, keep_prob)
+        # no activation on the output layer: the softmax cross-entropy loss expects raw logits
+        y_ = fully_connected(input_drop, output_dim, act=None)
+    return y_, keep_prob
+
+
+def decode(serialized_example):
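+    """
+    Parse a single serialized tf.train.Example into a flat 784-float image and a one-hot label.
+    """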
+    features = tf.parse_single_example(
+        serialized_example,
+        features={
+            'image_raw': tf.FixedLenFeature([], tf.string),
+            'label': tf.FixedLenFeature([], tf.int64),
+            'height': tf.FixedLenFeature([], tf.int64),
+            'width': tf.FixedLenFeature([], tf.int64),
+            'depth': tf.FixedLenFeature([], tf.int64)
+        }
+    )
+    image = tf.decode_raw(features['image_raw'], tf.float32)
+    image.set_shape((784,))
+
+    label = tf.cast(features['label'], tf.int32)
+    label = tf.one_hot(label, 10)
+
+    return image, label
+
+
+def distortion(image, label):
+    """
+    Apply some distortion (random brightness) to the input image.
+
+    :param image:
+    :param label:
+    :return:
+    """
+    distorted_image = tf.image.random_brightness(image,
+                                                 max_delta=15)
+    return distorted_image, label
+
+
+def get_tf_record(record_filename, num_epochs, batch_size, distord=True):
+    """
+    Return an iterator to plug into the input of the neural network.
+
+    The iterator yields a new batch on demand.
+
+    :param record_filename: The filename where to find the tfrecords.
+    :param num_epochs: The number of epochs we will need
+    :param batch_size: The size of each returned batch
+    :param distord: Whether to apply distortion to the input (currently without effect, since the distortion map below is commented out)
+    :return: The iterator to plug into the input of the network
+    """
+    dataset = tf.data.TFRecordDataset(record_filename)
+    dataset = dataset.repeat(num_epochs)
+    dataset = dataset.map(decode)
+    # if distord:
+    #     dataset = dataset.map(distortion)
+    dataset = dataset.shuffle(1000 + 3 * batch_size)
+    dataset = dataset.batch(batch_size)  # combine the elements into batches
+    iterator = dataset.make_one_shot_iterator()
+    return iterator
+
+
+def inference_mnist(x_image, output_dim):
+    """
+    Compute inference on the class of the given tensor images.
+
+    :param x_image: Input tensor images
+    :param output_dim: The number of class
+    :return: The predicted classes
+    """
+    h_conv = convolution_mnist(x_image)
+    out_fc = fc_mnist(h_conv)
+    y_out, keep_prob = classification_mnist(out_fc, output_dim)
+    return y_out, keep_prob
+
+
+def train():
+    SIGMA = 5.0
+    num_epochs = 10000
+    batch_size = 64
+    print("Sigma = {}".format(SIGMA))
+
+    with tf.Graph().as_default():
+        # these iterators return one batch after another
+        iterator_train = get_tf_record("/tmp/data/mnist/mnist_train.tfrecords", num_epochs, batch_size, distord=False)
+        iterator_val = get_tf_record("/tmp/data/mnist/mnist_val.tfrecords", num_epochs, batch_size)
+        iterator_test = get_tf_record("/tmp/data/mnist/mnist_test.tfrecords", num_epochs, batch_size)
+
+        input_dim, output_dim = 784, 10
+
+        with tf.name_scope("train_set"):
+            x_image_train, y_train = iterator_train.get_next()
+            x_image_train = tf.reshape(x_image_train, [-1, 28, 28, 1])
+            tf.summary.image("digit", x_image_train, max_outputs=3)
+
+        with tf.name_scope("val_set"):
+            x_image_val, y_val = iterator_val.get_next()
+            x_image_val = tf.reshape(x_image_val, tf.stack([-1, 28, 28, 1]))
+
+        with tf.name_scope("test_set"):
+            x_image_test, y_test = iterator_test.get_next()
+            x_image_test = tf.reshape(x_image_test, tf.stack([-1, 28, 28, 1]))
+
+        with tf.variable_scope("inference") as scope_inference:
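+            # the same variable scope is reused so that the validation and test branches
+            # share the weights learned by the training branch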
+            y_train_out, keep_prob_train = inference_mnist(x_image_train, output_dim)
+            scope_inference.reuse_variables()
+            y_val_out, keep_prob_val = inference_mnist(x_image_val, output_dim)
+            scope_inference.reuse_variables()
+            y_test_out, keep_prob_test = inference_mnist(x_image_test, output_dim)
+
+        # compute the loss
+        with tf.name_scope("xent"):
+            cross_entropy = tf.reduce_mean(
+                tf.nn.softmax_cross_entropy_with_logits(labels=y_train, logits=y_train_out, name="xentropy"),
+                name="xentropy_mean")
+            tf.summary.scalar('loss-xent', cross_entropy)
+
+        # compute the gradient (training step)
+        with tf.name_scope("train"):
+            global_step = tf.Variable(0, name="global_step", trainable=False)
+            train_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cross_entropy, global_step=global_step)
+
+        # compute the accuracy
+        with tf.name_scope("accuracy"):
+            predictions_val = tf.argmax(y_val_out, 1)
+            correct_prediction_val = tf.equal(predictions_val, tf.argmax(y_val, 1))
+            accuracy_val = tf.reduce_mean(tf.cast(correct_prediction_val, tf.float32))
+            tf.summary.scalar("accuracy", accuracy_val)
+            predictions_test = tf.argmax(y_test_out, 1)
+            correct_prediction_test = tf.equal(predictions_test, tf.argmax(y_test, 1))
+            accuracy_test = tf.reduce_mean(tf.cast(correct_prediction_test, tf.float32))
+
+        merged_summary = tf.summary.merge_all()
+
+        init = tf.global_variables_initializer()
+        # Create a session for running Ops on the Graph.
+        sess = tf.Session()
+        # Instantiate a SummaryWriter to output summaries and the Graph.
+        summary_writer = tf.summary.FileWriter("results_tfrecord_nn")
+        summary_writer.add_graph(sess.graph)
+        # Initialize all Variable objects
+        sess.run(init)
+        # actual learning
+        started = t.time()
+        feed_dict = {
+            keep_prob_val: 1.0,
+            keep_prob_train: 0.5,
+            keep_prob_test: 1.0
+        }
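+        # unlike the in-memory example, no image/label placeholders are fed here:
+        # the input pipeline reads batches directly from the TFRecord files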
+        for i in range(num_epochs):  # note: each iteration processes a single batch (a training step), not a full epoch
+            # run training and get the loss
+            _, loss = sess.run([train_optimizer, cross_entropy], feed_dict=feed_dict)
+
+            if i % 100 == 0:
+                print('step {}, loss {} (with dropout)'.format(i, loss))
+                r_accuracy = sess.run([accuracy_val], feed_dict=feed_dict)
+                print("accuracy: {} on validation set (without dropout).".format(r_accuracy))
+                summary_str = sess.run(merged_summary, feed_dict=feed_dict)
+                summary_writer.add_summary(summary_str, i)
+
+        stopped = t.time()
+        # note: this evaluates the accuracy on a single batch of the test set
+        accuracy_eval, preds_eval, exp_eval = sess.run([accuracy_test, predictions_test, y_test], feed_dict=feed_dict)
+        print('test accuracy %g' % accuracy_eval)
+        np.set_printoptions(threshold=np.inf)
+        print("Prediction sample: " + str(preds_eval[:50]))
+        print("Actual values: " + str(np.argmax(exp_eval[:50], axis=1)))
+        print("Elapsed time: %.4f s" % (stopped - started))
+
+
+def create_records():
+    import os
+    directory = "/tmp/data/mnist"
+
+    if not os.path.exists(os.path.join(directory, "mnist_train.tfrecords")):
+        convert_to(train, "mnist_train", directory)
+        convert_to(test, "mnist_test", directory)
+        convert_to(val, "mnist_val", directory)
+
+
+if __name__ == '__main__':
+    create_records()
+    train()
+
+
+
diff --git a/skluc/examples/write_read_tfrecords.py b/skluc/examples/write_read_tfrecords.py
new file mode 100644
index 0000000000000000000000000000000000000000..f31083ddd684b15d315947c23ebfdfa0aac86763
--- /dev/null
+++ b/skluc/examples/write_read_tfrecords.py
@@ -0,0 +1,83 @@
+"""
+Example (with MNIST) of how to write and read TFRecords.
+"""
+
+import tensorflow as tf
+import os
+import numpy as np
+import skluc.mldatasets as dataset
+from skluc.convert_image_to_records import convert_to
+import matplotlib.pyplot as plt
+
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+from collections import namedtuple
+
+dataset_mnist = dataset.MnistDataset()
+dataset_mnist.load()
+mnist = dataset_mnist.to_image()
+
+X_train, Y_train = mnist["train"]
+X_train = np.array(X_train / 255)
+X_test, Y_test = mnist["test"]
+X_test = np.array(X_test / 255)
+
+X_train = X_train.astype(np.float32)
+permut = np.random.permutation(X_train.shape[0])
+val_size = 5000
+X_val = X_train[permut[:val_size]]
+X_train = X_train[permut[val_size:]]
+Y_val = Y_train[permut[:val_size]]
+Y_train = Y_train[permut[val_size:]]
+X_test = X_test.astype(np.float32)
+Y_train = Y_train.astype(np.float32)
+Y_test = Y_test.astype(np.float32)
+
+Dataset = namedtuple("Dataset", ["images", "labels", "num_examples"])
+
+train = Dataset(images=X_train, labels=Y_train, num_examples=X_train.shape[0])
+test = Dataset(images=X_test, labels=Y_test, num_examples=X_test.shape[0])
+val = Dataset(images=X_val, labels=Y_val, num_examples=X_val.shape[0])
+
+DIRECTORY = "/tmp/data/mnist"
+
+
+def write_tf_record_mnist():
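+    """Write the train, test and validation MNIST sets as TFRecord files in DIRECTORY."""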
+    convert_to(train, "mnist_train", DIRECTORY)
+    convert_to(test, "mnist_test", DIRECTORY)
+    convert_to(val, "mnist_val", DIRECTORY)
+
+
+def read_tf_record_mnist():
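+    """Iterate over the written train TFRecord file, rebuild each image from its raw bytes and display the first one."""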
+    reconstructed_images = []
+    record_iterator = tf.python_io.tf_record_iterator(path=os.path.join(DIRECTORY, "mnist_train.tfrecords"))
+    for string_record in record_iterator:
+        example = tf.train.Example()
+        example.ParseFromString(string_record)
+        height = int(example.features.feature["height"].int64_list.value[0])
+        width = int(example.features.feature["width"].int64_list.value[0])
+        depth = int(example.features.feature["depth"].int64_list.value[0])
+        img_string = (example.features.feature["image_raw"].bytes_list.value[0])
+        label = (example.features.feature["label"].int64_list.value[0])
+
+        # the images were written as float32, so they must be read back as float32
+        img_1d = np.frombuffer(img_string, dtype=np.float32)
+        reconstructed_img = img_1d.reshape((height, width, depth))
+        reconstructed_images.append((reconstructed_img, label))
+
+    plt.imshow(reconstructed_images[0][0][:,:,0])
+    plt.show()
+    print(reconstructed_images[0][1])
+
+
+if __name__ == '__main__':
+    # write_tf_record_mnist()
+    read_tf_record_mnist()
\ No newline at end of file