"""
Benchmark VGG: benchmarking deepstrom against other architectures of the VGG network (this script runs the dense, end-to-end variant).

Usage:
    benchmark_vgg dense [-a value] [-v size] [-e numepoch] [-s batchsize] [-D reprdim]

Options:
    --help -h                               Display help and exit.
    -e numepoch --num-epoch=numepoch        The number of epochs.
    -s batchsize --batch-size=batchsize     The number of examples in each batch.
    -v size --validation-size=size          The size of the validation set [default: 10000]
    -a value --seed=value                   The seed value used for all randomization processes [default: 0]
    -D reprdim --out-dim=reprdim            The dimension of the final representation.
"""
import sys
import time as t
import numpy as np
import tensorflow as tf
import docopt
import skluc.main.data.mldatasets as dataset
from skluc.main.tensorflow_.utils import fully_connected, batch_generator, classification_cifar
from skluc.main.utils import logger
import keras
from keras.models import Sequential
from keras.layers import Activation
from keras.layers import Conv2D, MaxPooling2D
from keras.initializers import he_normal
from keras.layers.normalization import BatchNormalization


def VGG19(input_shape):
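    """Build the convolutional part of VGG19 (blocks 1 to 5, 16 conv layers, no dense head)
    as a Keras Sequential model, with batch normalization and L2 weight decay on the kernels."""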
    # with tf.variable_scope("block1_conv1"):
    #     weights = tf.get_variable("weights", (3, 3, 3, 64), initializer=tf.random_normal_initializer(stddev=0.1), trainable=trainable)
    #     biases = tf.get_variable("biases", (64), initializer=tf.constant_initializer(0.0), trainable=trainable)
    #     regularizer = tf.contrib.layers.l2_regularizer(scale=0.1)
    #     conv = tf.nn.conv2d(input_, weights, strides=[1, 1, 1, 1], padding='SAME', kernel_regularizer=regularizer)
    #     batch_norm = tf.nn.batch_normalization(conv, variance_epsilon=1e-3)
    #     relu = tf.nn.relu(conv + biases)
    #     tf.summary.histogram("act", relu)
    #     in order to reduce dimensionality, use bigger pooling size
        # pool = max_pool(relu, pool_size=pool_size)
    # with tf.variable_scope("conv_pool_2"):
    #     conv2 = conv_relu_pool(conv1, [5, 5, 6, 16], [16], pool_size=2, trainable=trainable)
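    # L2 regularization strength applied to every convolution kernel below.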
    weight_decay = 0.0001
    # build model
    model = Sequential()

    # Block 1
    model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block1_conv1', input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block1_conv2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))

    # Block 2
    model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block2_conv1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block2_conv2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))

    # Block 3
    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block3_conv1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block3_conv2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block3_conv3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block3_conv4'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))

    # Block 4
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block4_conv1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block4_conv2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block4_conv3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block4_conv4'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))

    # Block 5
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block5_conv1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block5_conv2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block5_conv3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block5_conv4'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))
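    # With 32x32 CIFAR-10 inputs, the five stride-2 poolings reduce the spatial
    # dimensions to 1x1, so the convolutional stack outputs features of shape (batch, 1, 1, 512).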

    return model


def fct_dense(input_, out_dim, two_layers):
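    """Apply one fully-connected ReLU layer of width out_dim to input_ (two stacked layers when two_layers is True)."""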
    with tf.variable_scope("dense_layers"):
        fc_1 = fully_connected(input_, out_dim, act=tf.nn.relu, variable_scope="fc1")
        if two_layers:
            fc_2 = fully_connected(fc_1, out_dim, act=tf.nn.relu, variable_scope="fc2")
        else:
            fc_2 = fc_1
    out = fc_2
    return out


if __name__ == '__main__':
    # todo: special treatment for each type of execution
    with tf.Graph().as_default():
        arguments = docopt.docopt(__doc__)
        NUM_EPOCH = int(arguments["--num-epoch"])
        BATCH_SIZE = int(arguments["--batch-size"])
        SEED_TRAIN_VALIDATION = 0
        SEED = int(arguments["--seed"])
        OUT_DIM = int(arguments["--out-dim"]) if arguments["--out-dim"] is not None else None
        VALIDATION_SIZE = int(arguments["--validation-size"])

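        # Data preprocessing: load CIFAR-10, normalize, cast to float32, convert the
        # examples to image format and the labels to one-hot vectors.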
        data = dataset.Cifar10Dataset(validation_size=VALIDATION_SIZE, seed=SEED_TRAIN_VALIDATION)
        data.load()
        data.normalize()
        data.data_astype(np.float32)
        data.labels_astype(np.float32)
        data.to_image()
        data.to_one_hot()

        logger.debug("Start benchmark with parameters: {}".format(" ".join(sys.argv[1:])))
        logger.debug("Using dataset {} with validation size {} and seed for spliting set {}.".format(data.s_name, data.validation_size, data.seed))
        logger.debug("Shape of train set data: {}; shape of train set labels: {}".format(data.train[0].shape, data.train[1].shape))
        logger.debug("Shape of validation set data: {}; shape of validation set labels: {}".format(data.validation[0].shape, data.validation[1].shape))
        logger.debug("Shape of test set data: {}; shape of test set labels: {}".format(data.test[0].shape, data.test[1].shape))
        logger.debug("Sample of label: {}".format(data.train[1][0]))
        # todo: separate function for parameter parsing

        input_dim, output_dim = data.train[0].shape[1:], data.train[1].shape[1]

        x = tf.placeholder(tf.float32, shape=[None, *input_dim], name="x")
        y = tf.placeholder(tf.float32, shape=[None, output_dim], name="label")

        vgg_conv_model = VGG19(input_dim)
        conv_x = vgg_conv_model(x)
        input_classif = fct_dense(conv_x, OUT_DIM, False)
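        # classification_cifar is assumed to return the class logits together with a dropout
        # keep_prob placeholder (fed 0.5 during training and 1.0 at evaluation time).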
        classif, keep_prob = classification_cifar(input_classif, output_dim)

        # loss computation
        with tf.name_scope("xent"):
            cross_entropy = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=classif, name="xentropy"),
                name="xentropy_mean")
            tf.summary.scalar('loss-xent', cross_entropy)

        # todo learning rate as hyperparameter
        # gradient computation and optimization step
        with tf.name_scope("train"):
            global_step = tf.Variable(0, name="global_step", trainable=False)
            train_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cross_entropy,
                                                                                  global_step=global_step)

        # accuracy computation
        with tf.name_scope("accuracy"):
            predictions = tf.argmax(classif, 1)
            correct_prediction = tf.equal(predictions, tf.argmax(y, 1))
            accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            tf.summary.scalar("accuracy", accuracy_op)

        merged_summary = tf.summary.merge_all()

        init = tf.global_variables_initializer()
        # Instantiate a SummaryWriter to output summaries and the graph,
        # then create a session for running ops on the graph.

        summary_writer = tf.summary.FileWriter("tb_dense_end_to_end")
        with tf.Session() as sess:
            summary_writer.add_graph(sess.graph)
            # Initialize all Variable objects
            sess.run(init)
            # actual learning
            # feed_dict_val = {x: data.validation[0], y: data.validation[1], keep_prob: 1.0}
            global_start = t.time()
            feed_dict = {}
            feed_dict_val = {}
            feed_dict_test = {}
            for i in range(NUM_EPOCH):
                j = 0
                start = t.time()
                for X_batch, Y_batch in batch_generator(data.train[0], data.train[1], BATCH_SIZE, True):
                    feed_dict.update({x: X_batch, y: Y_batch, keep_prob: 0.5})
                    _, loss, acc = sess.run([train_optimizer, cross_entropy, accuracy_op], feed_dict=feed_dict)
                    if j % 100 == 0:
                        logger.debug("epoch: {}/{}; batch: {}/{}; loss: {}; acc: {}".format(i, NUM_EPOCH, j, int(data.train[0].shape[0]/BATCH_SIZE), loss, acc))
                        # summary_str = sess.run(merged_summary, feed_dict=feed_dict)
                        # summary_writer.add_summary(summary_str, j)
                    j += 1

            training_time = t.time() - global_start
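            # Evaluate in batches of 1000 and average the per-batch accuracies; with
            # equal-sized batches this equals the overall accuracy on the set.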
            accuracies_val = []
            i = 0
            for X_batch, Y_batch in batch_generator(data.validation[0], data.validation[1], 1000, False):
                feed_dict_val.update({x: X_batch, y: Y_batch, keep_prob: 1.0})
                accuracy = sess.run([accuracy_op], feed_dict=feed_dict_val)
                accuracies_val.append(accuracy[0])
                i += 1

            accuracies_test = []
            i = 0
            for X_batch, Y_batch in batch_generator(data.test[0], data.test[1], 1000, False):
                feed_dict_test.update({x: X_batch, y: Y_batch, keep_prob: 1.0})
                accuracy = sess.run([accuracy_op], feed_dict=feed_dict_test)
                accuracies_test.append(accuracy[0])
                i += 1

        global_acc_val = sum(accuracies_val) / len(accuracies_val)
        global_acc_test = sum(accuracies_test) / len(accuracies_test)
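        # Emit the results as a single CSV line:
        # val_acc, test_acc, training_time, num_epoch, batch_size, out_dim, validation_size, seed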
        printed_r_list = [str(global_acc_val),
                          str(global_acc_test),
                          str(training_time),
                          str(NUM_EPOCH),
                          str(BATCH_SIZE),
                          str(OUT_DIM),
                          str(VALIDATION_SIZE),
                          str(SEED),
                          ]
        print(",".join(printed_r_list))