diff --git a/main/experiments/benchmark_vgg_end_to_end_new.py b/main/experiments/benchmark_vgg_end_to_end_new.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e143e5a2659a1cebfed6c5e29a260c6e6980f30
--- /dev/null
+++ b/main/experiments/benchmark_vgg_end_to_end_new.py
@@ -0,0 +1,440 @@
+"""
+Benchmark VGG: benchmarking deepstrom against other architectures built on top of the VGG network.
+
+Usage:
+    benchmark_vgg deepstrom [-r] [-a value] [-v size] [-e numepoch] [-s batchsize] [-D reprdim] [-m size] (-R|-L|-C|-E|-P|-S|-A|-T|-M) [-g gammavalue] [-c cvalue] [-n]
+
+Options:
+    --help -h                               Display help and exit.
+    -e numepoch --num-epoch=numepoch        The number of epochs.
+    -s batchsize --batch-size=batchsize     The number of examples in each batch.
+    -v size --validation-size size          The size of the validation set [default: 10000]
+    -a value --seed value                   The seed value used for all randomization processes [default: 0]
+    -D reprdim --out-dim=reprdim            The dimension of the final representation.
+    -m size --nys-size size                 The number of examples in the Nystrom subsample.
+    -n --non-linear                         Tell Nystrom to use the non-linear activation function on its output.
+    -r --real-nystrom                       Use the real Nystrom W matrix (K^-1/2 of the subsample kernel matrix).
+    -g gammavalue --gamma gammavalue        The value of gamma for the rbf, chi2 or hyperbolic tangent kernel (deepstrom and deepfriedconvnet).
+    -c cvalue --intercept-constant cvalue   The value of the intercept constant for the hyperbolic tangent kernel.
+    -R --rbf-kernel                         Use the rbf kernel for Nystrom.
+    -L --linear-kernel                      Use the linear kernel for Nystrom.
+    -C --chi-square-kernel                  Use the basic additive chi-square kernel for Nystrom.
+    -E --exp-chi-square-kernel              Use the exponential chi-square kernel for Nystrom.
+    -P --chi-square-PD-kernel               Use the positive definite version of the basic additive chi-square kernel for Nystrom.
+    -S --sigmoid-kernel                     Use the sigmoid kernel for Nystrom.
+    -A --laplacian-kernel                   Use the laplacian kernel for Nystrom.
+    -T --stacked-kernel                     Use the laplacian, chi2 and rbf kernels in a stacked setting for Nystrom.
+    -M --sumed-kernel                       Use the laplacian, chi2 and rbf kernels in a summed setting for Nystrom.
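+
+Example:
+    An illustrative invocation (values are arbitrary): train for 100 epochs with batches of 128,
+    a 64-example Nystrom subsample, a 64-dimensional representation and an rbf kernel with gamma 0.01:
+
+        benchmark_vgg deepstrom -e 100 -s 128 -m 64 -D 64 -R -g 0.01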
+"""
+import sys
+import os
+import time as t
+import numpy as np
+import tensorflow as tf
+import docopt
+from keras import Model
+from keras.preprocessing.image import ImageDataGenerator
+import skluc.main.data.mldatasets as dataset
+from skluc.main.tensorflow_.kernel_approximation import nystrom_layer
+from skluc.main.tensorflow_.utils import batch_generator, classification_cifar
+from skluc.main.tensorflow_.kernel import tf_rbf_kernel, tf_linear_kernel, tf_chi_square_CPD, tf_chi_square_CPD_exp, \
+    tf_chi_square_PD, tf_sigmoid_kernel, tf_laplacian_kernel, tf_stack_of_kernels, tf_sum_of_kernels
+from skluc.main.utils import logger, log_memory_usage
+import keras
+from keras.models import Sequential, load_model
+from keras.layers import Activation
+from keras.layers import Conv2D, MaxPooling2D
+from keras.initializers import he_normal
+from keras.layers.normalization import BatchNormalization
+
+
+def VGG19(input_shape):
+    weight_decay = 0.0001
+    # build model
+    model = Sequential()
+
+    # Block 1
+    model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block1_conv1', input_shape=input_shape))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block1_conv2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))
+
+    # Block 2
+    model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block2_conv1'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block2_conv2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))
+    #
+    # Block 3
+    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block3_conv1'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block3_conv2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block3_conv3'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block3_conv4'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))
+    #
+    # Block 4
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block4_conv1'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block4_conv2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block4_conv3'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block4_conv4'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))
+
+    # Block 5
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block5_conv1'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block5_conv2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block5_conv3'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block5_conv4'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))
+
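+    # with 32x32 CIFAR inputs, the block5_pool output is a 1x1x512 feature map (five stride-2 poolings)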
+    return model
+
+
+def VGG19_preload():
+    logger.debug("filename: {}".format(os.path.abspath(__file__)))
+    model = load_model(os.path.join(os.path.dirname(os.path.abspath(__file__)), "1522967518.1916964_vgg19_cifar10.h5"))
+    vgg_conv_model = Model(inputs=model.input,
+                           outputs=model.get_layer('block5_pool').output)
+    return vgg_conv_model
+
+
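+# `compute_euristic_sigma` and `compute_euristic_sigma_chi2` are used by
+# `get_gamma_value` below but are neither defined nor imported in this file; they
+# presumably come from the skluc package. The minimal sketches below only
+# document the assumed behaviour (the classical mean pairwise-distance bandwidth
+# heuristic) and can be dropped once the real helpers are imported.
+def compute_euristic_sigma(dat, subsample_size=1000):
+    """Mean squared euclidean distance between pairs of examples of a subsample (assumed heuristic)."""
+    from sklearn.metrics.pairwise import euclidean_distances
+    flat = dat[:subsample_size]
+    flat = flat.reshape(flat.shape[0], -1)
+    return np.mean(euclidean_distances(flat, flat, squared=True))
+
+
+def compute_euristic_sigma_chi2(dat, subsample_size=1000):
+    """Mean additive chi2 distance between pairs of examples of a subsample (assumed heuristic, non-negative features)."""
+    from sklearn.metrics.pairwise import additive_chi2_kernel
+    flat = dat[:subsample_size]
+    flat = flat.reshape(flat.shape[0], -1)
+    # additive_chi2_kernel returns -sum((x - y)^2 / (x + y)); negate it to get a distance
+    return np.mean(-additive_chi2_kernel(flat))
+
+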
+def get_gamma_value(arguments, dat, chi2=False):
+    if arguments["--gamma"] is None:
+        logger.debug("Gamma argument is None: computing it from the data.")
+        if chi2:
+            gamma_value = 1./compute_euristic_sigma_chi2(dat.train.data)
+
+        else:
+            gamma_value = 1./compute_euristic_sigma(dat.train.data)
+    else:
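+        # eval allows passing either a single float or a list of gammas (used by the stacked/summed kernels)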
+        gamma_value = eval(arguments["--gamma"])
+
+    logger.debug("Gamma value is {}".format(gamma_value))
+    return gamma_value
+
+def init_kernel():
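+    # relies on module-level globals set in the __main__ block: the kernel flags (RBF_KERNEL, ..., SUMED_KERNEL), `arguments` and `data`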
+    kernel_dict = {}
+    GAMMA = None
+    if RBF_KERNEL:
+        KERNEL = tf_rbf_kernel
+        KERNEL_NAME = "rbf"
+        GAMMA = get_gamma_value(arguments, data)
+        kernel_dict = {"gamma": GAMMA}
+    elif LINEAR_KERNEL:
+        KERNEL = tf_linear_kernel
+        KERNEL_NAME = "linear"
+    elif CHI2_KERNEL:
+        KERNEL = tf_chi_square_CPD
+        KERNEL_NAME = "chi2_cpd"
+    elif CHI2_EXP_KERNEL:
+        KERNEL = tf_chi_square_CPD_exp
+        KERNEL_NAME = "chi2_exp_cpd"
+        GAMMA = get_gamma_value(arguments, data, chi2=True)
+        kernel_dict = {"gamma": GAMMA}
+    elif CHI2_PD_KERNEL:
+        KERNEL = tf_chi_square_PD
+        KERNEL_NAME = "chi2_pd"
+    elif SIGMOID_KERNEL:
+        KERNEL = tf_sigmoid_kernel
+        KERNEL_NAME = "sigmoid"
+        GAMMA = get_gamma_value(arguments, data)
+        CONST = float(arguments["--intercept-constant"])
+        kernel_dict = {"gamma": GAMMA, "constant": CONST}
+    elif LAPLACIAN_KERNEL:
+        KERNEL = tf_laplacian_kernel
+        KERNEL_NAME = "laplacian"
+        GAMMA = get_gamma_value(arguments, data)
+        kernel_dict = {"gamma": np.sqrt(GAMMA)}
+    elif STACKED_KERNEL:
+        GAMMA = get_gamma_value(arguments, data)
+
+
+        def KERNEL(X, Y):
+            return tf_stack_of_kernels(X, Y, [tf_rbf_kernel for _ in GAMMA],
+                                       [{"gamma": g_value} for g_value in GAMMA])
+
+
+        KERNEL_NAME = "stacked"
+
+    elif SUMED_KERNEL:
+        GAMMA = get_gamma_value(arguments, data)
+
+
+        def KERNEL(X, Y):
+            return tf_sum_of_kernels(X, Y, [tf_rbf_kernel for _ in GAMMA],
+                                     [{"gamma": g_value} for g_value in GAMMA])
+
+
+        KERNEL_NAME = "summed"
+    else:
+        raise Exception("No kernel function specified for deepstrom")
+
+    return KERNEL_NAME, KERNEL, kernel_dict, GAMMA
+
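+
+# `fct_deepstrom` is called in the main block below but is neither defined nor
+# imported in this file (only `nystrom_layer` is imported, and left unused). The
+# sketch below documents the assumed behaviour of the deepstrom layer: kernel
+# similarities between the input and the Nystrom subsample, followed by a linear
+# map (either the provided `w_matrix`, e.g. K^(-1/2), or a learned m x out_dim
+# matrix) and an optional non-linearity.
+def fct_deepstrom(input_x, out_dim, subsample, kernel_fct, kernel_params,
+                  w_matrix=None, non_linearity=None):
+    input_flat = tf.layers.flatten(input_x)
+    subsample_flat = tf.layers.flatten(subsample)
+    similarities = kernel_fct(input_flat, subsample_flat, **kernel_params)  # shape (batch, m)
+    if w_matrix is not None:
+        # "real" Nystrom: project onto the (pseudo-)inverse square root of K_mm
+        # (the output dimension is then m, regardless of out_dim)
+        out = tf.matmul(similarities, w_matrix)
+    else:
+        # adaptive deepstrom: the projection matrix is learned
+        learned_w = tf.get_variable("deepstrom_w", [int(subsample.shape[0]), out_dim],
+                                    initializer=tf.random_normal_initializer(stddev=0.1))
+        out = tf.matmul(similarities, learned_w)
+    return non_linearity(out) if non_linearity is not None else out
+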
+
+if __name__ == '__main__':
+
+    arguments = docopt.docopt(__doc__)
+    NUM_EPOCH = int(arguments["--num-epoch"])
+    BATCH_SIZE = int(arguments["--batch-size"])
+    SEED_TRAIN_VALIDATION = 0
+    SEED = int(arguments["--seed"])
+    OUT_DIM = int(arguments["--out-dim"]) if arguments["--out-dim"] is not None else None
+    VALIDATION_SIZE = int(arguments["--validation-size"])
+    NYS_SUBSAMPLE_SIZE = int(arguments["--nys-size"])
+    if OUT_DIM is None:
+        OUT_DIM = NYS_SUBSAMPLE_SIZE
+    KERNEL_NAME = None
+    GAMMA = None
+    CONST = None
+    REAL_NYSTROM = arguments["--real-nystrom"]
+
+    NON_LINEAR = tf.nn.relu if arguments["--non-linear"] else None
+
+    RBF_KERNEL = arguments["--rbf-kernel"]
+    LINEAR_KERNEL = arguments["--linear-kernel"]
+    CHI2_KERNEL = arguments["--chi-square-kernel"]
+    CHI2_EXP_KERNEL = arguments["--exp-chi-square-kernel"]
+    CHI2_PD_KERNEL = arguments["--chi-square-PD-kernel"]
+    SIGMOID_KERNEL = arguments["--sigmoid-kernel"]
+    LAPLACIAN_KERNEL = arguments["--laplacian-kernel"]
+    STACKED_KERNEL = arguments["--stacked-kernel"]
+    SUMED_KERNEL = arguments["--sumed-kernel"]
+
+    data = dataset.Cifar10Dataset(validation_size=VALIDATION_SIZE, seed=SEED_TRAIN_VALIDATION)
+    data.load()
+    data.normalize()
+    data.data_astype(np.float32)
+    data.labels_astype(np.float32)
+    data.to_image()
+    data.to_one_hot()
+
+    logger.debug("Start benchmark with parameters: {}".format(" ".join(sys.argv[1:])))
+    logger.debug("Using dataset {} with validation size {} and seed for spliting set {}.".format(data.s_name, data.validation_size, data.seed))
+    logger.debug("Shape of train set data: {}; shape of train set labels: {}".format(data.train[0].shape, data.train[1].shape))
+    logger.debug("Shape of validation set data: {}; shape of validation set labels: {}".format(data.validation[0].shape, data.validation[1].shape))
+    logger.debug("Shape of test set data: {}; shape of test set labels: {}".format(data.test[0].shape, data.test[1].shape))
+    logger.debug("Sample of label: {}".format(data.train[1][0]))
+
+
+    KERNEL_NAME, KERNEL, kernel_dict, GAMMA = init_kernel()
+
+    input_dim, output_dim = data.train[0].shape[1:], data.train[1].shape[1]
+    with tf.Graph().as_default():
+        np.random.seed(SEED)
+        nys_subsample_index = np.random.permutation(data.train[0].shape[0])
+        nys_subsample = data.train[0][nys_subsample_index[:NYS_SUBSAMPLE_SIZE]]
+
+        nys_subsample_placeholder = tf.Variable(nys_subsample, dtype=tf.float32, name="nys_subsample", trainable=False)
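+        # the Nystrom subsample lives in a non-trainable Variable (not an actual tf.placeholder, despite its name) and is also passed through the feed_dicts below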
+
+        x = tf.placeholder(tf.float32, shape=[None, *input_dim], name="x")
+        y = tf.placeholder(tf.float32, shape=[None, output_dim], name="label")
+        # nys_subsample_placeholder = tf.placeholder(tf.float32, shape=[NYS_SUBSAMPLE_SIZE, *input_dim], name="nys_subsample")
+
+        # vgg_conv_model = VGG19_preload()
+        with tf.variable_scope("Convolution") as scope_convolution:
+            vgg_conv_model = VGG19(input_dim)
+            vgg_conv_model.trainable = False
+            conv_x = vgg_conv_model(x)
+            tf.summary.histogram("convolution_x", conv_x)
+            vgg_conv_model_subsample = keras.Model(inputs=vgg_conv_model.inputs,
+                                                   outputs=vgg_conv_model.outputs)
+            vgg_conv_model_subsample.trainable = False
+            conv_nys_subsample = vgg_conv_model_subsample(nys_subsample_placeholder)
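+            # the subsample goes through the same frozen VGG convolutional stack as the input batch, so both representations share weights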
+
+        logger.debug("Selecting deepstrom layer function with "
+                     "subsample size = {}, "
+                     "output_dim = {}, "
+                     "{} activation function "
+                     "and kernel = {}"
+                     .format(NYS_SUBSAMPLE_SIZE,
+                             OUT_DIM,
+                             "with" if NON_LINEAR else "without",
+                             KERNEL_NAME))
+        if OUT_DIM is not None and OUT_DIM > NYS_SUBSAMPLE_SIZE:
+            logger.debug("Output dim is greater than deepstrom subsample size. Aborting.")
+            # todo change this because it is copy-pasted (use function instead)
+
+            global_acc_val = None
+            global_acc_test = None
+            training_time = None
+            printed_r_list = [str(global_acc_val),
+                              str(global_acc_test),
+                              str(training_time),
+                              str(NUM_EPOCH),
+                              str(BATCH_SIZE),
+                              str(OUT_DIM),
+                              str(KERNEL_NAME),
+                              str(GAMMA),
+                              str(CONST),
+                              str(NYS_SUBSAMPLE_SIZE),
+                              str(VALIDATION_SIZE),
+                              str(SEED),
+                              str(NON_LINEAR),
+                              ]
+            print(",".join(printed_r_list))
+            sys.exit(0)
+        w_matrix = None
+        if REAL_NYSTROM:
+            init_dim = np.prod([s.value for s in conv_x.shape[1:] if s.value is not None])
+            h_conv_nystrom_subsample_flat = tf.reshape(conv_nys_subsample, [conv_nys_subsample.shape[0], init_dim])
+
+            K_matrix = KERNEL(h_conv_nystrom_subsample_flat, h_conv_nystrom_subsample_flat, **kernel_dict)
+            S, U, V = tf.svd(K_matrix)
+            # W = K_mm^(-1/2): the (pseudo-)inverse square root of the subsample kernel matrix
+            invert_root_K = tf.matmul(tf.matmul(U, tf.diag(1. / tf.sqrt(S))), tf.transpose(V))
+            w_matrix = invert_root_K
+
+        input_classif = fct_deepstrom(conv_x, OUT_DIM, conv_nys_subsample, KERNEL, kernel_dict, w_matrix=w_matrix, non_linearity=NON_LINEAR)
+
+        classif, keep_prob = classification_cifar(input_classif, output_dim)
+
+        # compute the loss
+        with tf.name_scope("xent"):
+            cross_entropy = tf.reduce_mean(
+                tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=classif, name="xentropy"),
+                name="xentropy_mean")
+            tf.summary.scalar('loss-xent', cross_entropy)
+
+        # todo learning rate as hyperparameter
+        # compute the gradient (training step)
+        with tf.name_scope("train"):
+            global_step = tf.Variable(0, name="global_step", trainable=False)
+            train_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cross_entropy,
+                                                                                  global_step=global_step)
+
+        # compute the accuracy
+        with tf.name_scope("accuracy"):
+            predictions = tf.argmax(classif, 1)
+            correct_prediction = tf.equal(predictions, tf.argmax(y, 1))
+            accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
+            tf.summary.scalar("accuracy", accuracy_op)
+
+        merged_summary = tf.summary.merge_all()
+
+        init = tf.global_variables_initializer()
+        # Create a session for running Ops on the Graph.
+        # Instantiate a SummaryWriter to output summaries and the Graph.
+        # summary_writer = tf.summary.FileWriter("debug_benchmark_vgg")
+        saver = tf.train.Saver()
+
+        with tf.Session() as sess:
+            logger.debug("trainable variables are: {}".format(tf.trainable_variables()))
+            # summary_writer.add_graph(sess.graph)
+            # Initialize all Variable objects
+            datagen = ImageDataGenerator(horizontal_flip=True,
+                                         width_shift_range=0.125,
+                                         height_shift_range=0.125,
+                                         fill_mode='constant',
+                                         cval=0.)
+            datagen.fit(data.train[0])
+            sess.run(init)
+            # actual learning
+            # feed_dict_val = {x: data.validation[0], y: data.validation[1], keep_prob: 1.0}
+            global_start = t.time()
+            feed_dict = {nys_subsample_placeholder: nys_subsample}
+            feed_dict_val = {nys_subsample_placeholder: nys_subsample}
+            feed_dict_test = {nys_subsample_placeholder: nys_subsample}
+            start_time_int = int(t.time())
+            for i in range(NUM_EPOCH):
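+                # checkpoint at the start of every epoch; global_step is fixed to start_time_int, so the same file gets overwritten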
+                saver.save(sess, os.path.abspath('end_to_end_model'), global_step=start_time_int)
+                start = t.time()
+                # for X_batch, Y_batch in batch_generator(data.train[0], data.train[1], BATCH_SIZE, True):
+                batchgen = datagen.flow(data.train[0], data.train[1], BATCH_SIZE, shuffle=False)
+                j = 0
+                log_memory_usage()
+                while j < len(batchgen):
+                    X_batch, Y_batch = next(batchgen)
+                    # batch_generator(data.train[0], data.train[1], BATCH_SIZE, True):
+                    # X_batch = tf.map_fn(lambda img: datagen.random_transform(img), X_batch)
+                    feed_dict.update({x: X_batch, y: Y_batch, keep_prob: 0.5})
+                    _, loss, acc = sess.run([train_optimizer, cross_entropy, accuracy_op], feed_dict=feed_dict)
+                    if j % 100 == 0:
+                        # summary_str = sess.run(merged_summary, feed_dict=feed_dict)
+                        # summary_writer.add_summary(summary_str, j)
+                        logger.debug("epoch: {}/{}; batch: {}/{}; loss: {}; acc: {}".format(i, NUM_EPOCH,
+                                                                                            j, int(data.train[0].shape[0]/BATCH_SIZE),
+                                                                                            loss, acc))
+                    j += 1
+
+            training_time = t.time() - global_start
+            accuracies_val = []
+            i = 0
+            for X_batch, Y_batch in batch_generator(data.validation[0], data.validation[1], 1000, False):
+                feed_dict_val.update({x: X_batch, y: Y_batch, keep_prob: 1.0})
+                accuracy = sess.run([accuracy_op], feed_dict=feed_dict_val)
+                accuracies_val.append(accuracy[0])
+                i += 1
+
+            accuracies_test = []
+            i = 0
+            for X_batch, Y_batch in batch_generator(data.test[0], data.test[1], 1000, False):
+                feed_dict_test.update({x: X_batch, y: Y_batch, keep_prob: 1.0})
+                accuracy = sess.run([accuracy_op], feed_dict=feed_dict_test)
+                accuracies_test.append(accuracy[0])
+                i += 1
+
+        global_acc_val = sum(accuracies_val) / len(accuracies_val)
+        global_acc_test = sum(accuracies_test) / len(accuracies_test)
+        printed_r_list = [str(global_acc_val),
+                          str(global_acc_test),
+                          str(training_time),
+                          str(NUM_EPOCH),
+                          str(BATCH_SIZE),
+                          str(OUT_DIM),
+                          str(KERNEL_NAME),
+                          str(GAMMA),
+                          str(CONST),
+                          str(NYS_SUBSAMPLE_SIZE),
+                          str(VALIDATION_SIZE),
+                          str(SEED),
+                          str(NON_LINEAR),
+                          ]
+        print(",".join(printed_r_list))
+
+
diff --git a/main/experiments/deepstrom_classif_end_to_end_mnist.ipynb b/main/experiments/deepstrom_classif_end_to_end_mnist.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..01c10b25b30536ce51dd9b5d827ca16f8237e65a
--- /dev/null
+++ b/main/experiments/deepstrom_classif_end_to_end_mnist.ipynb
@@ -0,0 +1,34 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
\ No newline at end of file
diff --git a/main/experiments/graph_drawing/paper/cifar/cifar_few_data_more_subsample.zip b/main/experiments/graph_drawing/paper/cifar/cifar_few_data_more_subsample.zip
deleted file mode 100644
index 010806ef26483a5ab635d653441d230fd96318bc..0000000000000000000000000000000000000000
Binary files a/main/experiments/graph_drawing/paper/cifar/cifar_few_data_more_subsample.zip and /dev/null differ
diff --git a/main/experiments/graph_drawing/paper/cifar/cifar_sota.zip b/main/experiments/graph_drawing/paper/cifar/cifar_sota.zip
deleted file mode 100644
index a9b058c904d7cd6aaa21cae5a4272d4d79f95bb9..0000000000000000000000000000000000000000
Binary files a/main/experiments/graph_drawing/paper/cifar/cifar_sota.zip and /dev/null differ
diff --git a/main/experiments/graph_drawing/paper/cifar/multikernel/by_sigma_value.zip b/main/experiments/graph_drawing/paper/cifar/multikernel/by_sigma_value.zip
deleted file mode 100644
index 744cde3f3c373edee67ec0d4715d122ac8189612..0000000000000000000000000000000000000000
Binary files a/main/experiments/graph_drawing/paper/cifar/multikernel/by_sigma_value.zip and /dev/null differ
diff --git a/main/experiments/graph_drawing/paper/cifar/plt_2D.zip b/main/experiments/graph_drawing/paper/cifar/plt_2D.zip
deleted file mode 100644
index 7cc268b56104a4ff05d0dfee07346ec0217d042e..0000000000000000000000000000000000000000
Binary files a/main/experiments/graph_drawing/paper/cifar/plt_2D.zip and /dev/null differ
diff --git a/main/experiments/graph_drawing/paper/mnist/mnist_few_data_more_subsample.zip b/main/experiments/graph_drawing/paper/mnist/mnist_few_data_more_subsample.zip
deleted file mode 100644
index beb2bd587f37e5580fb8edba6a108e5ad3d9c563..0000000000000000000000000000000000000000
Binary files a/main/experiments/graph_drawing/paper/mnist/mnist_few_data_more_subsample.zip and /dev/null differ
diff --git a/main/experiments/graph_drawing/paper/mnist/mnist_sota.zip b/main/experiments/graph_drawing/paper/mnist/mnist_sota.zip
deleted file mode 100644
index c5177ae60b2a5909796ad2804e99069803b37a89..0000000000000000000000000000000000000000
Binary files a/main/experiments/graph_drawing/paper/mnist/mnist_sota.zip and /dev/null differ
diff --git a/main/experiments/graph_drawing/paper/__init__.py b/main/experiments/graph_drawing/till_october_2018/__init__.py
similarity index 100%
rename from main/experiments/graph_drawing/paper/__init__.py
rename to main/experiments/graph_drawing/till_october_2018/__init__.py
diff --git a/main/experiments/graph_drawing/paper/cifar100/__init__.py b/main/experiments/graph_drawing/till_october_2018/cifar100_bloc3_test/__init__.py
similarity index 100%
rename from main/experiments/graph_drawing/paper/cifar100/__init__.py
rename to main/experiments/graph_drawing/till_october_2018/cifar100_bloc3_test/__init__.py
diff --git a/main/experiments/graph_drawing/cifar100_bloc3_test/cifar_100_bloc3_test.py b/main/experiments/graph_drawing/till_october_2018/cifar100_bloc3_test/cifar_100_bloc3_test.py
similarity index 100%
rename from main/experiments/graph_drawing/cifar100_bloc3_test/cifar_100_bloc3_test.py
rename to main/experiments/graph_drawing/till_october_2018/cifar100_bloc3_test/cifar_100_bloc3_test.py
diff --git a/main/experiments/graph_drawing/paper_iclr/__init__.py b/main/experiments/graph_drawing/till_october_2018/cifar100_cifar10_svhn_2_layers_non_lin/__init__.py
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/__init__.py
rename to main/experiments/graph_drawing/till_october_2018/cifar100_cifar10_svhn_2_layers_non_lin/__init__.py
diff --git a/main/experiments/graph_drawing/cifar100_cifar10_svhn_2_layers_non_lin/cifar_100_all_data.py b/main/experiments/graph_drawing/till_october_2018/cifar100_cifar10_svhn_2_layers_non_lin/cifar_100_all_data.py
similarity index 100%
rename from main/experiments/graph_drawing/cifar100_cifar10_svhn_2_layers_non_lin/cifar_100_all_data.py
rename to main/experiments/graph_drawing/till_october_2018/cifar100_cifar10_svhn_2_layers_non_lin/cifar_100_all_data.py
diff --git a/main/experiments/graph_drawing/draw_graphes.py b/main/experiments/graph_drawing/till_october_2018/draw_graphes.py
similarity index 100%
rename from main/experiments/graph_drawing/draw_graphes.py
rename to main/experiments/graph_drawing/till_october_2018/draw_graphes.py
diff --git a/main/experiments/graph_drawing/draw_graphes_vgg_cifar.py b/main/experiments/graph_drawing/till_october_2018/draw_graphes_vgg_cifar.py
similarity index 100%
rename from main/experiments/graph_drawing/draw_graphes_vgg_cifar.py
rename to main/experiments/graph_drawing/till_october_2018/draw_graphes_vgg_cifar.py
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_normal/__init__.py b/main/experiments/graph_drawing/till_october_2018/few_shot_tests_big_subsample/__init__.py
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_normal/__init__.py
rename to main/experiments/graph_drawing/till_october_2018/few_shot_tests_big_subsample/__init__.py
diff --git a/main/experiments/graph_drawing/few_shot_tests_big_subsample/graph_drawing_few_shot_tests_big_subsample.ipynb b/main/experiments/graph_drawing/till_october_2018/few_shot_tests_big_subsample/graph_drawing_few_shot_tests_big_subsample.ipynb
similarity index 100%
rename from main/experiments/graph_drawing/few_shot_tests_big_subsample/graph_drawing_few_shot_tests_big_subsample.ipynb
rename to main/experiments/graph_drawing/till_october_2018/few_shot_tests_big_subsample/graph_drawing_few_shot_tests_big_subsample.ipynb
diff --git a/main/experiments/graph_drawing/few_shot_tests_big_subsample/graph_drawing_few_shot_tests_big_subsample.py b/main/experiments/graph_drawing/till_october_2018/few_shot_tests_big_subsample/graph_drawing_few_shot_tests_big_subsample.py
similarity index 100%
rename from main/experiments/graph_drawing/few_shot_tests_big_subsample/graph_drawing_few_shot_tests_big_subsample.py
rename to main/experiments/graph_drawing/till_october_2018/few_shot_tests_big_subsample/graph_drawing_few_shot_tests_big_subsample.py
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_normal/cifar10/__init__.py b/main/experiments/graph_drawing/till_october_2018/few_shot_tests_knn_nobacktrain/__init__.py
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_normal/cifar10/__init__.py
rename to main/experiments/graph_drawing/till_october_2018/few_shot_tests_knn_nobacktrain/__init__.py
diff --git a/main/experiments/graph_drawing/few_shot_tests_knn_nobacktrain/graph_drawing_few_shot_tests_knn_nobacktrain.ipynb b/main/experiments/graph_drawing/till_october_2018/few_shot_tests_knn_nobacktrain/graph_drawing_few_shot_tests_knn_nobacktrain.ipynb
similarity index 100%
rename from main/experiments/graph_drawing/few_shot_tests_knn_nobacktrain/graph_drawing_few_shot_tests_knn_nobacktrain.ipynb
rename to main/experiments/graph_drawing/till_october_2018/few_shot_tests_knn_nobacktrain/graph_drawing_few_shot_tests_knn_nobacktrain.ipynb
diff --git a/main/experiments/graph_drawing/few_shot_tests_knn_nobacktrain/graph_drawing_few_shot_tests_knn_nobacktrain.py b/main/experiments/graph_drawing/till_october_2018/few_shot_tests_knn_nobacktrain/graph_drawing_few_shot_tests_knn_nobacktrain.py
similarity index 100%
rename from main/experiments/graph_drawing/few_shot_tests_knn_nobacktrain/graph_drawing_few_shot_tests_knn_nobacktrain.py
rename to main/experiments/graph_drawing/till_october_2018/few_shot_tests_knn_nobacktrain/graph_drawing_few_shot_tests_knn_nobacktrain.py
diff --git a/main/experiments/graph_drawing/till_october_2018/few_shot_tests_proto_withW/__init__.py b/main/experiments/graph_drawing/till_october_2018/few_shot_tests_proto_withW/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/few_shot_tests_proto_withW/graph_drawing_few_shot_tests_proto_withW.ipynb b/main/experiments/graph_drawing/till_october_2018/few_shot_tests_proto_withW/graph_drawing_few_shot_tests_proto_withW.ipynb
similarity index 100%
rename from main/experiments/graph_drawing/few_shot_tests_proto_withW/graph_drawing_few_shot_tests_proto_withW.ipynb
rename to main/experiments/graph_drawing/till_october_2018/few_shot_tests_proto_withW/graph_drawing_few_shot_tests_proto_withW.ipynb
diff --git a/main/experiments/graph_drawing/few_shot_tests_proto_withW/graph_drawing_few_shot_tests_proto_withW.py b/main/experiments/graph_drawing/till_october_2018/few_shot_tests_proto_withW/graph_drawing_few_shot_tests_proto_withW.py
similarity index 100%
rename from main/experiments/graph_drawing/few_shot_tests_proto_withW/graph_drawing_few_shot_tests_proto_withW.py
rename to main/experiments/graph_drawing/till_october_2018/few_shot_tests_proto_withW/graph_drawing_few_shot_tests_proto_withW.py
diff --git a/main/experiments/graph_drawing/till_october_2018/multiview_cifar100/__init__.py b/main/experiments/graph_drawing/till_october_2018/multiview_cifar100/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/multiview_cifar100/multiview_cifar100.py b/main/experiments/graph_drawing/till_october_2018/multiview_cifar100/multiview_cifar100.py
similarity index 100%
rename from main/experiments/graph_drawing/multiview_cifar100/multiview_cifar100.py
rename to main/experiments/graph_drawing/till_october_2018/multiview_cifar100/multiview_cifar100.py
diff --git a/main/experiments/graph_drawing/multiview_cifar100_lin_no_w/graph_drawing_multiview_cifar100_no_w.ipynb b/main/experiments/graph_drawing/till_october_2018/multiview_cifar100_lin_no_w/graph_drawing_multiview_cifar100_no_w.ipynb
similarity index 100%
rename from main/experiments/graph_drawing/multiview_cifar100_lin_no_w/graph_drawing_multiview_cifar100_no_w.ipynb
rename to main/experiments/graph_drawing/till_october_2018/multiview_cifar100_lin_no_w/graph_drawing_multiview_cifar100_no_w.ipynb
diff --git a/main/experiments/graph_drawing/till_october_2018/multiview_svhn/__init__.py b/main/experiments/graph_drawing/till_october_2018/multiview_svhn/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/multiview_svhn/multiview_svhn.py b/main/experiments/graph_drawing/till_october_2018/multiview_svhn/multiview_svhn.py
similarity index 100%
rename from main/experiments/graph_drawing/multiview_svhn/multiview_svhn.py
rename to main/experiments/graph_drawing/till_october_2018/multiview_svhn/multiview_svhn.py
diff --git a/main/experiments/graph_drawing/till_october_2018/paper/__init__.py b/main/experiments/graph_drawing/till_october_2018/paper/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/till_october_2018/paper/cifar100/__init__.py b/main/experiments/graph_drawing/till_october_2018/paper/cifar100/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/paper/cifar100/cifar_100_all_data.py b/main/experiments/graph_drawing/till_october_2018/paper/cifar100/cifar_100_all_data.py
similarity index 100%
rename from main/experiments/graph_drawing/paper/cifar100/cifar_100_all_data.py
rename to main/experiments/graph_drawing/till_october_2018/paper/cifar100/cifar_100_all_data.py
diff --git a/main/experiments/graph_drawing/paper/cifar100/cifar_100_all_data_time.py b/main/experiments/graph_drawing/till_october_2018/paper/cifar100/cifar_100_all_data_time.py
similarity index 100%
rename from main/experiments/graph_drawing/paper/cifar100/cifar_100_all_data_time.py
rename to main/experiments/graph_drawing/till_october_2018/paper/cifar100/cifar_100_all_data_time.py
diff --git a/main/experiments/graph_drawing/paper/cifar100/cifar_100_few_data.py b/main/experiments/graph_drawing/till_october_2018/paper/cifar100/cifar_100_few_data.py
similarity index 100%
rename from main/experiments/graph_drawing/paper/cifar100/cifar_100_few_data.py
rename to main/experiments/graph_drawing/till_october_2018/paper/cifar100/cifar_100_few_data.py
diff --git a/main/experiments/graph_drawing/paper/tcnn_moviereview_pre_trained_expe.py b/main/experiments/graph_drawing/till_october_2018/paper/tcnn_moviereview_pre_trained_expe.py
similarity index 100%
rename from main/experiments/graph_drawing/paper/tcnn_moviereview_pre_trained_expe.py
rename to main/experiments/graph_drawing/till_october_2018/paper/tcnn_moviereview_pre_trained_expe.py
diff --git a/main/experiments/graph_drawing/till_october_2018/paper/very_big_all_datasets_few_data/__init__.py b/main/experiments/graph_drawing/till_october_2018/paper/very_big_all_datasets_few_data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/paper/very_big_all_datasets_few_data/all_dataset_few_data.py b/main/experiments/graph_drawing/till_october_2018/paper/very_big_all_datasets_few_data/all_dataset_few_data.py
similarity index 100%
rename from main/experiments/graph_drawing/paper/very_big_all_datasets_few_data/all_dataset_few_data.py
rename to main/experiments/graph_drawing/till_october_2018/paper/very_big_all_datasets_few_data/all_dataset_few_data.py
diff --git a/main/experiments/graph_drawing/paper/vgg_cifar_deepstrom_pre_trained_expe_bad_vgg_124_conv.py b/main/experiments/graph_drawing/till_october_2018/paper/vgg_cifar_deepstrom_pre_trained_expe_bad_vgg_124_conv.py
similarity index 100%
rename from main/experiments/graph_drawing/paper/vgg_cifar_deepstrom_pre_trained_expe_bad_vgg_124_conv.py
rename to main/experiments/graph_drawing/till_october_2018/paper/vgg_cifar_deepstrom_pre_trained_expe_bad_vgg_124_conv.py
diff --git a/main/experiments/graph_drawing/paper/vgg_cifar_deepstrom_pre_trained_expe_bad_vgg_3_conv.py b/main/experiments/graph_drawing/till_october_2018/paper/vgg_cifar_deepstrom_pre_trained_expe_bad_vgg_3_conv.py
similarity index 100%
rename from main/experiments/graph_drawing/paper/vgg_cifar_deepstrom_pre_trained_expe_bad_vgg_3_conv.py
rename to main/experiments/graph_drawing/till_october_2018/paper/vgg_cifar_deepstrom_pre_trained_expe_bad_vgg_3_conv.py
diff --git a/main/experiments/graph_drawing/paper/vgg_cifar_deepstrom_pre_trained_expe_few_data.py b/main/experiments/graph_drawing/till_october_2018/paper/vgg_cifar_deepstrom_pre_trained_expe_few_data.py
similarity index 100%
rename from main/experiments/graph_drawing/paper/vgg_cifar_deepstrom_pre_trained_expe_few_data.py
rename to main/experiments/graph_drawing/till_october_2018/paper/vgg_cifar_deepstrom_pre_trained_expe_few_data.py
diff --git a/main/experiments/graph_drawing/paper/vgg_cifar_deepstrom_pre_trained_stacked_summed.py b/main/experiments/graph_drawing/till_october_2018/paper/vgg_cifar_deepstrom_pre_trained_stacked_summed.py
similarity index 100%
rename from main/experiments/graph_drawing/paper/vgg_cifar_deepstrom_pre_trained_stacked_summed.py
rename to main/experiments/graph_drawing/till_october_2018/paper/vgg_cifar_deepstrom_pre_trained_stacked_summed.py
diff --git a/main/experiments/graph_drawing/paper/vgg_cifar_deepstrom_pre_trained_stacked_summed_sigma_range.py b/main/experiments/graph_drawing/till_october_2018/paper/vgg_cifar_deepstrom_pre_trained_stacked_summed_sigma_range.py
similarity index 100%
rename from main/experiments/graph_drawing/paper/vgg_cifar_deepstrom_pre_trained_stacked_summed_sigma_range.py
rename to main/experiments/graph_drawing/till_october_2018/paper/vgg_cifar_deepstrom_pre_trained_stacked_summed_sigma_range.py
diff --git a/main/experiments/graph_drawing/paper/vgg_svhn_deepstrom_few_data.py b/main/experiments/graph_drawing/till_october_2018/paper/vgg_svhn_deepstrom_few_data.py
similarity index 100%
rename from main/experiments/graph_drawing/paper/vgg_svhn_deepstrom_few_data.py
rename to main/experiments/graph_drawing/till_october_2018/paper/vgg_svhn_deepstrom_few_data.py
diff --git a/main/experiments/graph_drawing/paper/vgg_svhn_deepstrom_std_classif.py b/main/experiments/graph_drawing/till_october_2018/paper/vgg_svhn_deepstrom_std_classif.py
similarity index 100%
rename from main/experiments/graph_drawing/paper/vgg_svhn_deepstrom_std_classif.py
rename to main/experiments/graph_drawing/till_october_2018/paper/vgg_svhn_deepstrom_std_classif.py
diff --git a/main/experiments/graph_drawing/till_october_2018/paper_iclr/__init__.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_multiview/__init__.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_multiview/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_multiview/multiview_cifar100.ipynb b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_multiview/multiview_cifar100.ipynb
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_multiview/multiview_cifar100.ipynb
rename to main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_multiview/multiview_cifar100.ipynb
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_multiview/multiview_cifar100.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_multiview/multiview_cifar100.py
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_multiview/multiview_cifar100.py
rename to main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_multiview/multiview_cifar100.py
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_multiview/resultsmax.ods b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_multiview/resultsmax.ods
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_multiview/resultsmax.ods
rename to main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_multiview/resultsmax.ods
diff --git a/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/__init__.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/cifar10/__init__.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/cifar10/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_normal/cifar10/vgg_cifar_deepstrom_classif_normal.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/cifar10/vgg_cifar_deepstrom_classif_normal.py
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_normal/cifar10/vgg_cifar_deepstrom_classif_normal.py
rename to main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/cifar10/vgg_cifar_deepstrom_classif_normal.py
diff --git a/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/cifar100/__init__.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/cifar100/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_normal/cifar100/vgg_cifar100_deepstrom_classif_normal.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/cifar100/vgg_cifar100_deepstrom_classif_normal.py
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_normal/cifar100/vgg_cifar100_deepstrom_classif_normal.py
rename to main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/cifar100/vgg_cifar100_deepstrom_classif_normal.py
diff --git a/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/mnist/__init__.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/mnist/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_normal/mnist/vgg_mnist_deepstrom_classif_normal.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/mnist/vgg_mnist_deepstrom_classif_normal.py
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_normal/mnist/vgg_mnist_deepstrom_classif_normal.py
rename to main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/mnist/vgg_mnist_deepstrom_classif_normal.py
diff --git a/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/svhn/__init__.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/svhn/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_normal/svhn/vgg_svhn_deepstrom_classif_normal.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/svhn/vgg_svhn_deepstrom_classif_normal.py
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_normal/svhn/vgg_svhn_deepstrom_classif_normal.py
rename to main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_normal/svhn/vgg_svhn_deepstrom_classif_normal.py
diff --git a/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/__init__.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/cifar10/__init__.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/cifar10/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/cifar10/vgg_cifar10_smll_dataset.ipynb b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/cifar10/vgg_cifar10_smll_dataset.ipynb
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/cifar10/vgg_cifar10_smll_dataset.ipynb
rename to main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/cifar10/vgg_cifar10_smll_dataset.ipynb
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/cifar10/vgg_cifar_deepstrom_few_data.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/cifar10/vgg_cifar_deepstrom_few_data.py
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/cifar10/vgg_cifar_deepstrom_few_data.py
rename to main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/cifar10/vgg_cifar_deepstrom_few_data.py
diff --git a/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/cifar100/__init__.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/cifar100/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/cifar100/vgg_cifar100.ipynb b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/cifar100/vgg_cifar100.ipynb
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/cifar100/vgg_cifar100.ipynb
rename to main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/cifar100/vgg_cifar100.ipynb
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/cifar100/vgg_deepstrom_cifar100_few_data.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/cifar100/vgg_deepstrom_cifar100_few_data.py
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/cifar100/vgg_deepstrom_cifar100_few_data.py
rename to main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/cifar100/vgg_deepstrom_cifar100_few_data.py
diff --git a/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/mnist/__init__.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/mnist/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/mnist/lecun_mnist.ipynb b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/mnist/lecun_mnist.ipynb
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/mnist/lecun_mnist.ipynb
rename to main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/mnist/lecun_mnist.ipynb
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/mnist/vgg_mnist_deepstrom_few_data.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/mnist/vgg_mnist_deepstrom_few_data.py
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/mnist/vgg_mnist_deepstrom_few_data.py
rename to main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/mnist/vgg_mnist_deepstrom_few_data.py
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/resultsmax.ods b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/resultsmax.ods
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/resultsmax.ods
rename to main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/resultsmax.ods
diff --git a/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/svhn/__init__.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/svhn/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/svhn/vgg_svhn.ipynb b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/svhn/vgg_svhn.ipynb
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/svhn/vgg_svhn.ipynb
rename to main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/svhn/vgg_svhn.ipynb
diff --git a/main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/svhn/vgg_svhn_deepstrom_few_data.py b/main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/svhn/vgg_svhn_deepstrom_few_data.py
similarity index 100%
rename from main/experiments/graph_drawing/paper_iclr/accuracy_small_dataset/svhn/vgg_svhn_deepstrom_few_data.py
rename to main/experiments/graph_drawing/till_october_2018/paper_iclr/accuracy_small_dataset/svhn/vgg_svhn_deepstrom_few_data.py
diff --git a/main/experiments/graph_drawing/till_october_2018/transfert_few_data/vgg_svhn_from_cifar100_deepstrom_few_data.py b/main/experiments/graph_drawing/till_october_2018/transfert_few_data/vgg_svhn_from_cifar100_deepstrom_few_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..c40386f2524bc613efd1c0b96129685ba4fb7af7
--- /dev/null
+++ b/main/experiments/graph_drawing/till_october_2018/transfert_few_data/vgg_svhn_from_cifar100_deepstrom_few_data.py
@@ -0,0 +1,271 @@
+import os
+
+import matplotlib
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import pathlib
+from skluc.main.utils import logger
+
+matplotlib.rcParams.update({'font.size': 14})
+
+pd.set_option('display.width', 1000)
+
+DAT = ["SVHN"]
+DIR = ["/home/luc/Resultats/Deepstrom/SVHN/june_2018/svhn_few_data_debug_plus_dropout"]
+
+
+for h in range(len(DIR)):
+    DATANAME = DAT[h]
+    DIRNAME = DIR[h]
+
+    FILENAME = "gathered_results.csv"
+
+    min_acc = 0.00
+    max_acc = 1.05
+    # max_acc = 1.0
+    linewidth = 0.9
+    output_conv_dim = 512
+    nb_classes = 10
+
+    real_nys_marker = "s"
+
+    learned_nys_marker = "x"
+
+    linearity_color = "g"
+
+    dense_marker = "v"
+    dense_color = "r"
+
+    deepfried_marker = "8"
+    deepfried_color = "b"
+
+    d_translate_kernel = {
+        "linear": "Linear",
+        "chi2_cpd": "Chi2",
+        "rbf": "Gaussian"
+    }
+
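+    # note: the __main__ guard is nested inside the loop over result directories, so the whole analysis below runs once per (DATANAME, DIRNAME) pair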
+    if __name__ == '__main__':
+        filepath = os.path.join(DIRNAME, FILENAME)
+        field_names = ["method_name",
+                       "accuracy_val",
+                       "accuracy_test",
+                       "runtime",
+                       "number_epoch",
+                       "batch_size",
+                       "repr_dim",
+                       "two_layers_dense",
+                       "kernel_deepstrom",
+                       "gamma_kernel",
+                       "constante_sigmoid",
+                       "nb_layer_deepfried",
+                       "subsample_size",
+                       "validation_size",
+                       "seed",
+                       "non_linearity",
+                       "real_nystrom",
+                       "repr_quality",
+                       "train_size",
+                       "dropout"
+                       ]
+
+        df = pd.read_csv(filepath, names=field_names)
+        # df = df[df["accuracy_val"] != 'None']
+        df = df.apply(pd.to_numeric, errors="ignore")
+        method_names = set(df["method_name"].values)
+        kernel_names = set(df["kernel_deepstrom"].values)
+        kernel_names.remove("None")
+        # kernel_names.remove("laplacian")
+        repr_dim = set(df["repr_dim"].values)
+        repr_dim.remove("None")  # dtype: str
+        # repr_dim.remove("16")
+        nys_size = set(df["subsample_size"].values)
+        nys_size.remove("None")
+        nb_layers_deepfried = set(df["nb_layer_deepfried"].values)
+        nb_layers_deepfried.remove("None")
+        seed_values = set(df["seed"].values)
+        batch_size = 128
+        train_sizes = set(df["train_size"])
+        dropout_values = set(df["dropout"].values)
+        sigma_values = set(df["gamma_kernel"].values)
+        sigma_values.remove("None")
+        sigma_values = list(sigma_values)
+        logger.debug("Nystrom possible sizes are: {}".format(nys_size))
+        logger.debug("Kernel functions are: {}".format(kernel_names))
+        logger.debug("Compared network types are: {}".format(method_names))
+        logger.debug("Tested representation dimension are: {}".format(repr_dim))
+
+        means_deepstrom = {}
+
+        for t_size in sorted(list(train_sizes)):
+            df_tsize = df[df["train_size"] == t_size]
+            for drop_val in dropout_values:
+                if int(drop_val) != 1:
+                    continue
+                df_drop = df_tsize[df_tsize["dropout"] == drop_val]
+
+                # plot deepstrom
+                # ==============
+                df_deepstrom = df_drop[df_drop["method_name"] == "deepstrom"]
+                df_deepstrom["subsample_size"] = df_deepstrom["subsample_size"].astype(np.int)
+                df_deepstrom_sort = df_deepstrom.sort_values(by=["subsample_size"])
+                for i, k_name in enumerate(sorted(kernel_names)):
+                    if k_name != "rbf":
+                        df_deepstrom_kernels = [df_deepstrom_sort[df_deepstrom_sort["kernel_deepstrom"] == k_name]]
+                    else:
+                        df_deepstrom_kernels = []
+                        df_deepstrom_kernel_tmp = df_deepstrom_sort[df_deepstrom_sort["kernel_deepstrom"] == k_name]
+                        for sig_val in sigma_values:
+                            if float(sig_val) != 0.1:
+                                continue
+                            df_deepstrom_kernels.append(
+                                df_deepstrom_kernel_tmp[df_deepstrom_kernel_tmp["gamma_kernel"] == sig_val])
+
+                    for j, df_deepstrom_kernel in enumerate(df_deepstrom_kernels):
+                        f, ax = plt.subplots()
+                        # non_lin_dfs = {
+                        #     "linear": df_deepstrom_kernel[df_deepstrom_kernel["non_linearity"] == "None"],
+                        # }
+                        # get the results of learned nystrom
+                        df_deepstrom_kernel_w = df_deepstrom_kernel[df_deepstrom_kernel["real_nystrom"] == False]
+                        all_accs_w = np.array([
+                            list(df_deepstrom_kernel_w[df_deepstrom_kernel_w["seed"] == seed_v]["accuracy_test"])
+                            for seed_v in seed_values
+                        ])
+                        np_deepstrom_kernel_w_mean_accuracy_test = np.mean(all_accs_w, axis=0)
+                        np_deepstrom_kernel_w_std_accuracy_test = np.std(all_accs_w, axis=0)
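+                        # x-axis value: number of parameters added by the Deepström layer for a subsample
+                        # of size m, counted as m*m + m*d + m*c with d = output_conv_dim and c = nb_classes
+                        # (see the inline "m x ..." comments below).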
+                        np_param_nbr_deepstrom_kernel_w = (
+                                np.square(np.array(sorted(set(df_deepstrom_kernel_w["subsample_size"])))) +  # m x m
+                                np.array(
+                                    sorted(set(df_deepstrom_kernel_w["subsample_size"]))) * output_conv_dim +  # m x d
+                                np.array(
+                                    sorted(list(set(df_deepstrom_kernel_w["subsample_size"])))) * nb_classes)  # m x c
+
+                        ax.errorbar(np_param_nbr_deepstrom_kernel_w,
+                                    np_deepstrom_kernel_w_mean_accuracy_test,
+                                    np_deepstrom_kernel_w_std_accuracy_test,
+                                    marker=learned_nys_marker, color=linearity_color,
+                                    label="Adaptive Deepström",
+                                    capsize=3)
+
+                        # get the results of vanilla nystrom
+                        df_deepstrom_kernel_k = df_deepstrom_kernel[df_deepstrom_kernel["real_nystrom"]]
+                        if len(df_deepstrom_kernel_k):
+                            all_accs_k = np.array([
+                                list(df_deepstrom_kernel_k[df_deepstrom_kernel_k["seed"] == seed_v]["accuracy_test"])
+                                for seed_v in seed_values
+                            ])
+                            np_deepstrom_kernel_k_mean_accuracy_test = np.mean(all_accs_k, axis=0)
+                            np_deepstrom_kernel_k_std_accuracy_test = np.std(all_accs_k, axis=0)
+
+                            np_param_nbr_deepstrom_kernel_k = (
+                                    np.square(np.array(sorted(set(df_deepstrom_kernel_k["subsample_size"])))) +  # m x m
+                                    np.array(sorted(
+                                        set(df_deepstrom_kernel_k["subsample_size"]))) * output_conv_dim +  # m x d
+                                    np.array(sorted(
+                                        list(set(df_deepstrom_kernel_k["subsample_size"])))) * nb_classes)  # m x c
+
+                            ax.errorbar(np_param_nbr_deepstrom_kernel_k,
+                                        np_deepstrom_kernel_k_mean_accuracy_test,
+                                        np_deepstrom_kernel_k_std_accuracy_test,
+                                        marker=real_nys_marker, color=linearity_color,
+                                        label="Deepström",
+                                        capsize=3)
+
+                        # plot dense
+                        # ==========
+                        df_dense = df_drop[df_drop["method_name"] == "dense"]
+                        df_dense = df_dense[df_dense["train_size"] == t_size]
+                        df_dense["repr_dim"] = df_dense["repr_dim"].astype(int)
+                        df_dense = df_dense.sort_values(by=["repr_dim"])
+                        np_dense_mean_accuracy_test = np.mean(
+                            np.array([list(df_dense[df_dense["seed"] == seed_v]["accuracy_test"]) for seed_v in
+                                      seed_values]), axis=0)
+                        np_dense_std_accuracy_test = np.std(
+                            np.array([list(df_dense[df_dense["seed"] == seed_v]["accuracy_test"]) for seed_v in
+                                      seed_values]), axis=0)
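+                        # x-axis value for the dense baseline: D*d + D*c parameters for a hidden layer of
+                        # size D (repr_dim) on top of the d-dimensional convolutional output, followed by
+                        # the c-class classifier.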
+                        ax.errorbar(
+                            np.array(sorted([int(n) for n in np.unique(df_dense["repr_dim"])])) * output_conv_dim +
+                            np.array(sorted([int(n) for n in np.unique(df_dense["repr_dim"])])) * nb_classes,
+                            np_dense_mean_accuracy_test,
+                            np_dense_std_accuracy_test,
+                            color=dense_color,
+                            marker=dense_marker,
+                            label="Fully Connected", capsize=3)
+
+                        # plot deepfried
+                        # ==============
+                        df_deepfried = df_drop[df_drop["method_name"] == "deepfriedconvnet"]
+                        np_deepfried_mean_accuracy_test = []
+                        np_deepfried_std_accuracy_test = []
+                        for l_nb in sorted(nb_layers_deepfried):
+                            df_deepfried_stack = df_deepfried[df_deepfried["nb_layer_deepfried"] == l_nb]
+                            np_deepfried_mean_accuracy_test.append(np.mean(df_deepfried_stack["accuracy_test"]))
+                            np_deepfried_std_accuracy_test.append(np.std(df_deepfried_stack["accuracy_test"]))
+
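+                        # x-axis value for DeepFriedConvnet: 3*d parameters per stack (presumably the three
+                        # learned diagonal matrices of the adaptive Fastfood transform) plus the d x c
+                        # classifier, as counted by the formula below.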
+                        nb_param_vals = [(output_conv_dim * 3 + output_conv_dim * nb_classes) * int(l_nb)
+                                         for l_nb in sorted(nb_layers_deepfried)]
+                        ax.errorbar(nb_param_vals,
+                                    np_deepfried_mean_accuracy_test,
+                                    np_deepfried_std_accuracy_test,
+                                    color=deepfried_color,
+                                    marker=deepfried_marker,
+                                    label="Adaptive DeepFriedConvnet", capsize=3)
+                        ax.set_ylim(min_acc, max_acc)
+                        ax.set_ylabel("Accuracy")
+                        ax.set_xticks([1e4, 1e5, 1e6])
+                        # if i == 2:
+                        # ax.set_xlabel("# Parameters")
+                        ax.set_xlabel("# Parameters")
+                        ax.legend(bbox_to_anchor=(0.5, -0.20), loc="upper center", ncol=2)
+                        ax.set_xticklabels([1e4, 1e5, 1e6])
+                        # else:
+                        #     ax.set_xticklabels([])
+                        ax.set_xscale("symlog")
+
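+                        # secondary x-axis mapping each parameter count back to the Nyström subsample size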
+                        ax_twin = ax.twiny()
+                        ax_twin.set_xscale("symlog")
+                        ax_twin.set_xlim(ax.get_xlim())
+                        ax_twin.set_xticks(np_param_nbr_deepstrom_kernel_w)
+
+                        # if i == 0:
+                        ax_twin.set_xlabel("Subsample Size")
+                        ax.set_title(
+                            "{} Kernel - {} - Train size: {}".format(d_translate_kernel[k_name], DATANAME, t_size),
+                            y=1.2)
+                        ax_twin.set_xticklabels(sorted(set(df_deepstrom_kernel_w["subsample_size"])))
+                        # else:
+                        #     ax.set_title("Noyau {} - {} - Train size: {}".format(d_translate_kernel[k_name], DATANAME, t_size))
+                        #     ax_twin.set_xticklabels([])
+
+                        f.set_size_inches(8, 6)
+                        f.tight_layout()
+                        f.subplots_adjust(bottom=0.3)
+                        # f.show()
+                        # exit()
+                        # learnable: change legend
+                        # ODIR = [
+                        #     "/home/luc/PycharmProjects/deepFriedConvnets/main/experiments/graph_drawing/paper/svhn/few_data/parameters/dropout_{}".format(
+                        #         str(drop_val).replace(".", "-"))]
+                        # out_dir_path = ODIR[h]
+
+
+                        if k_name != "rbf":
+                            out_name = "acc_param_tsize_{}_{}_{}".format(t_size, str(drop_val).replace(".", "-"),
+                                                                         k_name)
+                        else:
+                            out_name = "acc_param_tsize_{}_{}_{}_{}".format(t_size, str(drop_val).replace(".", "-"),
+                                                                            k_name,
+                                                                            str(sigma_values[j]).replace(".", "-"))
+
+                        base_out_dir = os.path.join(os.path.splitext(os.path.abspath(__file__))[0], "images")
+                        pathlib.Path(base_out_dir).mkdir(parents=True, exist_ok=True)
+                        out_path = os.path.join(base_out_dir, out_name)
+                        logger.debug(out_path)
+                        f.savefig(out_path)
diff --git a/main/experiments/graph_drawing/till_october_2018/transfert_few_data_cifar100_from_cifar10/vgg_deepstrom_few_data_cifar100_from_cifar10.py b/main/experiments/graph_drawing/till_october_2018/transfert_few_data_cifar100_from_cifar10/vgg_deepstrom_few_data_cifar100_from_cifar10.py
new file mode 100644
index 0000000000000000000000000000000000000000..978978b94b39a2c7bdeb2509350692fed240b60e
--- /dev/null
+++ b/main/experiments/graph_drawing/till_october_2018/transfert_few_data_cifar100_from_cifar10/vgg_deepstrom_few_data_cifar100_from_cifar10.py
@@ -0,0 +1,254 @@
+import os
+
+import matplotlib
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import pathlib
+from skluc.main.utils import logger
+
+matplotlib.rcParams.update({'font.size': 14})
+
+# pd.set_option('display.width', 1000)
+pd.set_option('display.expand_frame_repr', False)
+
+# DAT = ["SVHN"]
+# DIR = ["/home/luc/Resultats/Deepstrom/october_2018/transfert_few_data"]
+
+
+DATANAME = "SVHN"
+DIRNAME = "/home/luc/Resultats/Deepstrom/october_2018/transfert_few_data"
+
+FILENAME = "gathered_results_all.csv"
+
+min_acc = 0.00
+max_acc = 1.05
+# max_acc = 1.0
+linewidth = 0.9
+output_conv_dim = 512
+nb_classes = 10
+
+real_nys_marker = "s"
+
+learned_nys_marker = "x"
+
+linearity_color = "g"
+
+dense_marker = "v"
+dense_color = "r"
+
+deepfried_marker = "8"
+deepfried_color = "b"
+
+d_translate_kernel = {
+    "linear": "Linear",
+    "chi2_cpd": "Chi2",
+    "rbf": "Gaussian"
+}
+
+if __name__ == '__main__':
+    filepath = os.path.join(DIRNAME, FILENAME)
+    field_names = ["method_name",
+                   "accuracy_val",
+                   "accuracy_test",
+                   "runtime_train",
+                   "runtime_val",
+                   "runtime_test",
+                   "number_epoch",
+                   "batch_size",
+                   "repr_dim",
+                   "second_layer_size",
+                   "kernel_deepstrom",
+                   "gamma_kernel",
+                   "constante_sigmoid",
+                   "nb_layer_deepfried",
+                   "subsample_size",
+                   "validation_size",
+                   "seed",
+                   "act",
+                   "non_linearity",
+                   "real_nystrom",
+                   "repr_quality",
+                   "train_size",
+                   "dropout",
+                   "dataset",
+                   "real_deepfried"
+                   ]
+
+    df = pd.read_csv(filepath, names=field_names)
+    df = df[df["accuracy_val"] != 'None']
+    df = df.apply(pd.to_numeric, errors="ignore")
+    df = df.drop_duplicates()
+    method_names = set(df["method_name"].values)
+    kernel_names = set(df["kernel_deepstrom"].values)
+    kernel_names.remove("None")
+    # kernel_names.remove("laplacian")
+    repr_dim = set(df["repr_dim"].values)
+    repr_dim.remove("None")  # dtype: str
+    # repr_dim.remove("16")
+    nys_size = set(df["subsample_size"].values)
+    nys_size.remove("None")
+    nb_layers_deepfried = set(df["nb_layer_deepfried"].values)
+    nb_layers_deepfried.remove("None")
+    seed_values = set(df["seed"].values)
+    batch_size = 128
+    train_sizes = set(df["train_size"])
+
+    cut_layers = set(df["repr_quality"].values)
+
+    logger.debug("Nystrom possible sizes are: {}".format(nys_size))
+    logger.debug("Kernel functions are: {}".format(kernel_names))
+    logger.debug("Compared network types are: {}".format(method_names))
+    logger.debug("Tested representation dimension are: {}".format(repr_dim))
+
+    means_deepstrom = {}
+
+    for t_size in sorted(list(train_sizes)):
+        df_tsize = df[df["train_size"] == t_size]
+
+        for cut_layer in cut_layers:
+            df_cut_layer = df_tsize[df_tsize["repr_quality"] == cut_layer]
+
+            # plot deepstrom
+            # ==============
+            df_deepstrom = df_cut_layer[df_cut_layer["method_name"] == "deepstrom"]
+            df_deepstrom["subsample_size"] = df_deepstrom["subsample_size"].astype(int)
+            df_deepstrom_sort = df_deepstrom.sort_values(by=["subsample_size"])
+            for k_name in sorted(kernel_names):
+                df_deepstrom_kernel = df_deepstrom_sort[df_deepstrom_sort["kernel_deepstrom"] == k_name]
+
+                f, ax = plt.subplots()
+
+                # get the results of learned nystrom
+                df_deepstrom_kernel_w = df_deepstrom_kernel[df_deepstrom_kernel["real_nystrom"] == False]
+                all_accs_w = np.array([
+                    list(df_deepstrom_kernel_w[df_deepstrom_kernel_w["seed"] == seed_v]["accuracy_test"]) for
+                    seed_v in seed_values
+                ])
+                np_deepstrom_kernel_w_mean_accuracy_test = np.mean(all_accs_w, axis=0)
+                np_deepstrom_kernel_w_std_accuracy_test = np.std(all_accs_w, axis=0)
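+                # x-axis value: m*m + m*d + m*c parameters for a subsample of size m, with
+                # d = output_conv_dim and c = nb_classes (see the inline "m x ..." comments below)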
+                np_param_nbr_deepstrom_kernel_w = (
+                        np.square(np.array(sorted(set(df_deepstrom_kernel_w["subsample_size"])))) +  # m x m
+                        np.array(
+                            sorted(set(df_deepstrom_kernel_w["subsample_size"]))) * output_conv_dim +  # m x d
+                        np.array(
+                            sorted(list(set(df_deepstrom_kernel_w["subsample_size"])))) * nb_classes)  # m x c
+
+                ax.errorbar(np_param_nbr_deepstrom_kernel_w,
+                            np_deepstrom_kernel_w_mean_accuracy_test,
+                            np_deepstrom_kernel_w_std_accuracy_test,
+                            marker=learned_nys_marker, color=linearity_color,
+                            label="Adaptive Deepström",
+                            capsize=3)
+
+                # get the results of vanilla nystrom
+                df_deepstrom_kernel_k = df_deepstrom_kernel[df_deepstrom_kernel["real_nystrom"]]
+                if len(df_deepstrom_kernel_k):
+                    all_accs_k = np.array([
+                        list(df_deepstrom_kernel_k[df_deepstrom_kernel_k["seed"] == seed_v]["accuracy_test"]) for
+                        seed_v in seed_values
+                    ])
+                    np_deepstrom_kernel_k_mean_accuracy_test = np.mean(all_accs_k, axis=0)
+                    np_deepstrom_kernel_k_std_accuracy_test = np.std(all_accs_k, axis=0)
+
+                    np_param_nbr_deepstrom_kernel_k = (
+                            np.square(np.array(sorted(set(df_deepstrom_kernel_k["subsample_size"])))) +  # m x m
+                            np.array(sorted(
+                                set(df_deepstrom_kernel_k["subsample_size"]))) * output_conv_dim +  # m x d
+                            np.array(sorted(
+                                list(set(df_deepstrom_kernel_k["subsample_size"])))) * nb_classes)  # m x c
+
+                    ax.errorbar(np_param_nbr_deepstrom_kernel_k,
+                                np_deepstrom_kernel_k_mean_accuracy_test,
+                                np_deepstrom_kernel_k_std_accuracy_test,
+                                marker=real_nys_marker, color=linearity_color,
+                                label="Deepström",
+                                capsize=3)
+
+                # plot dense
+                # ==========
+                df_dense = df_cut_layer[df_cut_layer["method_name"] == "dense"]
+                df_dense = df_dense[df_dense["train_size"] == t_size]
+                df_dense["repr_dim"] = df_dense["repr_dim"].astype(int)
+                df_dense = df_dense.sort_values(by=["repr_dim"])
+                np_dense_mean_accuracy_test = np.mean(
+                    np.array([list(df_dense[df_dense["seed"] == seed_v]["accuracy_test"]) for seed_v in
+                              seed_values]), axis=0)
+                np_dense_std_accuracy_test = np.std(
+                    np.array([list(df_dense[df_dense["seed"] == seed_v]["accuracy_test"]) for seed_v in
+                              seed_values]), axis=0)
+                ax.errorbar(
+                    np.array(sorted([int(n) for n in np.unique(df_dense["repr_dim"])])) * output_conv_dim +
+                    np.array(sorted([int(n) for n in np.unique(df_dense["repr_dim"])])) * nb_classes,
+                    np_dense_mean_accuracy_test,
+                    np_dense_std_accuracy_test,
+                    color=dense_color,
+                    marker=dense_marker,
+                    label="Fully Connected", capsize=3)
+
+                # plot deepfried
+                # ==============
+                df_deepfried = df_cut_layer[df_cut_layer["method_name"] == "deepfriedconvnet"]
+                np_deepfried_mean_accuracy_test = []
+                np_deepfried_std_accuracy_test = []
+                for l_nb in sorted(nb_layers_deepfried):
+                    df_deepfried_stack = df_deepfried[df_deepfried["nb_layer_deepfried"] == l_nb]
+                    if len(df_deepfried_stack):
+                        np_deepfried_mean_accuracy_test.append(np.mean(df_deepfried_stack["accuracy_test"]))
+                        np_deepfried_std_accuracy_test.append(np.std(df_deepfried_stack["accuracy_test"]))
+
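+                # x-axis value: (3*d + d*c) parameters per Fastfood stack, as counted by the formula below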
+                nb_param_vals = [(output_conv_dim * 3 + output_conv_dim * nb_classes) * int(l_nb)
+                                 for l_nb in sorted(set(df_deepfried["nb_layer_deepfried"].values))]
+                ax.errorbar(nb_param_vals,
+                            np_deepfried_mean_accuracy_test,
+                            np_deepfried_std_accuracy_test,
+                            color=deepfried_color,
+                            marker=deepfried_marker,
+                            label="Adaptive DeepFriedConvnet", capsize=3)
+
+
+                ax.set_ylim(min_acc, max_acc)
+                ax.set_ylabel("Accuracy")
+                ax.set_xticks([1e4, 1e5, 1e6])
+                # if i == 2:
+                # ax.set_xlabel("# Parameters")
+                ax.set_xlabel("# Parameters")
+                ax.legend(bbox_to_anchor=(0.5, -0.20), loc="upper center", ncol=2)
+                ax.set_xticklabels([1e4, 1e5, 1e6])
+                # else:
+                #     ax.set_xticklabels([])
+                ax.set_xscale("symlog")
+
+                ax_twin = ax.twiny()
+                ax_twin.set_xscale("symlog")
+                ax_twin.set_xlim(ax.get_xlim())
+                ax_twin.set_xticks(np_param_nbr_deepstrom_kernel_w)
+
+                # if i == 0:
+                ax_twin.set_xlabel("Subsample Size")
+                ax.set_title(
+                    "{} Kernel - {} - Train size: {}".format(d_translate_kernel[k_name], DATANAME, t_size),
+                    y=1.2)
+                ax_twin.set_xticklabels(sorted(set(df_deepstrom_kernel_w["subsample_size"])))
+                # else:
+                #     ax.set_title("Noyau {} - {} - Train size: {}".format(d_translate_kernel[k_name], DATANAME, t_size))
+                #     ax_twin.set_xticklabels([])
+
+                f.set_size_inches(8, 6)
+                f.tight_layout()
+                f.subplots_adjust(bottom=0.3)
+                # f.show()
+                # exit()
+                # learnable: change legend
+                # ODIR = [
+                #     "/home/luc/PycharmProjects/deepFriedConvnets/main/experiments/graph_drawing/paper/svhn/few_data/parameters/dropout_{}".format(
+                #         str(drop_val).replace(".", "-"))]
+                # out_dir_path = ODIR[h]
+
+
+                out_name = "acc_param_tsize_{}_{}_{}".format(t_size, cut_layer, k_name)
+
+                base_out_dir = os.path.join(os.path.splitext(os.path.abspath(__file__))[0], "images")
+                pathlib.Path(base_out_dir).mkdir(parents=True, exist_ok=True)
+                out_path = os.path.join(base_out_dir, out_name)
+                logger.debug(out_path)
+                f.savefig(out_path)
diff --git a/main/experiments/graph_drawing/vgg_cifar_deepstrom_big_subsample.py b/main/experiments/graph_drawing/till_october_2018/vgg_cifar_deepstrom_big_subsample.py
similarity index 100%
rename from main/experiments/graph_drawing/vgg_cifar_deepstrom_big_subsample.py
rename to main/experiments/graph_drawing/till_october_2018/vgg_cifar_deepstrom_big_subsample.py
diff --git a/main/experiments/graph_drawing/vgg_cifar_deepstrom_little_subsample.py b/main/experiments/graph_drawing/till_october_2018/vgg_cifar_deepstrom_little_subsample.py
similarity index 100%
rename from main/experiments/graph_drawing/vgg_cifar_deepstrom_little_subsample.py
rename to main/experiments/graph_drawing/till_october_2018/vgg_cifar_deepstrom_little_subsample.py
diff --git a/main/experiments/graph_drawing/vgg_cifar_deepstrom_relu.py b/main/experiments/graph_drawing/till_october_2018/vgg_cifar_deepstrom_relu.py
similarity index 100%
rename from main/experiments/graph_drawing/vgg_cifar_deepstrom_relu.py
rename to main/experiments/graph_drawing/till_october_2018/vgg_cifar_deepstrom_relu.py
diff --git a/main/experiments/graph_drawing/vgg_cifar_deepstrom_runtime.py b/main/experiments/graph_drawing/till_october_2018/vgg_cifar_deepstrom_runtime.py
similarity index 100%
rename from main/experiments/graph_drawing/vgg_cifar_deepstrom_runtime.py
rename to main/experiments/graph_drawing/till_october_2018/vgg_cifar_deepstrom_runtime.py
diff --git a/main/experiments/parameter_files/october_2018/lazyfile_transfert_few_data.yml b/main/experiments/parameter_files/october_2018/lazyfile_transfert_few_data.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2224de04489bc0d546da6f22f2de037266eced93
--- /dev/null
+++ b/main/experiments/parameter_files/october_2018/lazyfile_transfert_few_data.yml
@@ -0,0 +1,61 @@
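+# Hyper-parameter grid for the few-data transfer experiments ("lazyfile" format).
+# Assumption about the format: each section maps command-line flags to lists of values, a bare
+# section name nested inside another section (e.g. "base:") pulls in that section's options,
+# and "all:" lists the configurations to generate.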
+all:
+  dense:
+  deepfried:
+  deepstrom_real_gamma:
+  deepstrom_real_no_kernel_param:
+  deepstrom_learned_gamma:
+  deepstrom_learned_no_kernel_param:
+
+base:
+  epoch_numbers: {"-e": [50]}
+  batch_sizes: {"-s": [64]}
+  val_size: {"-v": [10000]}
+  seed: {"-a": "range(5)"}
+  dropout: {"-d": [0.5, 0.7, 1.0]}
+  data_size: {"-t": [20, 50, 100, 200, 500, 1000, 2000]}
+  dataset: ["--svhn"]
+
+gammavalue:
+  gamma: {"-g": [0.0001, 0.001, 0.0025145440260884045, 0.01, 0.1]}
+
+dense:
+  network: ["dense"]
+  base:
+  repr_dim: {"-D": [16, 64, 128, 1024]}
+
+deepfried:
+  network: ["deepfriedconvnet"]
+  base:
+  gammavalue:
+  nbstacks: {"-N": [1]}
+
+deepstrom_base:
+  network: ["deepstrom"]
+  base:
+  nys_size: {"-m": [16, 64, 128, 256, 512, 1024]}
+
+deepstrom_real:
+  deepstrom_base:
+  real_nys: ["-r"]
+
+deepstrom_real_gamma:
+  deepstrom_real:
+  gammavalue:
+  kernel: ["-R"]
+
+deepstrom_real_no_kernel_param:
+  deepstrom_real:
+  kernel: ["-C", "-L"]
+
+deepstrom_learned:
+  deepstrom_base:
+
+deepstrom_learned_gamma:
+  deepstrom_learned:
+  gammavalue:
+  kernel: ["-R"]
+
+deepstrom_learned_no_kernel_param:
+  deepstrom_learned:
+  kernel: ["-C", "-L"]
+
diff --git a/main/experiments/parameter_files/october_2018/lazyfile_transfert_few_data_cifar100_from_cifar10.yml b/main/experiments/parameter_files/october_2018/lazyfile_transfert_few_data_cifar100_from_cifar10.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4dc38842bd4650815b3e1df3502179acc7ec1b93
--- /dev/null
+++ b/main/experiments/parameter_files/october_2018/lazyfile_transfert_few_data_cifar100_from_cifar10.yml
@@ -0,0 +1,33 @@
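+# Same "lazyfile" layout as lazyfile_transfert_few_data.yml (see the format assumptions noted there).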
+all:
+  dense:
+  deepfried:
+  deepstrom:
+
+base:
+  epoch_numbers: {"-e": [100]}
+  batch_sizes: {"-s": [64]}
+  val_size: {"-v": [10000]}
+  seed: {"-a": "range(10)"}
+  quiet: ["-q"]
+  data_size: {"-t": [20, 50, 100, 200, 500, 1000, 2000]}
+  dataset: ["--svhn"]
+  weights: {"-W": ["cifar100"]}
+  cut_layer: {"-B": ["block3_pool", "block5_conv4", "block5_pool"]}
+
+dense:
+  network: ["dense"]
+  base:
+  repr_dim: {"-D": [16, 64, 128, 1024]}
+
+deepfried:
+  network: ["deepfriedconvnet"]
+  base:
+  nbstacks: {"-N": [1, 3, 5, 7]}
+
+deepstrom:
+  network: ["deepstrom"]
+  base:
+  real_nys: ["-r", ""]
+  nys_size: {"-m": [16, 64, 128, 256, 512, 1024]}
+  kernel: ["-C", "-L", "-R"]
+