diff --git a/skluc/main/keras_/__init__.py b/skluc/examples/tasks/__init__.py
similarity index 100%
rename from skluc/main/keras_/__init__.py
rename to skluc/examples/tasks/__init__.py
diff --git a/skluc/main/keras_/kernel_approximation/__init__.py b/skluc/examples/tasks/classification/__init__.py
similarity index 100%
rename from skluc/main/keras_/kernel_approximation/__init__.py
rename to skluc/examples/tasks/classification/__init__.py
diff --git a/skluc/examples/tasks/classification/omniglot/__init__.py b/skluc/examples/tasks/classification/omniglot/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/skluc/examples/tasks/classification/omniglot/omniglot_28x28_lenet.py b/skluc/examples/tasks/classification/omniglot/omniglot_28x28_lenet.py
new file mode 100644
index 0000000000000000000000000000000000000000..9133933cf2399da9c7ce4a173bb3a670d371ce70
--- /dev/null
+++ b/skluc/examples/tasks/classification/omniglot/omniglot_28x28_lenet.py
@@ -0,0 +1,88 @@
+import time
+
+import numpy as np
+from keras import optimizers
+from keras.callbacks import LearningRateScheduler, TensorBoard
+from keras.layers import Conv2D, MaxPooling2D
+from keras.layers import Dense, Flatten
+from keras.models import Sequential
+from keras.preprocessing.image import ImageDataGenerator
+
+import skluc.main.data.mldatasets as dataset
+from skluc.main.data.transformation.ResizeTransformer import ResizeTransformer
+
+
+def scheduler(epoch):
+    """
+    Step-wise learning rate schedule for the LearningRateScheduler callback.
+
+    :param epoch: index of the current epoch
+    :return: learning rate (0.1 until epoch 80, 0.01 until epoch 160, then 0.001)
+    """
+    if epoch < 80:
+        return 0.1
+    if epoch < 160:
+        return 0.01
+    return 0.001
+
+
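+# LeNet-5-style classifier for the 964 Omniglot background classes: two 5x5 conv/max-pool
+# blocks followed by 120- and 84-unit dense layers and a softmax output, trained with SGD
+# with Nesterov momentum.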
+def model_definition():
+    model = Sequential()
+    model.add(
+        Conv2D(6, (5, 5), padding='valid', activation='relu', kernel_initializer='he_normal', input_shape=input_shape))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
+    model.add(Conv2D(16, (5, 5), padding='valid', activation='relu', kernel_initializer='he_normal'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
+    model.add(Flatten())
+    model.add(Dense(120, activation='relu', kernel_initializer='he_normal'))
+    model.add(Dense(84, activation='relu', kernel_initializer='he_normal'))
+    model.add(Dense(num_classes, activation='softmax', kernel_initializer='he_normal'))
+    sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)
+    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
+    return model
+
+
+if __name__ == "__main__":
+    validation_size = 1000
+    seed = 0
+    num_classes = 964
+    batch_size = 128
+    epochs = 2000
+    dropout = 0.5
+    weight_decay = 0.0001
+    input_shape = (28, 28, 1)
+    log_filepath = r'./lenet_retrain_logs/'
+
+    data = dataset.OmniglotDataset(validation_size=validation_size, seed=seed)
+    data.load()
+    data.normalize()
+    data.to_one_hot()
+    data.data_astype(np.float32)
+    data.labels_astype(np.float32)
+    data.to_image()
+    resizetrans = ResizeTransformer(data.s_name, (28, 28))
+    data.apply_transformer(resizetrans)
+    (x_train, y_train), (x_test, y_test) = data.train, data.test
+    x_val, y_val = data.validation
+
+    model = model_definition()
+
+    tb_cb = TensorBoard(log_dir=log_filepath, histogram_freq=0)
+    change_lr = LearningRateScheduler(scheduler)
+    cbks = [change_lr, tb_cb]
+
+    print('Using real-time data augmentation.')
+    datagen = ImageDataGenerator(horizontal_flip=True,
+                                 width_shift_range=0.125, height_shift_range=0.125, fill_mode='constant', cval=0.)
+
+    datagen.fit(x_train)
+
+    iterations = int(data.train.data.shape[0] / batch_size)
+    model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
+                        steps_per_epoch=iterations,
+                        epochs=epochs,
+                        callbacks=cbks,
+                        validation_data=(x_val[:200], y_val[:200]))
+
+    model.save('{}_lenet_omniglot.h5'.format(time.time()))
+    print("Final evaluation on val set: {}".format(model.evaluate(x_val, y_val)))
diff --git a/skluc/examples/tasks/classification/omniglot/omniglot_28x28_simple.py b/skluc/examples/tasks/classification/omniglot/omniglot_28x28_simple.py
new file mode 100644
index 0000000000000000000000000000000000000000..d03d477a2137ec34c1eda771d3f6abbd8e99926d
--- /dev/null
+++ b/skluc/examples/tasks/classification/omniglot/omniglot_28x28_simple.py
@@ -0,0 +1,88 @@
+import time
+
+import numpy as np
+from keras import optimizers
+from keras.callbacks import LearningRateScheduler, TensorBoard
+from keras.layers import Conv2D, MaxPooling2D
+from keras.layers import Dense, Flatten
+from keras.models import Sequential
+from keras.preprocessing.image import ImageDataGenerator
+
+import skluc.main.data.mldatasets as dataset
+from skluc.main.data.transformation.ResizeTransformer import ResizeTransformer
+
+
+def scheduler(epoch):
+    """
+    Step-wise learning rate schedule for the LearningRateScheduler callback.
+
+    :param epoch: index of the current epoch
+    :return: learning rate (0.1 until epoch 80, 0.01 until epoch 160, then 0.001)
+    """
+    if epoch < 80:
+        return 0.1
+    if epoch < 160:
+        return 0.01
+    return 0.001
+
+
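+# Same small convnet as the LeNet example: two 5x5 conv/max-pool blocks followed by
+# 120- and 84-unit dense layers and a softmax output over the 964 Omniglot classes.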
+def model_definition():
+    model = Sequential()
+    model.add(
+        Conv2D(6, (5, 5), padding='valid', activation='relu', kernel_initializer='he_normal', input_shape=input_shape))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
+    model.add(Conv2D(16, (5, 5), padding='valid', activation='relu', kernel_initializer='he_normal'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
+    model.add(Flatten())
+    model.add(Dense(120, activation='relu', kernel_initializer='he_normal'))
+    model.add(Dense(84, activation='relu', kernel_initializer='he_normal'))
+    model.add(Dense(num_classes, activation='softmax', kernel_initializer='he_normal'))
+    sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)
+    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
+    return model
+
+
+if __name__ == "__main__":
+    validation_size = 1000
+    seed = 0
+    num_classes = 964
+    batch_size = 128
+    epochs = 2000
+    dropout = 0.5
+    weight_decay = 0.0001
+    input_shape = (28, 28, 1)
+    log_filepath = r'./simple_retrain_logs/'
+
+    data = dataset.OmniglotDataset(validation_size=validation_size, seed=seed)
+    data.load()
+    data.normalize()
+    data.to_one_hot()
+    data.data_astype(np.float32)
+    data.labels_astype(np.float32)
+    data.to_image()
+    resizetrans = ResizeTransformer(data.s_name, (28, 28))
+    data.apply_transformer(resizetrans)
+    (x_train, y_train), (x_test, y_test) = data.train, data.test
+    x_val, y_val = data.validation
+
+    model = model_definition()
+
+    tb_cb = TensorBoard(log_dir=log_filepath, histogram_freq=0)
+    change_lr = LearningRateScheduler(scheduler)
+    cbks = [change_lr, tb_cb]
+
+    print('Using real-time data augmentation.')
+    datagen = ImageDataGenerator(horizontal_flip=True,
+                                 width_shift_range=0.125, height_shift_range=0.125, fill_mode='constant', cval=0.)
+
+    datagen.fit(x_train)
+
+    iterations = int(data.train.data.shape[0] / batch_size)
+    model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
+                        steps_per_epoch=iterations,
+                        epochs=epochs,
+                        callbacks=cbks,
+                        validation_data=(x_val[:200], y_val[:200]))
+
+    model.save('{}_simple_omniglot.h5'.format(time.time()))
+    print("Final evaluation on val set: {}".format(model.evaluate(x_val, y_val)))
diff --git a/skluc/examples/tasks/classification/omniglot/omniglot_32x32_vgg19.py b/skluc/examples/tasks/classification/omniglot/omniglot_32x32_vgg19.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec9d6ab67d915e272c9f1ca796e18d969edddb48
--- /dev/null
+++ b/skluc/examples/tasks/classification/omniglot/omniglot_32x32_vgg19.py
@@ -0,0 +1,181 @@
+import time
+
+import keras
+import numpy as np
+from keras import optimizers
+from keras.callbacks import LearningRateScheduler, TensorBoard
+from keras.initializers import he_normal
+from keras.layers import Conv2D, MaxPooling2D
+from keras.layers import Dense, Dropout, Activation, Flatten
+from keras.layers.normalization import BatchNormalization
+from keras.models import Sequential
+from keras.preprocessing.image import ImageDataGenerator
+
+import skluc.main.data.mldatasets as dataset
+from skluc.main.data.transformation.ResizeTransformer import ResizeTransformer
+
+
+def scheduler(epoch):
+    """
+    Step-wise learning rate schedule for the LearningRateScheduler callback.
+
+    :param epoch: index of the current epoch
+    :return: learning rate (0.1 until epoch 80, 0.01 until epoch 160, then 0.001)
+    """
+    if epoch < 80:
+        return 0.1
+    if epoch < 160:
+        return 0.01
+    return 0.001
+
+
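+# VGG19-style network adapted to 32x32x1 inputs: five convolution blocks (2/2/4/4/4 conv
+# layers) with batch normalization after every convolution, then two 512-unit dense layers
+# with dropout and a softmax output over the 964 Omniglot classes.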
+def model_definition():
+    model = Sequential()
+
+    # Block 1
+    model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block1_conv1', input_shape=input_shape))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block1_conv2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))
+
+    # Block 2
+    model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block2_conv1'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block2_conv2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))
+
+    # Block 3
+    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block3_conv1'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block3_conv2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block3_conv3'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block3_conv4'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))
+
+    # Block 4
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block4_conv1'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block4_conv2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block4_conv3'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block4_conv4'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))
+
+    # Block 5
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block5_conv1'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block5_conv2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block5_conv3'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block5_conv4'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))
+
+    model.add(Flatten(name='flatten'))
+    model.add(Dense(512, use_bias=True, kernel_regularizer=keras.regularizers.l2(weight_decay),
+                    kernel_initializer=he_normal(), name='fc_cifa10'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Dropout(dropout))
+    model.add(
+        Dense(512, kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='fc2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Dropout(dropout))
+
+    model.add(Dense(num_classes, kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(),
+                    name='predictions'))
+
+    model.add(BatchNormalization())
+    model.add(Activation('softmax'))
+
+    # -------- optimizer setting -------- #
+    sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)
+    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
+    return model
+
+
+if __name__ == "__main__":
+    validation_size = 1000
+    seed = 0
+    num_classes = 964
+    batch_size = 128
+    epochs = 2000
+    dropout = 0.5
+    weight_decay = 0.0001
+    input_shape = (32, 32, 1)
+    log_filepath = r'./vgg19_retrain_logs/'
+
+    data = dataset.OmniglotDataset(validation_size=validation_size, seed=seed)
+    data.load()
+    data.normalize()
+    data.to_one_hot()
+    data.data_astype(np.float32)
+    data.labels_astype(np.float32)
+    data.to_image()
+    resizetrans = ResizeTransformer(data.s_name, (32, 32))
+    data.apply_transformer(resizetrans)
+    (x_train, y_train), (x_test, y_test) = data.train, data.test
+    x_val, y_val = data.validation
+
+    model = model_definition()
+
+    tb_cb = TensorBoard(log_dir=log_filepath, histogram_freq=0)
+    change_lr = LearningRateScheduler(scheduler)
+    cbks = [change_lr, tb_cb]
+
+    print('Using real-time data augmentation.')
+    datagen = ImageDataGenerator(horizontal_flip=True,
+                                 width_shift_range=0.125, height_shift_range=0.125, fill_mode='constant', cval=0.)
+
+    datagen.fit(x_train)
+
+    iterations = int(data.train.data.shape[0] / batch_size)
+    model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
+                        steps_per_epoch=iterations,
+                        epochs=epochs,
+                        callbacks=cbks,
+                        validation_data=(x_val[:200], y_val[:200]))
+
+    model.save('{}_vgg19_omniglot.h5'.format(time.time()))
+    print("Final evaluation on val set: {}".format(model.evaluate(x_val, y_val)))
diff --git a/skluc/examples/tasks/verification/__init__.py b/skluc/examples/tasks/verification/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/skluc/examples/tasks/verification/omniglot/__init__.py b/skluc/examples/tasks/verification/omniglot/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/skluc/examples/tasks/verification/omniglot/omniglot_28x28_siamese_networks_lenet.py b/skluc/examples/tasks/verification/omniglot/omniglot_28x28_siamese_networks_lenet.py
new file mode 100644
index 0000000000000000000000000000000000000000..a94bcc14472d07848292c57a0ae64c763a42946a
--- /dev/null
+++ b/skluc/examples/tasks/verification/omniglot/omniglot_28x28_siamese_networks_lenet.py
@@ -0,0 +1,124 @@
+import time
+
+# import dill as pickle
+import numpy as np
+import numpy.random as rng
+from keras import backend as K
+from keras.callbacks import TensorBoard
+from keras.layers import Conv2D, MaxPooling2D
+from keras.layers import Dense, Flatten
+from keras.layers import Input, Lambda
+from keras.models import Model
+from keras.models import Sequential
+from keras.optimizers import Adam
+
+from skluc.main.data.mldatasets import OmniglotDataset
+from skluc.main.data.transformation.ResizeTransformer import ResizeTransformer
+from skluc.main.tensorflow_.utils import batch_generator
+
+
+def W_init(shape, name=None):
+    values = rng.normal(loc=0, scale=1e-2, size=shape)
+    return K.variable(values, name=name)
+
+
+def b_init(shape, name=None):
+    values = rng.normal(loc=0.5, scale=1e-2, size=shape)
+    return K.variable(values, name=name)
+
+
+def pairwise_output_shape(shapes):
+    shape1, shape2 = shapes
+    return shape1
+
+
+def build_lenet_model():
+    model = Sequential()
+    model.add(
+        Conv2D(6, (5, 5), padding='valid', activation='relu', kernel_initializer='he_normal', input_shape=input_shape))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
+    model.add(Conv2D(16, (5, 5), padding='valid', activation='relu', kernel_initializer='he_normal'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
+    model.add(Flatten())
+    model.add(Dense(120, activation='relu', kernel_initializer='he_normal'))
+    model.add(Dense(84, activation='relu', kernel_initializer='he_normal'))
+    return model
+
+
+def model_definition():
+    x1 = Input(input_shape)
+    x2 = Input(input_shape)
+
+    convnet_model = build_lenet_model()
+
+    repr_x1 = convnet_model(x1)
+    repr_x2 = convnet_model(x2)
+
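+    # Element-wise absolute difference between the two embeddings; a single sigmoid unit
+    # then scores the pair as same class (1) or different class (0).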
+    pairwise_l1_dis = Lambda(lambda x: K.abs(x[0] - x[1]), output_shape=pairwise_output_shape)([repr_x1, repr_x2])
+    prediction = Dense(1, activation="sigmoid", bias_initializer=b_init)(pairwise_l1_dis)
+
+    siamese_net = Model(inputs=[x1, x2], outputs=prediction)
+
+    optimizer = Adam(6e-5)
+    siamese_net.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=['accuracy'])
+
+    return convnet_model, siamese_net
+
+
+if __name__ == "__main__":
+    input_shape = (28, 28, 1)
+    validation_size = 1000
+    n_epoch = 200
+    batch_size = 128
+    eval_every = 10
+    nb_pairs = 30000
+    tensorboard_path = "./siamese_lenet_tensorboard"
+
+    convnet_model, siamese_net = model_definition()
+
+    tb_cb = TensorBoard(log_dir=tensorboard_path, histogram_freq=0)
+    cbks = [tb_cb]
+
+    data = OmniglotDataset(validation_size=validation_size, seed=0)
+    data.load()
+    data.normalize()
+    data.to_one_hot()
+    data.data_astype(np.float32)
+    data.labels_astype(np.float32)
+    data.to_image()
+    resize_trans = ResizeTransformer(data.s_name, output_shape=input_shape[:-1])
+    data.apply_transformer(resize_trans)
+
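+    # Build a balanced set of index pairs: half same-class (target 1) and half
+    # different-class (target 0). The two members of each pair are then stacked along
+    # axis 1 so x_train has shape (nb_pairs, 2, height, width, 1).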
+    same_pairs = data.get_n_pairs(data.train.labels, True, nb_pairs // 2)
+    x_same, y_same = [data.train.data[same_pairs[:, 0]], data.train.data[same_pairs[:, 1]]], np.ones(nb_pairs // 2)
+    diff_pairs = data.get_n_pairs(data.train.labels, False, nb_pairs // 2)
+    x_diff, y_diff = [data.train.data[diff_pairs[:, 0]], data.train.data[diff_pairs[:, 1]]], np.zeros(nb_pairs // 2)
+
+    x_train, y_train = np.array([np.vstack((x_same[0], x_diff[0])), np.vstack((x_same[1], x_diff[1]))]), np.hstack(
+        (y_same, y_diff))
+    x_train = np.swapaxes(x_train, 0, 1)
+    shuffled_indexes = np.random.permutation(len(y_train))
+
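+    # Epoch loop: train with train_on_batch on the shuffled pairs and, every `eval_every`
+    # epochs, evaluate on a freshly sampled batch of validation pairs.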
+    i = 0
+    while i < n_epoch:
+        for X_batch, Y_batch in batch_generator(x_train[shuffled_indexes], y_train[shuffled_indexes], batch_size,
+                                                False):
+            loss = siamese_net.train_on_batch([X_batch[:, 0], X_batch[:, 1]], Y_batch)
+        if i % eval_every == 0:
+            same_pairs = data.get_n_pairs(data.validation.labels, True, batch_size // 2)
+            x_same, y_same = [data.validation.data[same_pairs[:, 0]], data.validation.data[same_pairs[:, 1]]], np.ones(
+                batch_size // 2)
+            diff_pairs = data.get_n_pairs(data.validation.labels, False, batch_size // 2)
+            x_diff, y_diff = [data.validation.data[diff_pairs[:, 0]], data.validation.data[diff_pairs[:, 1]]], np.zeros(
+                batch_size // 2)
+
+            x_batch, y_batch = [np.vstack((x_same[0], x_diff[0])), np.vstack((x_same[1], x_diff[1]))], np.hstack(
+                (y_same, y_diff))
+
+            result = siamese_net.evaluate(x=x_batch, y=y_batch)
+            print("iteration {}/{}, training loss: {}, eval: {}".format(i + 1, n_epoch, str(loss), str(result)))
+        i += 1
+
+    timing = time.time()
+    siamese_net.save('{}_siamese_lenet_omniglot_full.h5'.format(timing))
+    convnet_model.save('{}_siamese_lenet_omniglot_conv.h5'.format(timing))
diff --git a/skluc/examples/tasks/verification/omniglot/omniglot_28x28_siamese_networks_simple.py b/skluc/examples/tasks/verification/omniglot/omniglot_28x28_siamese_networks_simple.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a1734b309d968833bb77586f53643452136b079
--- /dev/null
+++ b/skluc/examples/tasks/verification/omniglot/omniglot_28x28_siamese_networks_simple.py
@@ -0,0 +1,132 @@
+import time
+
+# import dill as pickle
+import numpy as np
+import numpy.random as rng
+from keras import backend as K
+from keras.callbacks import TensorBoard
+from keras.layers import Conv2D, MaxPooling2D
+from keras.layers import Dense, Flatten
+from keras.layers import Input, Lambda
+from keras.models import Model
+from keras.models import Sequential
+from keras.optimizers import Adam
+from keras.regularizers import l2
+
+from skluc.main.data.mldatasets import OmniglotDataset
+from skluc.main.data.transformation.ResizeTransformer import ResizeTransformer
+from skluc.main.tensorflow_.utils import batch_generator
+
+
+def W_init(shape, name=None):
+    values = rng.normal(loc=0, scale=1e-2, size=shape)
+    return K.variable(values, name=name)
+
+
+def b_init(shape, name=None):
+    values = rng.normal(loc=0.5, scale=1e-2, size=shape)
+    return K.variable(values, name=name)
+
+
+def pairwise_output_shape(shapes):
+    shape1, shape2 = shapes
+    return shape1
+
+
+def build_simple_model():
+    convnet_model = Sequential()
+    # conv2D takes a function with shape and name arguments for initialization
+    convnet_model.add(Conv2D(64, (10, 10), padding="same", activation="relu", input_shape=input_shape,
+                             kernel_initializer=W_init, kernel_regularizer=l2(2e-4), bias_initializer=b_init))
+    convnet_model.add(MaxPooling2D())
+    convnet_model.add(Conv2D(128, (7, 7), padding="same", activation="relu",
+                             kernel_initializer=W_init, kernel_regularizer=l2(2e-4), bias_initializer=b_init))
+    convnet_model.add(MaxPooling2D())
+    convnet_model.add(Conv2D(128, (4, 4), padding="same", activation="relu",
+                             kernel_initializer=W_init, kernel_regularizer=l2(2e-4), bias_initializer=b_init))
+    convnet_model.add(MaxPooling2D())
+    convnet_model.add(Conv2D(256, (4, 4), padding="same", activation="relu",
+                             kernel_initializer=W_init, kernel_regularizer=l2(2e-4), bias_initializer=b_init))
+    convnet_model.add(Flatten())
+    convnet_model.add(Dense(4096, activation="sigmoid",
+                            kernel_initializer=W_init, kernel_regularizer=l2(1e-3), bias_initializer=b_init))
+    return convnet_model
+
+
+def model_definition():
+    x1 = Input(input_shape)
+    x2 = Input(input_shape)
+
+    convnet_model = build_simple_model()
+
+    repr_x1 = convnet_model(x1)
+    repr_x2 = convnet_model(x2)
+
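+    # Element-wise absolute difference between the two embeddings; a single sigmoid unit
+    # then scores the pair as same class (1) or different class (0).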
+    pairwise_l1_dis = Lambda(lambda x: K.abs(x[0] - x[1]), output_shape=pairwise_output_shape)([repr_x1, repr_x2])
+    prediction = Dense(1, activation="sigmoid", bias_initializer=b_init)(pairwise_l1_dis)
+
+    siamese_net = Model(inputs=[x1, x2], outputs=prediction)
+
+    optimizer = Adam(6e-5)
+    siamese_net.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=['accuracy'])
+
+    return convnet_model, siamese_net
+
+
+if __name__ == "__main__":
+    input_shape = (28, 28, 1)
+    validation_size = 1000
+    n_epoch = 200
+    batch_size = 128
+    eval_every = 10
+    nb_pairs = 30000
+    tensorboard_path = "./siamese_simple_tensorboard"
+
+    convnet_model, siamese_net = model_definition()
+
+    tb_cb = TensorBoard(log_dir=tensorboard_path, histogram_freq=0)
+    cbks = [tb_cb]
+
+    data = OmniglotDataset(validation_size=validation_size, seed=0)
+    data.load()
+    data.normalize()
+    data.to_one_hot()
+    data.data_astype(np.float32)
+    data.labels_astype(np.float32)
+    data.to_image()
+    resize_trans = ResizeTransformer(data.s_name, output_shape=input_shape[:-1])
+    data.apply_transformer(resize_trans)
+
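+    # Build a balanced set of index pairs: half same-class (target 1) and half
+    # different-class (target 0). The two members of each pair are then stacked along
+    # axis 1 so x_train has shape (nb_pairs, 2, height, width, 1).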
+    same_pairs = data.get_n_pairs(data.train.labels, True, nb_pairs // 2)
+    x_same, y_same = [data.train.data[same_pairs[:, 0]], data.train.data[same_pairs[:, 1]]], np.ones(nb_pairs // 2)
+    diff_pairs = data.get_n_pairs(data.train.labels, False, nb_pairs // 2)
+    x_diff, y_diff = [data.train.data[diff_pairs[:, 0]], data.train.data[diff_pairs[:, 1]]], np.zeros(nb_pairs // 2)
+
+    x_train, y_train = np.array([np.vstack((x_same[0], x_diff[0])), np.vstack((x_same[1], x_diff[1]))]), np.hstack(
+        (y_same, y_diff))
+    x_train = np.swapaxes(x_train, 0, 1)
+    shuffled_indexes = np.random.permutation(len(y_train))
+
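+    # Epoch loop: train with train_on_batch on the shuffled pairs and, every `eval_every`
+    # epochs, evaluate on a freshly sampled batch of validation pairs.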
+    i = 0
+    while i < n_epoch:
+        for X_batch, Y_batch in batch_generator(x_train[shuffled_indexes], y_train[shuffled_indexes], batch_size,
+                                                False):
+            loss = siamese_net.train_on_batch([X_batch[:, 0], X_batch[:, 1]], Y_batch)
+        if i % eval_every == 0:
+            same_pairs = data.get_n_pairs(data.validation.labels, True, batch_size // 2)
+            x_same, y_same = [data.validation.data[same_pairs[:, 0]], data.validation.data[same_pairs[:, 1]]], np.ones(
+                batch_size // 2)
+            diff_pairs = data.get_n_pairs(data.validation.labels, False, batch_size // 2)
+            x_diff, y_diff = [data.validation.data[diff_pairs[:, 0]], data.validation.data[diff_pairs[:, 1]]], np.zeros(
+                batch_size // 2)
+
+            x_batch, y_batch = [np.vstack((x_same[0], x_diff[0])), np.vstack((x_same[1], x_diff[1]))], np.hstack(
+                (y_same, y_diff))
+
+            result = siamese_net.evaluate(x=x_batch, y=y_batch)
+            print("iteration {}/{}, training loss: {}, eval: {}".format(i + 1, n_epoch, str(loss), str(result)))
+        i += 1
+
+    timing = time.time()
+    siamese_net.save('{}_siamese_simple_omniglot_full.h5'.format(timing))
+    convnet_model.save('{}_siamese_simple_omniglot_conv.h5'.format(timing))
diff --git a/skluc/examples/tasks/verification/omniglot/omniglot_32x32_siamese_networks_vgg19.py b/skluc/examples/tasks/verification/omniglot/omniglot_32x32_siamese_networks_vgg19.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae40ca229dbdd8014594063f5ae138b74cbb67e0
--- /dev/null
+++ b/skluc/examples/tasks/verification/omniglot/omniglot_32x32_siamese_networks_vgg19.py
@@ -0,0 +1,216 @@
+import time
+
+# import dill as pickle
+import keras
+import numpy as np
+import numpy.random as rng
+from keras import backend as K
+from keras.callbacks import TensorBoard
+from keras.initializers import he_normal
+from keras.layers import Conv2D, MaxPooling2D
+from keras.layers import Dense, Dropout, Activation, Flatten
+from keras.layers import Input, Lambda
+from keras.layers.normalization import BatchNormalization
+from keras.models import Model
+from keras.models import Sequential
+from keras.optimizers import Adam
+
+from skluc.main.data.mldatasets import OmniglotDataset
+from skluc.main.data.transformation.ResizeTransformer import ResizeTransformer
+from skluc.main.tensorflow_.utils import batch_generator
+
+
+def W_init(shape, name=None):
+    values = rng.normal(loc=0, scale=1e-2, size=shape)
+    return K.variable(values, name=name)
+
+
+def b_init(shape, name=None):
+    values = rng.normal(loc=0.5, scale=1e-2, size=shape)
+    return K.variable(values, name=name)
+
+
+def pairwise_output_shape(shapes):
+    shape1, shape2 = shapes
+    return shape1
+
+
+def build_vgg19_model():
+    input_shape = (32, 32, 1)
+    weight_decay = 0.0001
+    dropout = 0.5
+
+    model = Sequential()
+
+    # Block 1
+    model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block1_conv1', input_shape=input_shape))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block1_conv2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))
+
+    # Block 2
+    model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block2_conv1'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block2_conv2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))
+
+    # Block 3
+    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block3_conv1'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block3_conv2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block3_conv3'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block3_conv4'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))
+
+    # Block 4
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block4_conv1'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block4_conv2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block4_conv3'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block4_conv4'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))
+
+    # Block 5
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block5_conv1'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block5_conv2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block5_conv3'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
+                     kernel_initializer=he_normal(), name='block5_conv4'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))
+
+    # fully-connected head (layer names kept from the original CIFAR-10 VGG19 implementation)
+    model.add(Flatten(name='flatten'))
+    model.add(Dense(512, use_bias=True, kernel_regularizer=keras.regularizers.l2(weight_decay),
+                    kernel_initializer=he_normal(), name='fc_cifa10'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Dropout(dropout))
+    model.add(
+        Dense(512, kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='fc2'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(Dropout(dropout))
+
+    return model
+
+
+def model_definition():
+    x1 = Input(input_shape)
+    x2 = Input(input_shape)
+
+    convnet_model = build_vgg19_model()
+
+    repr_x1 = convnet_model(x1)
+    repr_x2 = convnet_model(x2)
+
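+    # Element-wise absolute difference between the two embeddings; a single sigmoid unit
+    # then scores the pair as same class (1) or different class (0).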
+    pairwise_l1_dis = Lambda(lambda x: K.abs(x[0] - x[1]), output_shape=pairwise_output_shape)([repr_x1, repr_x2])
+    prediction = Dense(1, activation="sigmoid", bias_initializer=b_init)(pairwise_l1_dis)
+
+    siamese_net = Model(inputs=[x1, x2], outputs=prediction)
+
+    optimizer = Adam(6e-5)
+    siamese_net.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=['accuracy'])
+
+    return convnet_model, siamese_net
+
+
+if __name__ == "__main__":
+    input_shape = (32, 32, 1)
+    validation_size = 1000
+    n_epoch = 200
+    batch_size = 128
+    eval_every = 10
+    nb_pairs = 30000
+    tensorboard_path = "./siamese_vgg19_tensorboard"
+
+    convnet_model, siamese_net = model_definition()
+
+    tb_cb = TensorBoard(log_dir=tensorboard_path, histogram_freq=0)
+    cbks = [tb_cb]
+
+    data = OmniglotDataset(validation_size=validation_size, seed=0)
+    data.load()
+    data.normalize()
+    data.to_one_hot()
+    data.data_astype(np.float32)
+    data.labels_astype(np.float32)
+    data.to_image()
+    resize_trans = ResizeTransformer(data.s_name, output_shape=(32, 32))
+    data.apply_transformer(resize_trans)
+
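+    # Build a balanced set of index pairs: half same-class (target 1) and half
+    # different-class (target 0). The two members of each pair are then stacked along
+    # axis 1 so x_train has shape (nb_pairs, 2, height, width, 1).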
+    same_pairs = data.get_n_pairs(data.train.labels, True, nb_pairs // 2)
+    x_same, y_same = [data.train.data[same_pairs[:, 0]], data.train.data[same_pairs[:, 1]]], np.ones(nb_pairs // 2)
+    diff_pairs = data.get_n_pairs(data.train.labels, False, nb_pairs // 2)
+    x_diff, y_diff = [data.train.data[diff_pairs[:, 0]], data.train.data[diff_pairs[:, 1]]], np.zeros(nb_pairs // 2)
+
+    x_train, y_train = np.array([np.vstack((x_same[0], x_diff[0])), np.vstack((x_same[1], x_diff[1]))]), np.hstack(
+        (y_same, y_diff))
+    x_train = np.swapaxes(x_train, 0, 1)
+    shuffled_indexes = np.random.permutation(len(y_train))
+
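+    # Epoch loop: train with train_on_batch on the shuffled pairs and, every `eval_every`
+    # epochs, evaluate on a freshly sampled batch of validation pairs.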
+    i = 0
+    while i < n_epoch:
+        for X_batch, Y_batch in batch_generator(x_train[shuffled_indexes], y_train[shuffled_indexes], batch_size,
+                                                False):
+            loss = siamese_net.train_on_batch([X_batch[:, 0], X_batch[:, 1]], Y_batch)
+        if i % eval_every == 0:
+            same_pairs = data.get_n_pairs(data.validation.labels, True, batch_size // 2)
+            x_same, y_same = [data.validation.data[same_pairs[:, 0]], data.validation.data[same_pairs[:, 1]]], np.ones(
+                batch_size // 2)
+            diff_pairs = data.get_n_pairs(data.validation.labels, False, batch_size // 2)
+            x_diff, y_diff = [data.validation.data[diff_pairs[:, 0]], data.validation.data[diff_pairs[:, 1]]], np.zeros(
+                batch_size // 2)
+
+            x_batch, y_batch = [np.vstack((x_same[0], x_diff[0])), np.vstack((x_same[1], x_diff[1]))], np.hstack(
+                (y_same, y_diff))
+
+            result = siamese_net.evaluate(x=x_batch, y=y_batch)
+            print("iteration {}/{}, training loss: {}, eval: {}".format(i + 1, n_epoch, str(loss), str(result)))
+        i += 1
+
+    timing = time.time()
+    siamese_net.save('{}_siamese_vgg19_omniglot_full.h5'.format(timing))
+    convnet_model.save('{}_siamese_vgg19_omniglot_conv.h5'.format(timing))
diff --git a/skluc/main/data/mldatasets/OmniglotDataset.py b/skluc/main/data/mldatasets/OmniglotDataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..795f1de27b0a8a7de1c37d5fb24f2fb568ddd3c2
--- /dev/null
+++ b/skluc/main/data/mldatasets/OmniglotDataset.py
@@ -0,0 +1,128 @@
+import os
+import zipfile
+
+import imageio
+import matplotlib.pyplot as plt
+import numpy as np
+
+from skluc.main.data.mldatasets.ImageDataset import ImageDataset
+from skluc.main.utils import LabeledData, create_directory
+from skluc.main.utils import logger, check_files
+
+
+class OmniglotDataset(ImageDataset):
+    # note: hard-coding height and width is fragile since the source images could change
+    HEIGHT = 105
+    WIDTH = 105
+    DEPTH = 1
+
+    def __init__(self, validation_size=0, seed=None, s_download_dir=None):
+        self.__s_url = ["https://github.com/brendenlake/omniglot/raw/master/python/images_background.zip",
+                        "https://github.com/brendenlake/omniglot/raw/master/python/images_evaluation.zip"
+                        ]
+        self.meta = None
+        name = "omniglot"
+        if s_download_dir is not None:
+            super().__init__(self.__s_url, name, s_download_dir, validation_size=validation_size, seed=seed)
+        else:
+            super().__init__(self.__s_url, name, validation_size=validation_size, seed=seed)
+
+        self.__extracted_dirs = [
+            os.path.join(self.s_download_dir, "images_background"),
+            os.path.join(self.s_download_dir, "images_evaluation")
+        ]
+
+    def get_n_pairs(self, labels, same_class, n):
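+        """
+        Draw n random pairs of indices into `labels`.
+
+        If same_class is True, both indices of a pair point to examples with the same
+        label; otherwise the two indices have different labels. Returns an (n, 2) array
+        of integer indices.
+        """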
+        logger.debug("Get {} pairs of {} class.".format(n, "same" if same_class else "different"))
+        unique_labels = np.unique(labels, axis=0)
+        # return_idx_labels = np.empty(n, dtype=np.int)
+        # pairs = [np.zeros((n, self.h, self.w, 1)) for _ in range(2)]
+        pairs = np.empty((0, 2), dtype=int)
+        i = 0
+        while i < n:
+            if i % 1000 == 0 and i != 0:
+                logger.debug("Got {}/{}".format(i + 1, n))
+            rand_lab_idx = np.random.choice(len(unique_labels), 1)[0]
+            rand_lab = unique_labels[rand_lab_idx]
+            bool_idx_labels = self.get_bool_idx_label(rand_lab, labels)
+            idx_labs = np.where(bool_idx_labels)[0]
+            if same_class:
+                rand_pair = np.random.choice(idx_labs, 2, replace=True)
+            else:
+                idx_labs_diff = np.setdiff1d(np.arange(len(labels)), idx_labs)
+                rand_first_elm = np.random.choice(idx_labs, 1)[0]
+                rand_second_elm_diff = np.random.choice(idx_labs_diff, 1)[0]
+                rand_pair = np.array([rand_first_elm, rand_second_elm_diff])
+            pairs = np.vstack((pairs, rand_pair))
+            i += 1
+        return pairs
+
+    def get_omniglot_data(self, tag):
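+        """
+        Walk the extracted images_<tag> directory (one sub-directory per alphabet, one
+        sub-sub-directory per character) and return the flattened images together with
+        an integer class index per character.
+        """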
+        data_dirname = "images_" + tag
+        data_dirpath = os.path.join(self.s_download_dir, data_dirname)
+        class_index = 0
+        list_of_images = []
+        list_of_labels = []
+        for alphabet_name in os.listdir(data_dirpath):
+            data_alphabet_dirpath = os.path.join(data_dirpath, alphabet_name)
+            for char in os.listdir(data_alphabet_dirpath):
+                charname = alphabet_name + "_" + char[-2:]  # unused; kept in case the human-readable labels are needed
+                data_char_dirpath = os.path.join(data_alphabet_dirpath, char)
+                for char_image_file in os.listdir(data_char_dirpath):
+                    char_image_path = os.path.join(data_char_dirpath, char_image_file)
+                    im = imageio.imread(char_image_path)
+                    list_of_images.append(im.flatten())
+                    list_of_labels.append(class_index)
+
+                class_index += 1
+        return np.array(list_of_images), np.array(list_of_labels)
+
+    def read(self):
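+        """
+        Load the dataset: reuse cached .npz files when they exist, otherwise extract the
+        downloaded zip archives and read the background (train) and evaluation (test)
+        splits before caching them.
+        """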
+        npzdir_path = os.path.join(self.s_download_dir, "npzfiles")
+        lst_npzfile_paths = [os.path.join(npzdir_path, kw + ".npz")
+                             for kw in self.data_groups_private]
+        create_directory(npzdir_path)
+        if check_files(lst_npzfile_paths):
+            # case npz files already exist
+            logger.debug("Files {} already exists".format(lst_npzfile_paths))
+            logger.info("Loading transformed data from files {}".format(lst_npzfile_paths))
+            self.load_npz(npzdir_path)
+
+        else:
+
+            if not check_files(self.__extracted_dirs):
+                # case zip files dont even exist
+                logger.debug("Extracting {} ...".format(self.l_filepaths))
+                for zip_file in self.l_filepaths:
+                    zip_ref = zipfile.ZipFile(zip_file, 'r')
+                    zip_ref.extractall(self.s_download_dir)
+                    zip_ref.close()
+            else:
+                logger.debug("Files {} have already been extracted".format(self.l_filepaths))
+
+            logger.debug("Get training data of dataset {}".format(self.s_name))
+            self._train = LabeledData(*self.get_omniglot_data('background'))
+
+            logger.debug("Get testing data of dataset {}".format(self.s_name))
+            self._test = LabeledData(*self.get_omniglot_data('evaluation'))
+
+            self._check_validation_size(self._train[0].shape[0])
+
+            self.save_npz()
+
+
+if __name__ == "__main__":
+    import time
+
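+    # Quick visual sanity check: display the training images one by one with their labels
+    # (requires a display backend for matplotlib).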
+    d = OmniglotDataset(validation_size=10000)
+    d.load()
+    d.to_image()
+    d.normalize()
+    for i, im in enumerate(d.train.data):
+        print(im.shape)
+        im = im.squeeze(axis=2)
+        print(im.shape)
+        plt.imshow(im)
+        plt.show()
+        print(d.train.labels[i])
+        time.sleep(1)
diff --git a/skluc/main/keras_/kernel_approximation/nystrom_layer.py b/skluc/main/keras_/kernel_approximation/nystrom_layer.py
deleted file mode 100644
index 5725de1c18575133d8abe981b71f81b72de66c44..0000000000000000000000000000000000000000
--- a/skluc/main/keras_/kernel_approximation/nystrom_layer.py
+++ /dev/null
@@ -1,104 +0,0 @@
-"""
-Example of how vgg and deepstrom could be co-trained. (deepstrom on top of vgg convolution layers)
-"""
-from skluc.main.tensorflow_.kernel_approximation import nystrom_layer
-from skluc.main.tensorflow_.utils import classification_cifar, batch_generator
-
-from time import time
-from keras.applications.vgg19 import VGG19
-
-import skluc.main.data.mldatasets as dataset
-import numpy as np
-import tensorflow as tf
-
-
-if __name__ == '__main__':
-    cifar = dataset.Cifar10Dataset(validation_size=1000)
-    cifar.load()
-    cifar.to_image()
-    cifar.to_one_hot()
-    cifar.normalize()
-    cifar.data_astype(np.float32)
-    cifar.labels_astype(np.float32)
-
-    cifar.rescale(2)
-
-    num_epoch = 100
-    batch_size = 54
-    subsample = cifar.train[0][:100]
-
-    input_dim, output_dim = cifar.train[0].shape[1:], cifar.train[1].shape[1]
-    with tf.Graph().as_default():
-        vgg_conv_model = VGG19(include_top=False, weights='imagenet', input_shape=input_dim)
-        i = 0
-        for layer in vgg_conv_model.layers:
-            layer.trainable = False
-            i = i + 1
-            print(i, layer.name)
-
-        x = tf.placeholder(tf.float32, shape=[None, *input_dim], name="x")
-        tf.summary.image("input", x)
-        x_subsample = tf.Variable(subsample, name="x_subsample", trainable=False)
-        y_ = tf.placeholder(tf.float32, shape=[None, output_dim], name="labels")
-        conv_x = vgg_conv_model(x)
-        conv_subsample = vgg_conv_model(x_subsample)
-
-        out_nystrom = nystrom_layer(conv_x, conv_subsample, 1, 100)
-
-        y_conv, keep_prob = classification_cifar(out_nystrom, output_dim=output_dim)
-
-        # # calcul de la loss
-        with tf.name_scope("xent"):
-            cross_entropy = tf.reduce_mean(
-                tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv, name="xentropy"),
-                name="xentropy_mean")
-            tf.summary.scalar('loss-xent', cross_entropy)
-
-        # # calcul du gradient
-        with tf.name_scope("train"):
-            global_step = tf.Variable(0, name="global_step", trainable=False)
-            train_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cross_entropy, global_step=global_step)
-
-        # # calcul de l'accuracy
-        with tf.name_scope("accuracy"):
-            predictions = tf.argmax(y_conv, 1)
-            correct_prediction = tf.equal(predictions, tf.argmax(y_, 1))
-            accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
-            tf.summary.scalar("accuracy", accuracy_op)
-
-        init = tf.global_variables_initializer()
-        merged_summary = tf.summary.merge_all()
-        summary_writer = tf.summary.FileWriter("log/".format(time()))
-        # Create a session for running Ops on the Graph.
-        with tf.Session() as sess:
-            summary_writer.add_graph(sess.graph)
-            # Initialize all Variable objects
-            sess.run(init)
-            # actual learning
-            feed_dict_val = {x: cifar.validation[0], y_: cifar.validation[1], keep_prob: 1.0}
-            for i in range(num_epoch):
-                j = 0
-                start = time()
-                for X_batch, Y_batch in batch_generator(cifar.train[0], cifar.train[1], batch_size, True):
-                    feed_dict = {x: X_batch, y_: Y_batch, keep_prob: 0.5}
-                    _, loss = sess.run([train_optimizer, cross_entropy], feed_dict=feed_dict)
-                    if j % 100 == 0:
-                        print('batch {}/{}, loss {} (with dropout), {:.2f}s / batch'
-                              .format((j+1)*(i+1), int(num_epoch*(cifar.train[0].shape[0]/batch_size)), loss, (time()-start)/100))
-                        r_accuracy = sess.run([accuracy_op], feed_dict=feed_dict_val)
-                        summary_str = sess.run(merged_summary, feed_dict=feed_dict)
-                        summary_writer.add_summary(summary_str, (j+1)*(i+1))
-                        start = time()
-                    j += 1
-
-            accuracies = []
-            i = 0
-            for X_batch, Y_batch in batch_generator(cifar.test[0], cifar.test[1], 1000, True):
-                accuracy = sess.run([accuracy_op], feed_dict={
-                    x: X_batch, y_: Y_batch, keep_prob: 1.0})
-                accuracies.append(accuracy[0])
-                i += 1
-
-        print(sum(accuracies) / i)
-
-