diff --git a/skluc/main/tensorflow_/kernel_approximation/nystrom_layer.py b/skluc/main/tensorflow_/kernel_approximation/nystrom_layer.py
index 2d25157e6c147279e2e768a69c67898790d42d1c..9e709c6a4b44706bc0511330f0a6bab0fed705b0 100644
--- a/skluc/main/tensorflow_/kernel_approximation/nystrom_layer.py
+++ b/skluc/main/tensorflow_/kernel_approximation/nystrom_layer.py
@@ -293,6 +293,7 @@ class DeepstromLayer(tf.keras.layers.Layer):
                     initializer=tf.random_normal_initializer(stddev=0.1),
                     trainable=True
                 )
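+        # Let the base Layer finish building (this sets self.built = True).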
+        super(DeepstromLayer, self).build(input_shape)
 
     def call(self, input_x, **kwargs):
         with tf.name_scope("NystromLayer"):
@@ -301,15 +302,90 @@ class DeepstromLayer(tf.keras.layers.Layer):
             h_conv_nystrom_subsample_flat = tf.reshape(self.__subsample, [self.__subsample_size, init_dim])
             with tf.name_scope("kernel_vec"):
                 kernel_vector = self.__tf_kernel_fct(h_conv_flat, h_conv_nystrom_subsample_flat, **self.__kernel_dict)
+                tf.summary.histogram("kernel_vector", kernel_vector)
 
             if self.__output_dim != 0:
                 out = tf.matmul(kernel_vector, self.__W_matrix)
+                tf.summary.histogram("W_matrix", self.__W_matrix)
             else:
                 out = kernel_vector
         if self.__activation is not None:
             out = self.__activation(out)
         return out
 
+
+class KernelLayerEndToEnd(tf.keras.layers.Layer):
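+    """Nystrom layer variant that only computes the kernel vector.
+
+    The layer is called on a list [input_x, subsample] and evaluates the
+    selected kernel between every input example and every subsample point,
+    returning a tensor of shape (batch_size, subsample_size). Unlike
+    DeepstromLayerEndToEnd, no W matrix is applied on top of the kernel
+    vector. kernel_dict holds extra keyword arguments forwarded to the
+    kernel function.
+
+    Illustrative usage (names and sizes are assumptions, not from this repo):
+
+        layer = KernelLayerEndToEnd(subsample_size=16, kernel_name="linear")
+        kernel_vector = layer([input_x, subsample])  # shape (batch_size, 16)
+    """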
+    def __init__(self,
+                 subsample_size,
+                 kernel_name,
+                 sum_of_kernels=False,
+                 stack_of_kernels=False,
+                 kernel_dict=None
+                 ):
+        # Avoid a mutable default argument for the kernel parameters.
+        kernel_dict = kernel_dict if kernel_dict is not None else {}
+
+        def init_kernel():
+            if kernel_name == "rbf":
+                kernel_fct = rbf_kernel
+                tf_kernel_fct = tf_rbf_kernel
+            elif kernel_name == "linear":
+                kernel_fct = linear_kernel
+                tf_kernel_fct = tf_linear_kernel
+            elif kernel_name == "chi2_cpd":
+                kernel_fct = additive_chi2_kernel
+                tf_kernel_fct = tf_chi_square_CPD
+            elif kernel_name == "chi2_exp_cpd":
+                kernel_fct = chi2_kernel
+                tf_kernel_fct = tf_chi_square_CPD_exp
+            elif kernel_name == "chi2_pd":
+                raise NotImplementedError("Make sure this code does not make a mess")
+            elif kernel_name == "laplacian":
+                tf_kernel_fct = tf_laplacian_kernel
+                kernel_fct = laplacian_kernel
+            else:
+                raise ValueError("Unknown kernel name: {}".format(kernel_name))
+            return kernel_name, kernel_fct, tf_kernel_fct, kernel_dict
+
+
+        super().__init__()
+
+        self.__subsample_size = subsample_size
+
+        self.__sum_of_kernels = sum_of_kernels
+        self.__stack_of_kernels = stack_of_kernels
+
+        self.__kernel_name, self.__kernel_fct, self.__tf_kernel_fct, self.__kernel_dict = init_kernel()
+
+        self.__W_matrix = None
+
+        logger.info("Selecting kernel layer function with "
+                    "subsample size = {}, "
+                    "and kernel = {}"
+                    .format(self.__subsample_size,
+                            self.__kernel_name))
+
+
+    def call(self, inputs, **kwargs):
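+        # `inputs` is expected to be a pair [input_x, subsample]:
+        #   input_x:   2D tensor of shape (batch_size, dim)
+        #   subsample: 2D tensor of shape (subsample_size, dim)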
+        if not isinstance(inputs, list):
+            raise ValueError("Inputs of the kernel layer should be a list [input_x, subsample]")
+        if len(inputs[0].shape) != 2:
+            raise ValueError(f"Input x should be 2D but it is {len(inputs[0].shape)}D")
+        if len(inputs[1].shape) != 2:
+            raise ValueError(f"Input subsample should be 2D but it is {len(inputs[1].shape)}D")
+        if inputs[1].shape[0] != self.__subsample_size:
+            raise ValueError(f"Subsample should be of size {self.__subsample_size}")
+        if inputs[0].shape[-1] != inputs[1].shape[-1]:
+            raise ValueError(f"Input and subsample should have the same dimension "
+                             f"({inputs[0].shape[-1]} != {inputs[1].shape[-1]})")
+
+        input_x = inputs[0]
+        input_sub = inputs[1]
+        with tf.name_scope("NystromLayer"):
+            with tf.name_scope("kernel_vec"):
+                kernel_vector = self.__tf_kernel_fct(input_x, input_sub, **self.__kernel_dict)
+                logger.debug("Kernel vector computed")
+                tf.summary.histogram("kernel_vector", kernel_vector)
+
+        return kernel_vector
+
 
 class DeepstromLayerEndToEnd(tf.keras.layers.Layer):
     def __init__(self,
@@ -395,6 +471,7 @@ class DeepstromLayerEndToEnd(tf.keras.layers.Layer):
                 initializer=tf.random_normal_initializer(stddev=0.1),
                 trainable=True
             )
+        super(DeepstromLayerEndToEnd, self).build(input_shape)
 
     def call(self, inputs, **kwargs):
         if type(inputs) is not list:
@@ -413,9 +490,11 @@ class DeepstromLayerEndToEnd(tf.keras.layers.Layer):
         with tf.name_scope("NystromLayer"):
             with tf.name_scope("kernel_vec"):
                 kernel_vector = self.__tf_kernel_fct(input_x, input_sub, **self.__kernel_dict)
+                tf.summary.histogram("kernel_vector", kernel_vector)
 
             if self.__output_dim != 0:
                 out = tf.matmul(kernel_vector, self.__W_matrix)
+                tf.summary.histogram("W_matrix", self.__W_matrix)
             else:
                 out = kernel_vector
         if self.__activation is not None: