diff --git a/cm-code/bert-minimal.py b/cm-code/bert-minimal.py
new file mode 100755
index 0000000000000000000000000000000000000000..477e1e44f3d67ceb5eeb6c6394f0e2f81cf4bd07
--- /dev/null
+++ b/cm-code/bert-minimal.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+
+import torch
+from transformers import AutoModel, AutoTokenizer
+
+name  = 'almanach/camembert-base'
+sent = "Quelle surprise ! Arturo a la covid"
+tok   = AutoTokenizer.from_pretrained(name)
+model = AutoModel.from_pretrained(name)
+
+# Tokenise the pre-split words; return PyTorch tensors for the model
+tok_sent = tok(sent.split(), is_split_into_words=True,
+               return_tensors='pt')
+tok_ids  = tok_sent['input_ids'][0]
+decoded  = tok.convert_ids_to_tokens(tok_ids)
+print(decoded)               # subword tokens, incl. <s> and </s>
+print(tok_sent.word_ids())   # subword -> source-word index (None for specials)
+with torch.no_grad():  # inference only, no gradients needed
+  embeds = model(**tok_sent)['last_hidden_state'][0]
+print(embeds.shape)    # (num_subword_tokens, hidden_size)
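+
+# A hedged sketch of a common follow-up (an addition, not in the original
+# script): pool subword embeddings back to one vector per input word by
+# averaging, using the word_ids() subword-to-word mapping printed above.
+word_ids    = tok_sent.word_ids()
+n_words     = len(sent.split())
+word_embeds = torch.stack(
+    [embeds[[i for i, w in enumerate(word_ids) if w == j]].mean(dim=0)
+     for j in range(n_words)])
+print(word_embeds.shape)  # (num_words, hidden_size)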
diff --git a/cm-code/selfattn.py b/cm-code/selfattn.py
new file mode 100755
index 0000000000000000000000000000000000000000..12ea5f0828f65da4c6e5eb5bf798bb951948b58d
--- /dev/null
+++ b/cm-code/selfattn.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+
+import numpy as np
+from scipy.special import softmax
+
+################################################################################
+
+def selfattention(X, WK, WQ, WV, bidir=False):
+  """Toy self-attention: project X into keys, queries and values,
+  then return the attention-weighted values. Note: no 1/sqrt(d_k)
+  scaling, unlike standard scaled dot-product attention."""
+  K = X @ WK
+  Q = X @ WQ
+  V = X @ WV
+  print(f"K=\n{K}\n\nQ=\n{Q}\n\nV=\n{V}\n")
+
+  scores = Q @ K.T
+  print(f"scores=\n{scores}\n")
+  if not bidir:
+    # Causal mask: -inf above the diagonal, so position i attends
+    # only to positions <= i (softmax turns -inf into weight 0).
+    mask = np.zeros(scores.shape)
+    mask[np.triu_indices(scores.shape[0], 1)] = -np.inf
+    scores = scores + mask
+    print(f"mask=\n{mask}\n\nscores-masked=\n{scores}\n")
+
+  alpha = softmax(scores, axis=1)  # attention weights: rows sum to 1
+  print(f"alpha=\n{np.round(alpha, 2)}\n")
+
+  A = alpha @ V
+  print(f"A=\n{np.round(A, 2)}\n")
+  return A
+
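+# A hedged sketch (an addition, not in the original) of standard scaled
+# dot-product attention (Vaswani et al., 2017): like selfattention above,
+# but bidirectional and with scores divided by sqrt(d_k).
+def scaled_selfattention(X, WK, WQ, WV):
+  K, Q, V = X @ WK, X @ WQ, X @ WV
+  scores = Q @ K.T / np.sqrt(K.shape[1])  # K.shape[1] == d_k
+  return softmax(scores, axis=1) @ V
+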
+################################################################################
+
+# 4x3 projection matrices: map 4-dim token vectors to 3-dim K/Q/V
+WK = np.array([[0, 0, 1], [1, 1, 0], [0, 1, 0], [1, 1, 0]])
+WQ = np.array([[1, 0, 1], [1, 0, 0], [0, 0, 1], [0, 1, 1]])
+WV = np.array([[0, 2, 0], [0, 3, 0], [1, 0, 3], [1, 1, 0]])
+
+X = np.array([[1, 0, 1, 0], [0, 2, 0, 2], [1, 1, 1, 1]]) # 3 tokens, 4 dims each
+print(f"X=\n{X}\n")
+
+selfattention(X, WK, WQ, WV, bidir=False)
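+
+# Same inputs without the causal mask: every position attends to every
+# position, as in a BERT-style bidirectional encoder.
+selfattention(X, WK, WQ, WV, bidir=True)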