diff --git a/expe/PLE.dic b/expe/PLE.dic
index 2c052ec4aebb93dceb147745604ea808bc370978..6a580e2be22de852b0ccb6a7abeb8228da83adc7 100644
--- a/expe/PLE.dic
+++ b/expe/PLE.dic
@@ -1,55 +1,9 @@
 ##POS
 NULL
 ROOT
-SCONJ
-ADP
-NOUN
-ADJ
-PUNCT
-DET
-VERB
-AUX
-PROPN
-NUM
-CCONJ
-PRON
-ADV
-SYM
-PART
 ##LABEL
 NULL
 ROOT
-case
-fixed
-obl
-amod
-punct
-det
-nsubj
-root
-aux
-xcomp
-obj
-nmod
-nummod
-cc
-conj
-mark
-advcl
-advmod
-csubj
-appos
-flat
-ccomp
-acl
-cop
-compound
-iobj
-expl
-orphan
-parataxis
 ##EOS
 NULL
 ROOT
-0
-1
diff --git a/expe/out/empty b/expe/out/empty
deleted file mode 100644
index 8d1c8b69c3fce7bea45c73efd06983e3c419a92f..0000000000000000000000000000000000000000
--- a/expe/out/empty
+++ /dev/null
@@ -1 +0,0 @@
- 
diff --git a/src/FeatModel.py b/src/FeatModel.py
index d6af63dad76e48d0ab14dbeff11f3b30f79125f2..8c344feca37f69d2f806140d6edf7c8c94446014 100644
--- a/src/FeatModel.py
+++ b/src/FeatModel.py
@@ -54,7 +54,7 @@ class FeatModel:
     def getFeatLabel(self, featIndex):
         return self.featArray[featIndex][3]
 
-    def buildInputVector(self, featVec, dicos):
+    def buildInputVectorOneHot(self, featVec, dicos):
         inputVector = np.zeros(self.inputVectorSize, dtype="int32")
         origin = 0
         for i in range(self.getNbFeat()):
@@ -65,3 +65,12 @@
             inputVector[origin + position] = 1
             origin += size
         return inputVector
+
+    def buildInputVector(self, featVec, dicos):
+        # index representation: one cell per feature, holding the code of the
+        # feature value in its dictionary (instead of a one-hot sub-vector)
+        inputVector = np.zeros(self.getNbFeat(), dtype="int32")
+        for i in range(self.getNbFeat()):
+            label = self.getFeatLabel(i)
+            inputVector[i] = dicos.getCode(label, featVec[i])
+        return inputVector
diff --git a/src/Moves.py b/src/Moves.py
index 4dfc036bb0a85dd58318a321d0ab1200f2f708ce..4232bdb00fea8c6489a1c77573812befb1d82d21 100644
--- a/src/Moves.py
+++ b/src/Moves.py
@@ -36,8 +36,15 @@ class Moves:
             labelCode = int((mvt_Code - 3)/ 2)
             return ('RIGHT', self.dicoLabels.getSymbol(labelCode))
 
-    def buildOutputVector(self, mvt):
+    def buildOutputVectorOneHot(self, mvt):
         outputVector = np.zeros(self.nb, dtype="int32")
         codeMvt = self.mvtCode(mvt)
         outputVector[codeMvt] = 1
         return outputVector
+
+    def buildOutputVector(self, mvt):
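+        # sparse target: a single cell holding the move code, instead of a one-hot vector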
+        outputVector = np.zeros(1, dtype="int32")
+        codeMvt = self.mvtCode(mvt)
+        outputVector[0] = codeMvt
+        return outputVector
diff --git a/src/Oracle.py b/src/Oracle.py
index 18f029b0e7aa5a7e9acdfc90e8c6a2735121096d..04d486187c197a0cc576f93f6d62bee1af4f4796 100644
--- a/src/Oracle.py
+++ b/src/Oracle.py
@@ -66,7 +66,7 @@ def oracle(c):
     #print("no movement possible return SHIFT")
     if not c.getBuffer().endReached(): 
         return('SHIFT', '')
-    print("The machine is stucked")
+    print("The machine is stuck")
     exit(1)
     
 
diff --git a/src/mcf2cff.py b/src/mcf2cff.py
index e4d4cc70aeec4ba85536da42530c1a7e47506eba..a0cfd2881d9f900cbbdd4e26938acc2ad45332c9 100644
--- a/src/mcf2cff.py
+++ b/src/mcf2cff.py
@@ -41,13 +41,17 @@ def prepareData(mcd, mcfFile, featModel, moves, filename, wordsLimit) :
         prepareWordBufferForTrain(c.getBuffer())
         while True :
             mvt = Oracle.oracle(c)
-            outputVector = moves.buildOutputVector(mvt)
+            code = moves.mvtCode(mvt)
+#            print("mvt = ", mvt, "code = ", code)
+#            outputVector = moves.buildOutputVector(mvt)
             featVec = c.extractFeatVec(featModel)
             inputVector = featModel.buildInputVector(featVec, dicos)
+#            np.savetxt(dataFile, inputVector, fmt="%s", delimiter='  ', newline=' ')
+#            dataFile.write('\n')
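+            # one line per configuration: the move code followed by the feature codes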
+            np.savetxt(dataFile, [code], fmt="%s", delimiter='  ', newline=' ')
             np.savetxt(dataFile, inputVector, fmt="%s", delimiter='  ', newline=' ')
             dataFile.write('\n')
-            np.savetxt(dataFile, outputVector, fmt="%s", delimiter='  ', newline=' ')
-            dataFile.write('\n')
 
             if(verbose == True) :
                 print("------------------------------------------")
diff --git a/src/remove_non_projective_sentences_from_conll.py b/src/remove_non_projective_sentences_from_conll.py
index f1188994031d47e5ea26933781d6b7c9dd4464b5..3899587a8879f1d3b2ccc5b5daf87a0837cd7ef1 100644
--- a/src/remove_non_projective_sentences_from_conll.py
+++ b/src/remove_non_projective_sentences_from_conll.py
@@ -3,7 +3,7 @@ from WordBuffer import WordBuffer
 from Word import Word
 
 if len(sys.argv) < 2 :
-    print('usage:', sys.argv[0], 'conllFile mcdFile')
+    print('usage:', sys.argv[0], 'conllFile')
     exit(1)
 
 
diff --git a/src/tbp_decode.py b/src/tbp_decode_keras.py
similarity index 100%
rename from src/tbp_decode.py
rename to src/tbp_decode_keras.py
diff --git a/src/tbp_decode_pytorch.py b/src/tbp_decode_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7e139992109bcf2fbae735241be9a2af078b53c
--- /dev/null
+++ b/src/tbp_decode_pytorch.py
@@ -0,0 +1,120 @@
+import sys
+import Oracle
+from Dicos import Dicos
+from Config import Config
+from Word import Word
+from Mcd import Mcd
+from Moves import Moves
+from FeatModel import FeatModel
+import torch
+
+import numpy as np
+
+
+
+def prepareWordBufferForDecode(buffer):
+    """Add to every word of the buffer features GOVREF and LABELREF.
+
+    GOVEREF is a copy of feature GOV and LABELREF a copy of LABEL
+    GOV and LABEL are set to initialization values
+    """
+    for word in buffer.array:
+        word.setFeat('GOV', str(Word.invalidGov()))
+        word.setFeat('LABEL', Word.invalidLabel())
+
+
+verbose = False
+if len(sys.argv) != 7 :
+    print('usage:', sys.argv[0], 'mcf_file model_file dicos_file feat_model mcd_file words_limit')
+    exit(1)
+
+mcf_file =       sys.argv[1]
+model_file =     sys.argv[2]
+dicos_file =     sys.argv[3]
+feat_model =     sys.argv[4]
+mcd_file =       sys.argv[5]
+wordsLimit = int(sys.argv[6])
+
+    
+sys.stderr.write('reading mcd from file :')
+sys.stderr.write(mcd_file)
+sys.stderr.write('\n')
+mcd = Mcd(mcd_file)
+
+sys.stderr.write('loading dicos\n')
+dicos = Dicos(fileName=dicos_file)
+
+moves = Moves(dicos)
+
+sys.stderr.write('reading feature model from file :')
+sys.stderr.write(feat_model)
+sys.stderr.write('\n')
+featModel = FeatModel(feat_model, dicos)
+
+sys.stderr.write('loading model :')
+sys.stderr.write(model_file)
+sys.stderr.write('\n')
+model = torch.load(model_file)  # assumes the model was saved with torch.save
+model.eval()                    # disable dropout for decoding
+inputSize = featModel.getInputSize()
+outputSize = moves.getNb()
+
+c = Config(mcf_file, mcd, dicos)
+numSent = 0
+verbose = False
+numWords = 0
+
+while c.getBuffer().readNextSentence()  and numWords < wordsLimit :
+    c.getStack().empty()
+    prepareWordBufferForDecode(c.getBuffer())
+    numWords += c.getBuffer().getLength()
+
+    while True :
+        featVec = c.extractFeatVec(featModel)
+        inputVector = featModel.buildInputVector(featVec, dicos)
+        # forward pass; the model is assumed to take a batch of feature code vectors
+        with torch.no_grad():
+            outputVector = model(torch.as_tensor(inputVector.reshape(1, -1), dtype=torch.long))
+        mvt_Code = int(outputVector.argmax())
+        mvt = moves.mvtDecode(mvt_Code)
+
+        if(verbose == True) :
+            print("------------------------------------------")
+            c.affiche()
+            print('predicted move', mvt[0], mvt[1])
+            print(mvt, featVec)
+
+        res = c.applyMvt(mvt)
+        if not res :
+            sys.stderr.write("cannot apply predicted movement\n")
+            mvt_type = mvt[0]
+            mvt_label = mvt[1]
+            if mvt_type != "SHIFT" :
+                sys.stderr.write("try to force SHIFT\n")
+                res = c.shift()
+                if res == False :
+                    sys.stderr.write("try to force REDUCE\n")
+                    res = c.red()
+                    if res == False :
+                        sys.stderr.write("abort sentence\n")
+                        break
+        if(c.isFinal()):
+            break
+    for i in range(1, c.getBuffer().getLength()):
+        w = c.getBuffer().getWord(i)
+        w.affiche(mcd)
+        print('')
+#        print('\t', w.getFeat("GOV"), end='\t')
+#        print(w.getFeat("LABEL"))
+
+    numSent += 1
+#    if numSent % 10 == 0:
+#        print ("Sent : ", numSent)
+
+
+
+
+
+
diff --git a/src/tbp_train.py b/src/tbp_train_keras.py
similarity index 100%
rename from src/tbp_train.py
rename to src/tbp_train_keras.py
diff --git a/src/tbp_train_pytorch.py b/src/tbp_train_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bc52fef82309c714a446b6237cad79806913ef1
--- /dev/null
+++ b/src/tbp_train_pytorch.py
@@ -0,0 +1,92 @@
+import sys
+import numpy as np
+import torch
+from torch import nn
+
+def readData(dataFilename) :
+    allX = []
+    allY = []
+    try:
+#        dataFile = open(dataFilename, encoding='utf-8')
+        dataFile = open(dataFilename)
+    except IOError:
+        print(dataFilename, ": this file does not exist")
+        exit(1)
+
+
+        
+    inputSize = int(dataFile.readline())
+    print("input size = ", inputSize)
+    outputSize = int(dataFile.readline())
+    print("output size = ", outputSize)
+
+    # each line of the cff file contains the move code followed by the
+    # feature value codes of the configuration (see mcf2cff.py)
+    for ligne in dataFile:
+        vector = list(map(int, ligne.split()))
+        allY.append(vector[0])
+        allX.append(vector[1:])
+    # x_train and y_train are Numpy arrays 
+    x_train = np.array(allX)
+    y_train = np.array(allY)
+    return (inputSize, outputSize, x_train, y_train)
+
+
+
+if len(sys.argv) < 4 :
+    print('usage:', sys.argv[0], 'cffTrainFileName cffDevFileName pytorchModelFileName')
+    exit(1)
+
+cffTrainFileName =   sys.argv[1]
+cffDevFileName =     sys.argv[2]
+pytorchModelFileName = sys.argv[3]
+
+inputSize, outputSize, x_train, y_train = readData(cffTrainFileName)
+devInputSize, devOutputSize, x_dev, y_dev = readData(cffDevFileName)
+# The network below is a sketch, not necessarily the intended architecture:
+# it mirrors the former Keras MLP (128 hidden units, dropout 0.4) on top of an
+# embedding of the feature codes; the embedding size and the single embedding
+# table shared by all features are assumptions.
+nbFeat = x_train.shape[1]
+vocabSize = int(max(x_train.max(), x_dev.max())) + 1
+embSize = 32
+
+model = nn.Sequential(
+    nn.Embedding(vocabSize, embSize),
+    nn.Flatten(),
+    nn.Linear(nbFeat * embSize, 128),
+    nn.ReLU(),
+    nn.Dropout(0.4),
+    nn.Linear(128, outputSize),
+)
+
+# CrossEntropyLoss applies the softmax internally and expects class indices
+lossFunction = nn.CrossEntropyLoss()
+optimizer = torch.optim.Adam(model.parameters())
+
+x_train = torch.as_tensor(x_train, dtype=torch.long)
+y_train = torch.as_tensor(y_train, dtype=torch.long)
+x_dev = torch.as_tensor(x_dev, dtype=torch.long)
+y_dev = torch.as_tensor(y_dev, dtype=torch.long)
+
+
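+# Training loop: a minimal hand-rolled stand-in for the former Keras model.fit
+# call (10 epochs, batch size 32, dev accuracy printed after every epoch).
+batchSize = 32
+for epoch in range(10):
+    model.train()
+    permutation = torch.randperm(x_train.size(0))
+    for start in range(0, x_train.size(0), batchSize):
+        indices = permutation[start:start + batchSize]
+        optimizer.zero_grad()
+        loss = lossFunction(model(x_train[indices]), y_train[indices])
+        loss.backward()
+        optimizer.step()
+    model.eval()
+    with torch.no_grad():
+        devAccuracy = (model(x_dev).argmax(dim=1) == y_dev).float().mean().item()
+    print('epoch', epoch + 1, 'dev accuracy =', devAccuracy)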
+
+torch.save(model, pytorchModelFileName)
+        
+