diff --git a/CMakeLists.txt b/CMakeLists.txt
index 660efad593180c859ac26386ced641f0138a7b04..f82836baad1b0f0abd86eea0f3076ef6ef2b49fd 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,15 +3,14 @@ project(macaon2)
 
 find_package(FLEX)
 find_package(BLAS)
-#find_package(DyNet)
+find_package(DyNet)
 
 add_definitions("-Wall" )
 SET(CMAKE_C_COMPILER g++)
 SET(CMAKE_CXX_COMPILER g++)
 
-
-SET( CMAKE_CXX_FLAGS  "${CMAKE_CXX_FLAGS} -std=c++11 -Ofast -DUSE_CBLAS -I/home/marjorie/Documents/LIB/dynet -I/home/marjorie/Documents/LIB/eigen")
-SET( CMAKE_EXE_LINKER_FLAGS  "${CMAKE_EXE_LINKER_FLAGS} -lm -lopenblas -L/home/marjorie/Documents/LIB/dynet/build/dynet -ldynet" )
+SET( CMAKE_CXX_FLAGS  "${CMAKE_CXX_FLAGS} -std=c++11 -Ofast -DUSE_CBLAS" )
+SET( CMAKE_EXE_LINKER_FLAGS  "${CMAKE_EXE_LINKER_FLAGS} -lm -lopenblas -ldynet" )
 
 
 if (${CMAKE_C_COMPILER_VERSION} VERSION_LESS 5.3)
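
Note: with the hard-coded /home/marjorie include and link paths removed, find_package(DyNet) must now locate the library on its own. A minimal sketch of pointing CMake at a local DyNet/Eigen install through the standard find_package search path (the paths below are placeholders, not project defaults):

    cmake .. -DCMAKE_PREFIX_PATH="/opt/dynet;/opt/eigen"
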
diff --git a/maca_trans_parser/src/train_cff.cpp b/maca_trans_parser/src/train_cff.cpp
index 67f91e1a6c9a64d937adb8dc1b001c1ba5150632..92bb3161edf9db1ede23fa221234bbc30e9d6e95 100644
--- a/maca_trans_parser/src/train_cff.cpp
+++ b/maca_trans_parser/src/train_cff.cpp
@@ -26,7 +26,7 @@ Layer::Layer(unsigned input_dim, unsigned output_dim, Activation activation, flo
 */
 MLP::MLP(ParameterCollection & model)
 {
-	LAYERS = 0;
+	LAYERS = layers.size();
 }
 
 /**
@@ -34,10 +34,11 @@ MLP::MLP(ParameterCollection & model)
 	* \details Creates a feedforward multilayer perceptron based on a list of layer descriptions
 	*
 	* \param model  : ParameterCollection (to contain parameters)
-	* \param layers : Layers description
+	* \param filename : file containing the MLP's structure
 */
-MLP::MLP(ParameterCollection& model, vector<Layer> layers) 
+MLP::MLP(ParameterCollection& model, char* filename)
 {
+	read_struct_mlp(filename);
 	// Verify layers compatibility
 	for (unsigned l = 0; l < layers.size() - 1; ++l) 
 	{
@@ -210,3 +211,25 @@ inline Expression MLP::activate(Expression h, Activation f)
 			break;
 	}
 }
+
+void MLP::read_struct_mlp(char* filename)
+{
+	ifstream file(filename, ios::in);
+	if(!file)
+	{
+		cerr << "Unable to open file " << filename << endl;
+		exit(EXIT_FAILURE);
+	}
+	unsigned input_dim, output_dim, activation;
+	float dropout;
+	
+	while(file >> input_dim) // one layer description per iteration
+	{
+		file >> output_dim;
+		file >> activation; // integer code, cast below to the Activation enum
+		file >> dropout;
+		
+		layers.push_back(Layer(input_dim, output_dim, (Activation) activation, dropout));
+	}
+}
+
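
For reference, a sketch of the structure file that read_struct_mlp() consumes: four whitespace-separated fields per layer, in the order input_dim, output_dim, activation code, dropout rate. The integer activation codes below are assumptions; they must match the declaration order of the Activation enum in train_cff.hpp (here RELU and LINEAR are assumed to be codes 2 and 3):

    5   50  2 0.2
    50  100 2 0.2
    100 150 2 0.2
    150 3   3 0.0
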
diff --git a/maca_trans_parser/src/train_cff.hpp b/maca_trans_parser/src/train_cff.hpp
index e697aed0d792de74ee63a6518265e35f2061d0cf..3869270f204a04f5e0bc4d0bab968518f7ce507d 100644
--- a/maca_trans_parser/src/train_cff.hpp
+++ b/maca_trans_parser/src/train_cff.hpp
@@ -57,8 +57,9 @@
 			bool dropout_active = true;
 
 		public:
+			void read_struct_mlp(char* filename);
 			MLP(dynet::ParameterCollection & model);
-			MLP(dynet::ParameterCollection& model, std::vector<Layer> layers);
+			MLP(dynet::ParameterCollection& model, char* filename);
 			void append(dynet::ParameterCollection& model, Layer layer);
 			dynet::Expression run(dynet::Expression x, dynet::ComputationGraph& cg);
 			dynet::Expression get_nll(dynet::Expression x, std::vector</*dynet::real*/unsigned int> labels, dynet::ComputationGraph& cg);
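
A minimal usage sketch of the new file-based constructor (mlp.struct is a placeholder path, not a file shipped with this patch):

    char structure_file[] = "mlp.struct";  // placeholder: written in the format read_struct_mlp() expects
    dynet::ParameterCollection model;
    MLP nn(model, structure_file);         // layer descriptions are loaded by read_struct_mlp()
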
diff --git a/maca_trans_parser/src/train_dynet.cpp b/maca_trans_parser/src/train_dynet.cpp
index 957e7150be2b0a2df5d415de4165ac19826a2b6d..d99165b6596fe7d3a77c7ffe17b6a0886b7480c3 100644
--- a/maca_trans_parser/src/train_dynet.cpp
+++ b/maca_trans_parser/src/train_dynet.cpp
@@ -3,6 +3,16 @@
     ./trainCFF train_file dev_file batch_size nb_epochs
 **/
 
+
+/**
+Fix CMake to include DyNet
+Read Flo and Seb's file
+Initialize the Layer vector (push_back)
+*
+One-hot representation
+Embedding representation
+**/
+
 #include <iostream>
 #include <fstream>
 #include "train_cff.hpp"
@@ -21,7 +31,6 @@ using namespace dynet;
 
 
 
-
 /**
 * Retrieves the network's input dimension from an fm file
  * */
@@ -135,10 +144,10 @@ int main(int argc, char** argv)
 	// Use Adam optimizer
 	AdamTrainer trainer(model);
 	trainer.clip_threshold *= batch_size;
 
 	// Create model
 	MLP nn(model, vector<Layer>({
-		Layer(/* input_dim (number of features) */ 5, /* output_dim */ 50, /* activation */ RELU, /* dropout_rate */ 0.2),
+		Layer(/* input_dim (number of features) */ input_dim, /* output_dim */ 50, /* activation */ RELU, /* dropout_rate */ 0.2),
 		Layer(/* input_dim */ 50, /* output_dim */ 100, /* activation */ RELU, /* dropout_rate */ 0.2),
 		Layer(/* input_dim */ 100, /* output_dim */ 150, /* activation */ RELU, /* dropout_rate */ 0.2),
 		Layer(/* input_dim */ 150, /* output_dim (number of possible classes) */ 3, /* activation */ LINEAR, /* dropout_rate */ 0.0)