diff --git a/MLP/src/MLP.cpp b/MLP/src/MLP.cpp
index 8e1e0c431665c3edd41ff1f86ee43a2b6817e7de..01ee7b3bf37d44a4fcaf88cdd71cf55e31984d74 100644
--- a/MLP/src/MLP.cpp
+++ b/MLP/src/MLP.cpp
@@ -434,6 +434,9 @@ inline dynet::Expression MLP::activate(dynet::Expression h, Activation f)
       break;
   }
 
+  fprintf(stderr, "ERROR (%s) : Activation not implemented \'%s\'. Aborting.\n", ERRINFO, activation2str(f).c_str());
+  exit(1);
+
   return h;
 }
 
diff --git a/neural_network/include/MLP.hpp b/neural_network/include/MLP.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..b927afbc9705bc9c68b3e679c9ba6e90057842c6
--- /dev/null
+++ b/neural_network/include/MLP.hpp
@@ -0,0 +1,226 @@
+/// @file MLP.hpp
+/// @author Franck Dary
+/// @version 2.0
+/// @date 2018-08-03
+
+#ifndef MLP__H
+#define MLP__H
+
+#include "NeuralNetwork.hpp"
+
+/// @brief Multi Layer Perceptron.
+///
+/// It is capable of training itself given a batch of examples.\n
+/// Once trained, it can also be used to predict the class of a certain input.
+class MLP : public NeuralNetwork
+{
+  public :
+
+  /// @brief Activation function for an MLP Layer.
+  enum Activation
+  {
+    SIGMOID,
+    TANH,
+    RELU,
+    ELU,
+    LINEAR,
+    SPARSEMAX,
+    CUBE,
+    SOFTMAX
+  };
+
+  /// @brief Get the string corresponding to an Activation.
+  ///
+  /// @param a The activation.
+  ///
+  /// @return The string corresponding to a.
+  static std::string activation2str(Activation a);
+
+  /// @brief Get the Activation corresponding to a string.
+  ///
+  /// @param s The string.
+  ///
+  /// @return The Activation corresponding to s. If s is unknown, the program aborts.
+  static Activation str2activation(std::string s);
+
+  /// @brief A simple struct that represents an MLP Layer.
+  struct Layer
+  {
+    /// @brief Number of input neurons of this Layer.
+    int input_dim;
+    /// @brief Number of output neurons of this Layer.
+    int output_dim;
+
+    /// @brief The dropout rate to apply to this Layer when training.
+    float dropout_rate;
+    /// @brief The activation function for this Layer.
+    Activation activation;
+
+    /// @brief Construct a new Layer
+    ///
+    /// @param input_dim Number of input neurons of this Layer.
+    /// @param output_dim Number of output neurons of this Layer.
+    /// @param dropout_rate Dropout rate to apply to this Layer when training.
+    /// @param activation Activation function for this Layer.
+    Layer(int input_dim, int output_dim,
+      float dropout_rate, Activation activation);
+    /// @brief Print a description of this Layer.
+    ///
+    /// @param file Where to print the output.
+    void print(FILE * file);
+  };
+
+  private :
+
+  /// @brief The Layers of the MLP.
+  std::vector<Layer> layers;
+  /// @brief The parameters corresponding to the layers of the MLP.
+  std::vector< std::vector<dynet::Parameter> > parameters;
+
+  /// @brief The training algorithm that will be used.
+  std::unique_ptr<dynet::Trainer> trainer;
+  /// @brief Whether the Layer dropout rates must be taken into account during computations. Usually this is the case only during training.
+  bool dropoutActive;
+
+  /// @brief The current minibatch.
+  std::vector<FeatureModel::FeatureDescription> fds;
+  /// @brief Gold classes of the current minibatch.
+  std::vector<unsigned int> golds;
+
+  private :
+
+  /// @brief Add the parameters of a layer into the dynet model.
+  ///
+  /// @param layer The layer to add.
+  void addLayerToModel(Layer & layer);
+  /// @brief Abort the program if the layers are not compatible.
+  void checkLayersCompatibility();
+  /// @brief Set dynet and srand() seeds.
+  ///
+  /// @return The DynetParams containing the set seed.
+  dynet::DynetParams & getDefaultParams() override;
+  /// @brief Convert a FeatureValue to a dynet Expression that will be used as an input of the Multi Layer Perceptron.
+  ///
+  /// @param cg The current Computation Graph.
+  /// @param fv The FeatureValue that will be converted.
+  ///
+  /// @return A dynet Expression of value fv that can be used as an input in the Multi Layer Perceptron
+  dynet::Expression featValue2Expression(dynet::ComputationGraph & cg, const FeatureModel::FeatureValue & fv);
+  /// @brief Compute the image of input x by the Multi Layer Perceptron.
+  ///
+  /// @param cg The current computation graph.
+  /// @param x The input of the Multi Layer Perceptron.
+  ///
+  /// @return The result (values of the output Layer) of the computation of x by the Multi Layer Perceptron.
+  dynet::Expression run(dynet::ComputationGraph & cg, dynet::Expression x);
+  /// @brief Compute the image of an expression by an activation function.
+  ///
+  /// @param h The expression we want the image of.
+  /// @param f The activation function.
+  ///
+  /// @return f(h)
+  inline dynet::Expression activate(dynet::Expression h, Activation f);
+  /// @brief Print the parameters.
+  ///
+  /// @param output Where the parameters will be printed to.
+  void printParameters(FILE * output);
+  /// @brief Save the structure of the MLP (all the Layers) to a file.
+  ///
+  /// The goal is to store the structure of the MLP in a file so that
+  /// it can be loaded and reused later.
+  /// @param filename The file in which the structure will be saved.
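+  ///
+  /// Each Layer is written on its own line, for example (illustrative values) :\n
+  /// Layer : 500 150 RELU 0.30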
+  void saveStruct(const std::string & filename);
+  /// @brief Save the learned parameters of the MLP to a file.
+  ///
+  /// Only the parameters of the Layers will be saved by this function.\n
+  /// The parameters that live inside a Dict will be saved by their owner,
+  /// the Dict object.
+  /// @param filename The file in which the parameters will be saved.
+  void saveParameters(const std::string & filename);
+  /// @brief Load and construct all the Layers from a file.
+  ///
+  /// The file must have been written by the function saveStruct.
+  /// @param filename The file from which the structure will be read.
+  void loadStruct(const std::string & filename);
+  /// @brief Load and populate the model with parameters from a file.
+  ///
+  /// The file must have been written by the function saveParameters.
+  /// @param filename The file from which the parameters will be read.
+  void loadParameters(const std::string & filename);
+  /// @brief Load an MLP from a file.
+  ///
+  /// This function will use loadStruct and loadParameters.
+  /// @param filename The file from which the MLP will be loaded.
+  void load(const std::string & filename);
+  /// @brief Initialize the dynet library.
+  ///
+  /// Must be called only once, and before any call to dynet functions.
+  void initDynet();
+  /// @brief Get the weighted loss expression.
+  ///
+  /// @param output Output from the neural network.
+  /// @param oneHotGolds Indices of the gold classes (batched form).
+  ///
+  /// @return The loss expression.
+  dynet::Expression weightedLoss(dynet::Expression & output, std::vector<unsigned int> & oneHotGolds);
+
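+  /// @brief Get the error correction loss expression.
+  ///
+  /// @param cg The current Computation Graph.
+  /// @param output Output from the neural network.
+  /// @param oneHotGolds Indices of the gold classes (batched form).
+  ///
+  /// @return The loss expression.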
+  dynet::Expression errorCorrectionLoss(dynet::ComputationGraph & cg, dynet::Expression & output, std::vector<unsigned int> & oneHotGolds);
+
+  public :
+
+  /// @brief Convert a dynet expression to a string (useful for debugging purposes).
+  ///
+  /// @param expr The expression to convert.
+  ///
+  /// @return A string representing the expression.
+  static std::string expression2str(dynet::Expression & expr);
+  /// @brief Initialize a new untrained MLP from a desired topology.
+  ///
+  /// Topology example for 2 hidden layers : (150,RELU,0.3)(50,ELU,0.2)\n
+  /// These hidden layers have sizes 150 and 50, activation functions RELU and ELU,
+  /// and dropout rates of 0.3 and 0.2.
+  /// @param nbInputs The size of the input layer of the MLP.
+  /// @param topology Description of each hidden Layer of the MLP.
+  /// @param nbOutputs The size of the output layer of the MLP.
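+  ///
+  /// Minimal usage sketch (the input and output sizes below are purely illustrative) :
+  /// @code
+  /// MLP mlp;
+  /// mlp.init(500, "(150,RELU,0.3)(50,ELU,0.2)", 20);
+  /// @endcode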
+  void init(int nbInputs, const std::string & topology, int nbOutputs);
+  /// @brief Construct a new MLP for training.
+  MLP();
+  /// @brief Read and construct a trained MLP from a file.
+  ///
+  /// The file must have been written by save.
+  /// @param filename The file to read the MLP from.
+  MLP(const std::string & filename);
+  /// @brief Give a score to each possible class, given an input.
+  ///
+  /// @param fd The input to use.
+  ///
+  /// @return A vector containing one score per possible class.
+  std::vector<float> predict(FeatureModel::FeatureDescription & fd);
+
+  /// @brief Update the parameters according to the given gold class.
+  ///
+  /// @param fd The input to use.
+  /// @param gold The gold class of this input.
+  ///
+  /// @return The loss.
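+  ///
+  /// Minimal training sketch, assuming a FeatureDescription fd and its gold class gold are available :
+  /// @code
+  /// float loss = mlp.update(fd, gold);           // buffered until a full minibatch is gathered
+  /// std::vector<float> scores = mlp.predict(fd); // one score per possible class
+  /// @endcode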
+  float update(FeatureModel::FeatureDescription & fd, int gold);
+
+  /// @brief Save the MLP to a file.
+  /// 
+  /// @param filename The file to write the MLP to.
+  void save(const std::string & filename);
+  /// @brief Print the topology (Layers) of the MLP.
+  ///
+  /// @param output Where the topology will be printed.
+  void printTopology(FILE * output);
+  /// @brief Allocate the correct trainer type depending on the program parameters.
+  ///
+  /// @return A pointer to the newly allocated trainer.
+  dynet::Trainer * createTrainer();
+  /// @brief Return the model.
+  ///
+  /// @return The model of this MLP.
+  dynet::ParameterCollection & getModel();
+};
+
+#endif
diff --git a/neural_network/include/NeuralNetwork.hpp b/neural_network/include/NeuralNetwork.hpp
index 46e9c1ba8c82345563d42d5fca28f78eaddd4443..52c81cb686747a872ddf0d55cfa07067ad515a1c 100644
--- a/neural_network/include/NeuralNetwork.hpp
+++ b/neural_network/include/NeuralNetwork.hpp
@@ -10,6 +10,17 @@
 
 class NeuralNetwork
 {
+  protected :
+
+  /// @brief The seed that will be used by the RNGs (srand and dynet).
+  static int randomSeed;
+
+  /// @brief Tracks whether or not dynet has been initialized.
+  static bool dynetIsInit;
+
+  /// @brief The dynet model containing the parameters to be trained.
+  dynet::ParameterCollection model;
+
   public :
 
   /// @brief Convert a dynet expression to a string (usefull for debug purposes)
@@ -50,6 +61,11 @@ class NeuralNetwork
   ///
   /// @param output Where the topology will be printed.
   virtual void printTopology(FILE * output) = 0;
+
+  /// @brief Return the model.
+  ///
+  /// @return The model of this NeuralNetwork.
+  dynet::ParameterCollection & getModel();
 };
 
 #endif
diff --git a/neural_network/src/MLP.cpp b/neural_network/src/MLP.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..54f0d73464805df55b096a2769937d9c5bacab5b
--- /dev/null
+++ b/neural_network/src/MLP.cpp
@@ -0,0 +1,548 @@
+#include <algorithm>
+#include "MLP.hpp"
+// The project headers below provide ProgramParameters, File and the string utilities
+// (split, noAccentLower, float2str) ; their exact file names are assumed here.
+#include "ProgramParameters.hpp"
+#include "File.hpp"
+#include "util.hpp"
+
+std::string MLP::activation2str(Activation a)
+{
+  switch(a)
+  {
+    case LINEAR :
+      return "LINEAR";
+      break;
+    case RELU :
+      return "RELU";
+      break;
+    case ELU :
+      return "ELU";
+      break;
+    case CUBE :
+      return "CUBE";
+      break;
+    case SIGMOID :
+      return "SIGMOID";
+      break;
+    case TANH :
+      return "TANH";
+      break;
+    case SOFTMAX :
+      return "SOFTMAX";
+      break;
+    case SPARSEMAX :
+      return "SPARSEMAX";
+      break;
+    default :
+      break;
+  }
+
+  return "UNKNOWN";
+}
+
+MLP::Activation MLP::str2activation(std::string s)
+{
+  if(s == "LINEAR")
+    return LINEAR;
+  else if(s == "RELU")
+    return RELU;
+  else if(s == "ELU")
+    return ELU;
+  else if(s == "CUBE")
+    return CUBE;
+  else if(s == "SIGMOID")
+    return SIGMOID;
+  else if(s == "TANH")
+    return TANH;
+  else if(s == "SOFTMAX")
+    return SOFTMAX;
+  else if(s == "SPARSEMAX")
+    return SPARSEMAX;
+  else
+  {
+    fprintf(stderr, "ERROR (%s) : invalid activation \'%s\'. Aborting\n",ERRINFO, s.c_str());
+    exit(1);
+  }
+
+  return LINEAR;
+}
+
+void MLP::initDynet()
+{
+  if(dynetIsInit)
+    return;
+
+  dynetIsInit = true;
+  dynet::initialize(getDefaultParams());
+}
+
+MLP::MLP()
+{
+  randomSeed = ProgramParameters::seed;
+  dropoutActive = true;
+  trainer.reset(createTrainer());
+  initDynet();
+}
+
+void MLP::init(int nbInputs, const std::string & topology, int nbOutputs)
+{
+  std::string topo = topology;
+  std::replace(topo.begin(), topo.end(), '(', ' ');
+  std::replace(topo.begin(), topo.end(), ')', ' ');
+
+  auto groups = split(topo);
+  for (auto group : groups)
+  {
+    if(group.empty())
+      continue;
+
+    std::replace(group.begin(), group.end(), ',', ' ');
+    auto layer = split(group);
+
+    if (layer.size() != 3)
+    {
+      fprintf(stderr, "ERROR (%s) : invalid topology \'%s\'. Aborting.\n", ERRINFO, topology.c_str());
+      exit(1);
+    }
+
+    int input = layers.empty() ? nbInputs : layers.back().output_dim;
+    int output = std::stoi(layer[0]); 
+    float dropout = std::stof(layer[2]);
+    layers.emplace_back(input, output, dropout, str2activation(layer[1]));
+  }
+
+  int lastInputDim = layers.empty() ? nbInputs : layers.back().output_dim;
+  layers.emplace_back(lastInputDim, nbOutputs, 0.0, Activation::LINEAR);
+
+  checkLayersCompatibility();
+
+  for (Layer & layer : layers)
+    addLayerToModel(layer);
+}
+
+dynet::Trainer * MLP::createTrainer()
+{
+  auto optimizer = noAccentLower(ProgramParameters::optimizer);
+
+  if (optimizer == "amsgrad")
+    return new dynet::AmsgradTrainer(model, ProgramParameters::learningRate, ProgramParameters::beta1, ProgramParameters::beta2, ProgramParameters::bias);
+  else if (optimizer == "adam")
+    return new dynet::AdamTrainer(model, ProgramParameters::learningRate, ProgramParameters::beta1, ProgramParameters::beta2, ProgramParameters::bias);
+  else if (optimizer == "sgd")
+    return new dynet::SimpleSGDTrainer(model, ProgramParameters::learningRate);
+  else if (optimizer == "none")
+    return nullptr;
+
+  fprintf(stderr, "ERROR (%s) : unknown optimizer \'%s\'. Aborting.\n", ERRINFO, optimizer.c_str());
+
+  exit(1);
+
+  return nullptr;
+}
+
+void MLP::addLayerToModel(Layer & layer)
+{
+  dynet::Parameter W = model.add_parameters({(unsigned)layer.output_dim, (unsigned)layer.input_dim});
+  dynet::Parameter b = model.add_parameters({(unsigned)layer.output_dim});
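+  // When random initialization is disabled, every weight and bias is set to 1.0
+  // so that the initial values are deterministic.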
+  if (!ProgramParameters::randomParameters)
+  {
+    W.set_value(std::vector<float>((unsigned)layer.output_dim * (unsigned)layer.input_dim, 1.0));
+    b.set_value(std::vector<float>((unsigned)layer.output_dim, 1.0));
+  }
+  parameters.push_back({W,b});
+}
+
+void MLP::checkLayersCompatibility()
+{
+  if(layers.empty())
+  {
+    fprintf(stderr, "ERROR (%s) : constructed mlp with 0 layers. Aborting.\n", ERRINFO);
+    exit(1);
+  }
+
+  for(unsigned int i = 0; i < layers.size()-1; i++)
+    if(layers[i].output_dim != layers[i+1].input_dim)
+    {
+      fprintf(stderr, "ERROR (%s) : constructed mlp with incompatible layers. Aborting.\n", ERRINFO);
+      exit(1);
+    }
+}
+
+MLP::Layer::Layer(int input_dim, int output_dim,
+                  float dropout_rate, Activation activation)
+{
+  this->input_dim = input_dim;
+  this->output_dim = output_dim;
+  this->dropout_rate = dropout_rate;
+  this->activation = activation;
+}
+
+std::vector<float> MLP::predict(FeatureModel::FeatureDescription & fd)
+{
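+  // Dropout is disabled while scoring ; the previous setting is restored before returning.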
+  bool currentDropoutActive = dropoutActive;
+  dropoutActive = false;
+  dynet::ComputationGraph cg;
+
+  std::vector<dynet::Expression> expressions;
+
+  for (auto & featValue : fd.values)
+    expressions.emplace_back(featValue2Expression(cg, featValue));
+
+  dynet::Expression input = dynet::concatenate(expressions);
+
+  dynet::Expression output = run(cg, input);
+
+  dropoutActive = currentDropoutActive;
+
+  return as_vector(cg.forward(output));
+}
+
+float MLP::update(FeatureModel::FeatureDescription & fd, int gold)
+{
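+  // Buffer the example ; parameters are only updated once a full minibatch of
+  // ProgramParameters::batchSize examples has been gathered.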
+  fds.emplace_back(fd);
+  golds.emplace_back(gold);
+
+  if ((int)fds.size() < ProgramParameters::batchSize)
+    return 0.0;
+
+  std::vector<dynet::Expression> inputs;
+  dynet::ComputationGraph cg;
+
+  for (auto & example : fds)
+  {
+    std::vector<dynet::Expression> expressions;
+
+    for (auto & featValue : example.values)
+      expressions.emplace_back(featValue2Expression(cg, featValue));
+
+    dynet::Expression input = dynet::concatenate(expressions);
+    inputs.emplace_back(input);
+  }
+
+  dynet::Expression batchedInput = dynet::concatenate_to_batch(inputs);
+  dynet::Expression output = run(cg, batchedInput);
+  dynet::Expression batchedLoss;
+ 
+  if (ProgramParameters::loss == "neglogsoftmax")
+  {
+    batchedLoss = dynet::sum_batches(pickneglogsoftmax(output, golds));
+  }
+  else if (ProgramParameters::loss == "weighted")
+  {
+    batchedLoss = weightedLoss(output, golds);
+  }
+  else if (ProgramParameters::loss == "errorCorrection")
+  {
+    batchedLoss = errorCorrectionLoss(cg,output, golds);
+  }
+  else
+  {
+    fprintf(stderr, "ERROR (%s) : Unknown loss function \'%s\'. Aborting.\n", ERRINFO, ProgramParameters::loss.c_str());
+    exit(1);
+  }
+
+  cg.backward(batchedLoss);
+  trainer->update();
+
+  fds.clear();
+  golds.clear();
+
+  return as_scalar(batchedLoss.value());
+}
+
+dynet::Expression MLP::weightedLoss(dynet::Expression & output, std::vector<unsigned int> & oneHotGolds)
+{
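+  // Per-element negative log softmax loss ; the specific confusion of predicting
+  // class 1 while the gold class is 0 is penalized 100 times more.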
+  std::vector<dynet::Expression> lossExpr;
+  for (unsigned int i = 0; i < output.dim().batch_elems(); i++)
+  {
+    lossExpr.emplace_back(dynet::pickneglogsoftmax(dynet::pick_batch_elem(output, i), oneHotGolds[i]));
+    auto outputVect = dynet::as_vector(dynet::pick_batch_elem(output,i).value());
+    int prediction = 0;
+    for (unsigned int j = 1; j < outputVect.size(); j++)
+      if(outputVect[j] > outputVect[prediction])
+        prediction = j;
+    int gold = oneHotGolds[i];
+    if (prediction == 1 && gold == 0)
+    {
+      lossExpr.back() = lossExpr.back() * 100.0;
+    }
+  }
+
+  return dynet::sum(lossExpr);
+}
+
+dynet::Expression MLP::errorCorrectionLoss(dynet::ComputationGraph & cg, dynet::Expression & output, std::vector<unsigned int> & oneHotGolds)
+{
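+  // c is 1 when the gold class is 0 (and 0 otherwise), a is the predicted
+  // probability of class 0 ; the extra terms added to the negative log softmax
+  // push a towards c.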
+  std::vector<dynet::Expression> lossExpr;
+  for (unsigned int i = 0; i < output.dim().batch_elems(); i++)
+  {
+    unsigned int u = 0;
+    dynet::Expression c = dynet::pick(dynet::one_hot(cg, layers.back().output_dim, oneHotGolds[i]),u);
+    dynet::Expression a = dynet::pick(dynet::softmax(dynet::pick_batch_elem(output,i)),u);
+    lossExpr.emplace_back(dynet::pickneglogsoftmax(dynet::pick_batch_elem(output, i),oneHotGolds[i])+2-c-a*c+(dynet::acos(a-1)*(c-1)));
+    if (ProgramParameters::debug)
+    {
+      cg.forward(lossExpr.back());
+      fprintf(stderr, "a=%.2f c=%.2f loss=%.2f\n", dynet::as_scalar(a.value()),dynet::as_scalar(c.value()),dynet::as_scalar(lossExpr.back().value()));
+    }
+  }
+
+  return dynet::sum(lossExpr);
+}
+
+dynet::DynetParams & MLP::getDefaultParams()
+{
+  static dynet::DynetParams params;
+  params.random_seed = randomSeed;
+
+  std::srand(params.random_seed);
+
+  return params;
+}
+
+dynet::Expression MLP::featValue2Expression(dynet::ComputationGraph & cg, const FeatureModel::FeatureValue & fv)
+{
+  std::vector<dynet::Expression> expressions;
+
+  for (unsigned int i = 0; i < fv.dicts.size(); i++)
+  {
+    Dict * dict = fv.dicts[i];
+    bool isConst = (fv.policies[i] == FeatureModel::Policy::Final) || (dict->mode == Dict::Mode::OneHot);
+
+    auto & lu = dict->getLookupParameter();
+    unsigned int index = dict->getValue(fv.values[i]);
+
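+    // Constant lookups are not updated by backpropagation : they are used for
+    // features with a Final policy and for one-hot dictionaries.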
+    if(isConst)
+      expressions.emplace_back(dynet::const_lookup(cg, lu, index));
+    else
+      expressions.emplace_back(dynet::lookup(cg, lu, index));
+  }
+
+  return dynet::concatenate(expressions);
+}
+
+dynet::Expression MLP::run(dynet::ComputationGraph & cg, dynet::Expression x)
+{
+  static std::vector< std::pair<std::string,dynet::Expression> > exprForDebug;
+
+  // Expression for the current hidden state
+  dynet::Expression h_cur = x;
+
+  if (ProgramParameters::showFeatureRepresentation)
+  {
+    if (ProgramParameters::showFeatureRepresentation == 1)
+      for (unsigned int i = 0; i < 81; i++)
+        fprintf(stderr, "%s", i == 80 ? "\n" : "-");
+    exprForDebug.clear();
+    if (ProgramParameters::showFeatureRepresentation == 1)
+      exprForDebug.emplace_back("Input layer", h_cur);
+    if (ProgramParameters::showFeatureRepresentation >= 2)
+      exprForDebug.emplace_back("", h_cur);
+  }
+
+  for(unsigned int l = 0; l < layers.size(); l++)
+  {
+    // Initialize parameters in computation graph
+    dynet::Expression W = parameter(cg, parameters[l][0]);
+    dynet::Expression b = parameter(cg, parameters[l][1]);
+    // Apply affine transform
+    dynet::Expression a = dynet::affine_transform({b, W, h_cur});
+    // Apply activation function
+    dynet::Expression h = activate(a, layers[l].activation);
+    // Take care of dropout
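+    // (Bernoulli mask with keep probability 1 - dropout_rate when dropout is active,
+    // otherwise the activations are simply scaled by 1 - dropout_rate.)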
+    dynet::Expression h_dropped;
+    if(layers[l].dropout_rate > 0)
+    {
+      if(dropoutActive)
+      {
+        dynet::Expression mask = random_bernoulli(cg,
+          {(unsigned int)layers[l].output_dim}, 1 - layers[l].dropout_rate);
+        h_dropped = cmult(h, mask);
+      }
+      else
+      {
+        h_dropped = h * (1 - layers[l].dropout_rate);
+      }
+    }
+    else
+    {
+      h_dropped = h;
+    }
+
+    if (ProgramParameters::showFeatureRepresentation)
+    {
+      if (ProgramParameters::showFeatureRepresentation == 1)
+      {
+        exprForDebug.emplace_back("Result of h = h*W_" + std::to_string(l) + " + b_" + std::to_string(l), a);
+        exprForDebug.emplace_back("Result of h = a_" + std::to_string(l) + "(h)", h);
+        exprForDebug.emplace_back("Result of h = dropout_" + std::to_string(l) + "(h)", h_dropped);
+      }
+      else if (ProgramParameters::showFeatureRepresentation >= 2)
+      {
+        exprForDebug.emplace_back("", a);
+        exprForDebug.emplace_back("", h);
+      }
+    }
+
+    h_cur = h_dropped;
+  }
+
+  if (ProgramParameters::showFeatureRepresentation)
+  {
+    cg.forward(h_cur);
+
+    if (ProgramParameters::showFeatureRepresentation == 1)
+    {
+      for (auto & it : exprForDebug)
+        fprintf(stderr, "%s (dimension=%lu) :\n%s\n", it.first.c_str(), dynet::as_vector(it.second.value()).size(), expression2str(it.second).c_str());
+      for (unsigned int i = 0; i < 81; i++)
+        fprintf(stderr, "%s", i == 80 ? "\n" : "-");
+    }
+    else if (ProgramParameters::showFeatureRepresentation >= 2)
+    {
+      for (auto & it : exprForDebug)
+        fprintf(stderr, "| %s |", expression2str(it.second).c_str());
+      fprintf(stderr, "\n");
+    }
+  }
+
+  return h_cur;
+}
+
+inline dynet::Expression MLP::activate(dynet::Expression h, Activation f)
+{
+  switch(f)
+  {
+    case LINEAR :
+      return h;
+      break;
+    case RELU :
+      return rectify(h);
+      break;
+    case ELU :
+      return elu(h);
+      break;
+    case SIGMOID :
+      return logistic(h);
+      break;
+    case TANH :
+      return tanh(h);
+      break;
+    case SOFTMAX :
+      return softmax(h);
+      break;
+    default :
+      break;
+  }
+
+  fprintf(stderr, "ERROR (%s) : Activation not implemented \'%s\'. Aborting.\n", ERRINFO, activation2str(f).c_str());
+  exit(1);
+
+  return h;
+}
+
+void MLP::printParameters(FILE * output)
+{
+  fprintf(output, "Parameters : NOT IMPLEMENTED\n");
+}
+
+void MLP::save(const std::string & filename)
+{
+  saveStruct(filename);
+  saveParameters(filename);
+}
+
+void MLP::saveStruct(const std::string & filename)
+{
+  File file(filename, "w");
+  FILE * fd = file.getDescriptor();
+
+  for (auto & layer : layers)
+  {
+    fprintf(fd, "Layer : %d %d %s %.2f\n", layer.input_dim, layer.output_dim, activation2str(layer.activation).c_str(), layer.dropout_rate);
+  }
+}
+
+void MLP::saveParameters(const std::string & filename)
+{
+  dynet::TextFileSaver s(filename, true);
+  std::string prefix("Layer_");
+
+  for(unsigned int i = 0; i < parameters.size(); i++)
+  {
+    s.save(parameters[i][0], prefix + std::to_string(i) + "_W");
+    s.save(parameters[i][1], prefix + std::to_string(i) + "_b");
+  }
+}
+
+void MLP::load(const std::string & filename)
+{
+  loadStruct(filename);
+  loadParameters(filename);
+}
+
+void MLP::loadStruct(const std::string & filename)
+{
+  File file(filename, "r");
+  FILE * fd = file.getDescriptor();
+
+  char activation[1024];
+  int input;
+  int output;
+  float dropout;
+
+  while (fscanf(fd, "Layer : %d %d %1023s %f\n", &input, &output, activation, &dropout) == 4)
+    layers.emplace_back(input, output, dropout, str2activation(activation));
+
+  checkLayersCompatibility();
+
+  for (auto & layer : layers)
+    addLayerToModel(layer);
+}
+
+void MLP::loadParameters(const std::string & filename)
+{
+  dynet::TextFileLoader loader(filename);
+  std::string prefix("Layer_");
+
+  for(unsigned int i = 0; i < parameters.size(); i++)
+  {
+    parameters[i][0] = loader.load_param(model, prefix + std::to_string(i) + "_W");
+    parameters[i][1] = loader.load_param(model, prefix + std::to_string(i) + "_b");
+  }
+}
+
+MLP::MLP(const std::string & filename)
+{
+  dropoutActive = true;
+
+  randomSeed = ProgramParameters::seed;
+  trainer.reset(createTrainer());
+  initDynet();
+
+  load(filename);
+}
+
+void MLP::printTopology(FILE * output)
+{
+  fprintf(output, "(");
+  for(unsigned int i = 0; i < layers.size(); i++)
+  {
+    auto & layer = layers[i];
+
+    if(i == 0)
+      fprintf(output, "%d", layer.input_dim);
+    fprintf(output, "->%d", layer.output_dim);
+  }
+
+  fprintf(output, ")\n");
+}
+
+dynet::ParameterCollection & MLP::getModel()
+{
+  return model;
+}
+
+std::string MLP::expression2str(dynet::Expression & expr)
+{
+  std::string result = "";
+
+  auto elem = dynet::as_vector(expr.value());
+
+  for (auto & f : elem)
+    result += float2str(f, "%f") + " ";
+
+  if (!result.empty())
+    result.pop_back();
+
+  return result;
+}
+
diff --git a/neural_network/src/NeuralNetwork.cpp b/neural_network/src/NeuralNetwork.cpp
index 8be87b89b50a248ba7832e05621a1c8fe986f2de..894e8494b0af4e22690d3f00ae633f006c8cda02 100644
--- a/neural_network/src/NeuralNetwork.cpp
+++ b/neural_network/src/NeuralNetwork.cpp
@@ -1,5 +1,8 @@
 #include "NeuralNetwork.hpp"
 
+int NeuralNetwork::randomSeed = 0;
+bool NeuralNetwork::dynetIsInit = false;
+
 std::string NeuralNetwork::expression2str(dynet::Expression & expr)
 {
   std::string result = "";
@@ -15,3 +18,8 @@ std::string NeuralNetwork::expression2str(dynet::Expression & expr)
   return result;
 }
 
+dynet::ParameterCollection & NeuralNetwork::getModel()
+{
+  return model;
+}
+