From 9ea75e4320a515dfb65f5aca1ea062ffd622fb28 Mon Sep 17 00:00:00 2001
From: Franck Dary <franck.dary@lis-lab.fr>
Date: Thu, 21 Mar 2019 15:58:42 +0100
Subject: [PATCH] Add gradient debug helper and enable sparse updates

Add MLPBase::checkGradients(), a debug helper that can dump the gradients
of every parameter after the backward pass, and explicitly enable sparse
updates on the trainer created in MLP::createTrainer().
---
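Note (not part of the commit): below is a minimal standalone sketch of the
gradient dump loop added in MLPBase::checkGradients(), assuming DyNet's
column-major (Eigen) tensor storage; printColMajor() and the example values
are hypothetical, for illustration only.

#include <cstdio>
#include <vector>

// Print a column-major float buffer as an nbRows x nbCols matrix,
// mirroring how checkGradients() walks a parameter's gradient tensor.
static void printColMajor(const float * v, int nbRows, int nbCols)
{
  for (int i = 0; i < nbRows; i++)
    for (int j = 0; j < nbCols; j++)
      fprintf(stderr, "%8.5f%s", v[i + j*nbRows], j == nbCols-1 ? "\n" : " ");
}

int main()
{
  // 2x3 matrix [[1,3,5],[2,4,6]] stored column by column
  std::vector<float> gradients{1, 2, 3, 4, 5, 6};
  printColMajor(gradients.data(), 2, 3);
  return 0;
}

Built on its own, this prints the matrix row by row, which is the layout
checkGradients() is expected to produce for each parameter.
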
 neural_network/include/MLPBase.hpp |  5 +++++
 neural_network/src/MLP.cpp         | 14 +++++++++++---
 neural_network/src/MLPBase.cpp     | 25 +++++++++++++++++++++++++
 3 files changed, 41 insertions(+), 3 deletions(-)

diff --git a/neural_network/include/MLPBase.hpp b/neural_network/include/MLPBase.hpp
index b8bafcd..2d047e1 100644
--- a/neural_network/include/MLPBase.hpp
+++ b/neural_network/include/MLPBase.hpp
@@ -35,6 +35,11 @@ class MLPBase
   /// @brief gold classes of the current minibatch.
   std::vector<unsigned int> golds;
 
+  private :
+
+  /// @brief Check gradient values for debugging purposes.
+  void checkGradients();
+
   public :
 
   /// @brief Add the parameters of a layer into the dynet model.
diff --git a/neural_network/src/MLP.cpp b/neural_network/src/MLP.cpp
index 3dae489..c938842 100644
--- a/neural_network/src/MLP.cpp
+++ b/neural_network/src/MLP.cpp
@@ -26,15 +26,23 @@ dynet::Trainer * MLP::createTrainer()
 {
   auto optimizer = noAccentLower(ProgramParameters::optimizer);
 
+  dynet::Trainer * trainer = nullptr;
+
   if (optimizer == "amsgrad")
-    return new dynet::AmsgradTrainer(model, ProgramParameters::learningRate, ProgramParameters::beta1, ProgramParameters::beta2, ProgramParameters::bias);
+    trainer = new dynet::AmsgradTrainer(model, ProgramParameters::learningRate, ProgramParameters::beta1, ProgramParameters::beta2, ProgramParameters::bias);
   else if (optimizer == "adam")
-    return new dynet::AdamTrainer(model, ProgramParameters::learningRate, ProgramParameters::beta1, ProgramParameters::beta2, ProgramParameters::bias);
+    trainer = new dynet::AdamTrainer(model, ProgramParameters::learningRate, ProgramParameters::beta1, ProgramParameters::beta2, ProgramParameters::bias);
   else if (optimizer == "sgd")
-    return new dynet::SimpleSGDTrainer(model, ProgramParameters::learningRate);
+    trainer = new dynet::SimpleSGDTrainer(model, ProgramParameters::learningRate);
   else if (optimizer == "none")
     return nullptr;
 
+  if (trainer)
+  {
+    trainer->sparse_updates_enabled = true;
+    return trainer;
+  }
+
   fprintf(stderr, "ERROR (%s) : unknown optimizer \'%s\'. Aborting.\n", ERRINFO, optimizer.c_str());
 
   exit(1);
diff --git a/neural_network/src/MLPBase.cpp b/neural_network/src/MLPBase.cpp
index aa06c1d..60236bd 100644
--- a/neural_network/src/MLPBase.cpp
+++ b/neural_network/src/MLPBase.cpp
@@ -135,12 +135,37 @@ float MLPBase::update(FeatureModel::FeatureDescription & fd, int gold)
 
   cg.backward(batchedLoss);
 
+  checkGradients();
+
   fds.clear();
   golds.clear();
 
   return as_scalar(batchedLoss.value());
 }
 
+void MLPBase::checkGradients()
+{
+  bool printGradients = false;
+
+  if (printGradients)
+  {
+    fprintf(stderr, "Gradients :\n");
+    for (auto & layer : parameters)
+      for (auto & param : layer)
+      {
+        auto dim = param.dim();
+        auto gradients = param.gradients()->v;
+        fprintf(stderr, "Parameter's gradients :\n");
+        int nbRows = dim.rows();
+        int nbCols = dim.cols();
+        // DyNet tensors are column-major : element (i,j) is at v[i + j*nbRows]
+        for (int i = 0; i < nbRows; i++)
+          for (int j = 0; j < nbCols; j++)
+            fprintf(stderr, "%8.5f%s", gradients[i + j*nbRows], j == nbCols-1 ? "\n" : " ");
+      }
+  }
+}
+
 dynet::Expression MLPBase::weightedLoss(dynet::Expression & output, std::vector<unsigned int> & oneHotGolds)
 {
   std::vector<dynet::Expression> lossExpr;
-- 
GitLab