From 89be9aff969193f5eed9465120a725f0d109d596 Mon Sep 17 00:00:00 2001
From: Franck Dary <franck.dary@etu.univ-amu.fr>
Date: Tue, 20 Nov 2018 13:56:40 +0100
Subject: [PATCH] Added a way to get sequences mean entropy and standard
 deviation during training

---
 decoder/src/Decoder.cpp                   |  6 +--
 trainer/include/Trainer.hpp               |  2 +-
 trainer/src/Trainer.cpp                   | 53 ++++++++++++++++++++---
 trainer/src/macaon_train.cpp              |  2 +
 transition_machine/include/Classifier.hpp |  7 +++
 transition_machine/src/Classifier.cpp     | 13 ++++++
 6 files changed, 71 insertions(+), 12 deletions(-)

diff --git a/decoder/src/Decoder.cpp b/decoder/src/Decoder.cpp
index 55920a8..1a70e99 100644
--- a/decoder/src/Decoder.cpp
+++ b/decoder/src/Decoder.cpp
@@ -72,11 +72,7 @@ void Decoder::decode()
     {
       nbActionsInSequence++;
 
-      for (unsigned int i = 0; i < 2 && i < weightedActions.size(); i++)
-      {
-        auto it = weightedActions.begin() + i;
-        entropyAccumulator -= it->second.first - (it->second.first - weightedActions[0].second.first);
-      }
+      entropyAccumulator += Classifier::computeEntropy(weightedActions);
 
       if (config.head >= 1 && config.getTape(ProgramParameters::sequenceDelimiterTape)[config.head-1] != ProgramParameters::sequenceDelimiter)
         justFlipped = false;
diff --git a/trainer/include/Trainer.hpp b/trainer/include/Trainer.hpp
index 1ca9132..045c800 100644
--- a/trainer/include/Trainer.hpp
+++ b/trainer/include/Trainer.hpp
@@ -59,7 +59,7 @@ class Trainer
   /// @brief Get the scores of the classifiers on the dev dataset.
   ///
   /// @return Map from each Classifier name to their score.
-  std::map<std::string, float> getScoreOnDev();
+  std::map<std::string, std::pair<float, std::pair<float, float> > > getScoreOnDev();
 
   public :
 
diff --git a/trainer/src/Trainer.cpp b/trainer/src/Trainer.cpp
index 3f43c06..b3b55b9 100644
--- a/trainer/src/Trainer.cpp
+++ b/trainer/src/Trainer.cpp
@@ -12,7 +12,7 @@ Trainer::Trainer(TransitionMachine & tm, BD & bd, Config & config, BD * devBD, C
 {
 }
 
-std::map<std::string, float> Trainer::getScoreOnDev()
+std::map<std::string, std::pair<float, std::pair<float, float> > > Trainer::getScoreOnDev()
 {
   if (!devConfig)
     return {};
@@ -25,6 +25,11 @@ std::map<std::string, float> Trainer::getScoreOnDev()
   if (ProgramParameters::debug)
     fprintf(stderr, "Computing score on dev set\n");
 
+  int nbActionsInSequence = 0;
+  float entropyAccumulator = 0.0;
+  bool justFlipped = false;
+  std::vector<float> entropies;
+
   while (!devConfig->isFinal())
   {
     TransitionMachine::State * currentState = tm.getCurrentState();
@@ -82,12 +87,46 @@ std::map<std::string, float> Trainer::getScoreOnDev()
       TransitionMachine::Transition * transition = tm.getTransition(actionName);
       tm.takeTransition(transition);
       devConfig->moveHead(transition->headMvt);
+
+      if (ProgramParameters::printEntropy)
+      {
+        nbActionsInSequence++;
+
+        entropyAccumulator += Classifier::computeEntropy(weightedActions);
+
+        if (devConfig->head >= 1 && devConfig->getTape(ProgramParameters::sequenceDelimiterTape)[devConfig->head-1] != ProgramParameters::sequenceDelimiter)
+          justFlipped = false;
+
+        if ((devConfig->head >= 1 && devConfig->getTape(ProgramParameters::sequenceDelimiterTape)[devConfig->head-1] == ProgramParameters::sequenceDelimiter && !justFlipped))
+        {
+          justFlipped = true;
+          entropyAccumulator /= nbActionsInSequence;
+          nbActionsInSequence = 0;
+          entropies.emplace_back(entropyAccumulator);
+          entropyAccumulator = 0.0;
+        }
+      }
+
     }
   }
 
-  std::map<std::string, float> scores;
+  std::map<std::string, std::pair<float,std::pair<float,float> > > scores;
   for (auto & it : counts)
-    scores[it.first] = 100.0 * it.second.second / it.second.first;
+  {
+    scores[it.first].first = 100.0 * it.second.second / it.second.first;
+    if (ProgramParameters::printEntropy)
+    {
+      for (float f : entropies)
+        scores[it.first].second.first += f;
+
+      scores[it.first].second.first /= entropies.size();
+
+      for (float f : entropies)
+        scores[it.first].second.second += (f-scores[it.first].second.first)*(f-scores[it.first].second.first);
+
+      scores[it.first].second.second /= entropies.size();
+    }
+  }
 
   return scores;
 }
@@ -273,9 +312,9 @@ void Trainer::printScoresAndSave(FILE * output)
   {
     for (auto & it : devScores)
     {
-      if (bestScores.count(it.first) == 0 || bestScores[it.first] < it.second)
+      if (bestScores.count(it.first) == 0 || bestScores[it.first] < it.second.first)
       {
-        bestScores[it.first] = it.second;
+        bestScores[it.first] = it.second.first;
         saved[it.first] = true;
       }
       else
@@ -314,8 +353,10 @@ void Trainer::printScoresAndSave(FILE * output)
     names.emplace_back(it.first);
     acc.emplace_back("accuracy");
     train.emplace_back(": train(" + float2str(scores[it.first], "%.2f") + "%)");
-    dev.emplace_back(devConfig ? "dev(" +float2str(devScores[it.first], "%.2f") + "%)" : "");
+    dev.emplace_back(devConfig ? "dev(" +float2str(devScores[it.first].first, "%.2f") + "%)" : "");
     savedStr.emplace_back(saved[it.first] ? "SAVED" : "");
+    if (ProgramParameters::printEntropy)
+      savedStr.back() += " Entropy[" + float2str(devScores[it.first].second.first, "%.2f") + "\u00B1" + float2str(devScores[it.first].second.second, "%.2f") + "]";
   }
 
   if (ProgramParameters::interactive)
diff --git a/trainer/src/macaon_train.cpp b/trainer/src/macaon_train.cpp
index 980b2d0..4122411 100644
--- a/trainer/src/macaon_train.cpp
+++ b/trainer/src/macaon_train.cpp
@@ -40,6 +40,7 @@ po::options_description getOptionsDescription()
   opt.add_options()
     ("help,h", "Produce this help message")
     ("debug,d", "Print infos on stderr")
+    ("printEntropy", "Print mean entropy and standard deviation across sequences")
     ("optimizer", po::value<std::string>()->default_value("amsgrad"),
       "The learning algorithm to use : amsgrad | adam | sgd")
     ("dev", po::value<std::string>()->default_value(""),
@@ -237,6 +238,7 @@ int main(int argc, char * argv[])
   ProgramParameters::bdName = vm["bd"].as<std::string>();
   ProgramParameters::mcdName = vm["mcd"].as<std::string>();
   ProgramParameters::debug = vm.count("debug") == 0 ? false : true;
+  ProgramParameters::printEntropy = vm.count("printEntropy") == 0 ? false : true;
   ProgramParameters::printTime = vm.count("printTime") == 0 ? false : true;
   ProgramParameters::trainName = vm["train"].as<std::string>();
   ProgramParameters::devName = vm["dev"].as<std::string>();
diff --git a/transition_machine/include/Classifier.hpp b/transition_machine/include/Classifier.hpp
index 1a3bbf6..667f6a1 100644
--- a/transition_machine/include/Classifier.hpp
+++ b/transition_machine/include/Classifier.hpp
@@ -70,6 +70,13 @@ class Classifier
   /// @param threshold Print only the n most weighted Action.
   static void printWeightedActions(FILE * output, WeightedActions & wa, int threshold = 5);
 
+  /// @brief Compute the entropy of a set of weighted actions,
+  /// used as a measure of the classifier's confidence.
+  ///
+  /// @param wa The actions with a score associated for each one.
+  /// @return The computed entropy.
+  static float computeEntropy(WeightedActions & wa);
+
   /// @brief Convert a string to its corresponding Type.
   ///
   /// If s is unknown, the program aborts.
diff --git a/transition_machine/src/Classifier.cpp b/transition_machine/src/Classifier.cpp
index 5ff853b..106b189 100644
--- a/transition_machine/src/Classifier.cpp
+++ b/transition_machine/src/Classifier.cpp
@@ -266,3 +266,16 @@ void Classifier::explainCostOfActions(FILE * output, Config & config)
   }
 }
 
+float Classifier::computeEntropy(WeightedActions & wa)
+{
+  float entropy = 0.0;
+
+  for (unsigned int i = 0; i < 2 && i < wa.size(); i++)
+  {
+    auto it = wa.begin() + i;
+    entropy -= it->second.first - (it->second.first - wa[0].second.first);
+  }
+
+  return entropy;
+}
+
-- 
GitLab