diff --git a/MLP/include/MLP.hpp b/MLP/include/MLP.hpp
index 14b011c5b1bc0696b4b6f3d71076bd288854e93d..479f403d1cf54f293c215b7d78c672d45115901f 100644
--- a/MLP/include/MLP.hpp
+++ b/MLP/include/MLP.hpp
@@ -37,6 +37,8 @@ class MLP
   /// @brief The seed that will be used by RNG (srand and dynet)
   static int randomSeed;
 
+  static bool dynetIsInit;
+
   /// @brief Get the string corresponding to an Activation.
   ///
   /// @param a The activation.
diff --git a/MLP/src/MLP.cpp b/MLP/src/MLP.cpp
index af43355cb3dce4ec60379d2e56a8af3e96add5fa..7b64414ce9ae7fa1eca0a3fac7cc78ede0c87907 100644
--- a/MLP/src/MLP.cpp
+++ b/MLP/src/MLP.cpp
@@ -7,6 +7,7 @@
 #include <dynet/io.h>
 
 int MLP::randomSeed = 0;
+bool MLP::dynetIsInit = false;
 
 std::string MLP::activation2str(Activation a)
 {
@@ -72,12 +73,10 @@ MLP::Activation MLP::str2activation(std::string s)
 
 void MLP::initDynet()
 {
-  static bool init = false;
-
-  if(init)
+  if(dynetIsInit)
     return;
 
-  init = true;
+  dynetIsInit = true;
 
   dynet::initialize(getDefaultParams());
 }
diff --git a/maca_common/include/Dict.hpp b/maca_common/include/Dict.hpp
index d547a3d7e402b800457f07b24e130f57c275c342..68de3278de280fab70e74ea837bd9e48824bfb38 100644
--- a/maca_common/include/Dict.hpp
+++ b/maca_common/include/Dict.hpp
@@ -203,6 +203,8 @@ class Dict
   /// @param directory The directory in which we will save every Dict.
   /// @param namePrefix The prefix of the name of the dicts we need to save.
   static void saveDicts(const std::string & directory, const std::string & namePrefix);
+  /// @brief Delete all Dicts.
+  static void deleteDicts();
   /// @brief Save the current Dict in the corresponding file.
   void save();
   /// @brief Get the vector value of an entry.
diff --git a/maca_common/include/ProgramParameters.hpp b/maca_common/include/ProgramParameters.hpp
index 1e8e5274214ead767108ac2b00b85ebf0f5efd35..5f0a5e491fa63a7b7f0c53211dbf681199563724 100644
--- a/maca_common/include/ProgramParameters.hpp
+++ b/maca_common/include/ProgramParameters.hpp
@@ -12,6 +12,7 @@ struct ProgramParameters
   static std::string input;
   static std::string expName;
   static std::string expPath;
+  static std::string baseExpName;
   static std::string langPath;
   static std::string templatePath;
   static std::string templateName;
@@ -41,6 +42,7 @@
   static float dynamicProbability;
   static bool showFeatureRepresentation;
   static int iterationSize;
+  static int nbTrain;
 
   private :
 
diff --git a/maca_common/src/Dict.cpp b/maca_common/src/Dict.cpp
index 8a8179dab4f2fc32b3aec1237045524699e5ecd7..3e6f655c673b9f06c466166029704488ce57ec86 100644
--- a/maca_common/src/Dict.cpp
+++ b/maca_common/src/Dict.cpp
@@ -416,3 +416,8 @@ void Dict::printForDebug(FILE * output)
   fprintf(output, "Dict name \'%s\' nbElems = %lu\n", name.c_str(), str2vec.size());
 }
 
+void Dict::deleteDicts()
+{
+  str2dict.clear();
+}
+
diff --git a/maca_common/src/ProgramParameters.cpp b/maca_common/src/ProgramParameters.cpp
index e864d3faf4e502adbb2f37af2e3ea4466e22774f..c84bf7cd82ba41e02b05dbf830465323990f21c8 100644
--- a/maca_common/src/ProgramParameters.cpp
+++ b/maca_common/src/ProgramParameters.cpp
@@ -6,6 +6,7 @@ ProgramParameters::ProgramParameters()
 
 std::string ProgramParameters::input;
 std::string ProgramParameters::expName;
+std::string ProgramParameters::baseExpName;
 std::string ProgramParameters::expPath;
 std::string ProgramParameters::langPath;
 std::string ProgramParameters::templatePath;
@@ -36,3 +37,4 @@ int ProgramParameters::dynamicEpoch;
 float ProgramParameters::dynamicProbability;
 bool ProgramParameters::showFeatureRepresentation;
 int ProgramParameters::iterationSize;
+int ProgramParameters::nbTrain;
diff --git a/trainer/src/macaon_train.cpp b/trainer/src/macaon_train.cpp
index 5debf9da6ddf36425b83bd8731aa6f8dbd3570cb..3505310cbc7c99f1f6cdf00b4fa4e7292def445a 100644
--- a/trainer/src/macaon_train.cpp
+++ b/trainer/src/macaon_train.cpp
@@ -54,6 +54,8 @@
       "Learning rate of the optimizer")
     ("seed,s", po::value<int>()->default_value(100),
       "The random seed that will initialize RNG")
+    ("nbTrain", po::value<int>()->default_value(0),
+      "The number of models that will be trained, with only the random seed changing")
     ("duplicates", po::value<bool>()->default_value(true),
       "Remove identical training examples")
     ("showFeatureRepresentation", po::value<bool>()->default_value(false),
@@ -209,6 +211,7 @@ int main(int argc, char * argv[])
   po::variables_map vm = checkOptions(od, argc, argv);
 
   ProgramParameters::expName = vm["expName"].as<std::string>();
+  ProgramParameters::baseExpName = ProgramParameters::expName;
   ProgramParameters::templateName = vm["templateName"].as<std::string>();
   ProgramParameters::tmName = vm["tm"].as<std::string>();
   ProgramParameters::bdName = vm["bd"].as<std::string>();
@@ -219,6 +222,7 @@
   ProgramParameters::lang = vm["lang"].as<std::string>();
   ProgramParameters::nbIter = vm["nbiter"].as<int>();
   ProgramParameters::seed = vm["seed"].as<int>();
+  ProgramParameters::nbTrain = vm["nbTrain"].as<int>();
   ProgramParameters::removeDuplicates = vm["duplicates"].as<bool>();
   ProgramParameters::interactive = vm["interactive"].as<bool>();
   ProgramParameters::shuffleExamples = vm["shuffle"].as<bool>();
@@ -232,11 +236,23 @@
   ProgramParameters::showFeatureRepresentation = vm["showFeatureRepresentation"].as<bool>();
   ProgramParameters::iterationSize = vm["iterationSize"].as<int>();
 
-  for (int i = 0; i < 10; i++)
+  if (ProgramParameters::nbTrain)
+  {
+    for (int i = 0; i < ProgramParameters::nbTrain; i++)
+    {
+      fprintf(stderr, "Training number %d / %d :\n", i+1, ProgramParameters::nbTrain);
+      ProgramParameters::expName = ProgramParameters::baseExpName + "_" + std::to_string(i);
+      updatePaths();
+      createExpPath();
+      Dict::deleteDicts();
+      launchTraining();
+    }
+  }
+  else
   {
-    ProgramParameters::expName += "_" + std::to_string(i);
     updatePaths();
     createExpPath();
+    Dict::deleteDicts();
     launchTraining();
   }