diff --git a/UD_any/evaluate.sh b/UD_any/evaluate.sh
index 8b8bddab82c2e518edd45a79c8e9dd3d70c13b33..7f3909662e4e8f92e52b8a5ae6bab518c032594f 100755
--- a/UD_any/evaluate.sh
+++ b/UD_any/evaluate.sh
@@ -51,18 +51,34 @@ then
   print_usage_and_exit
 fi
 
+MCD="ID,FORM,LEMMA,UPOS,XPOS,FEATS,HEAD,DEPREL"
+NO=""
+for arg in "$@"
+do
+  if [ "$NO" = "1" ]
+  then
+    MCD="$arg"
+    NO=""
+  fi
+  if [ "$arg" = "--mcd" ]
+  then
+    NO="1"
+  fi
+done
+
 EVALCONLL="../scripts/conll18_ud_eval.py"
 OUTPUT=$EXPPATH"/predicted_eval.tsv"
 
-if [ "$MODE" = "tsv" ]; then
-macaon decode --model $EXPPATH --inputTSV $REF $@ > $OUTPUT && $EVALCONLL $REF $OUTPUT || exit 1
-exit 0
+INPUT="$REF"
+INPUTARG="--inputTSV"
+if [ "$MODE" = "txt" ]; then
+  INPUT="$REFRAW"
+  INPUTARG="--inputTXT"
 fi
 
-if [ "$MODE" = "txt" ]; then
-macaon decode --model $EXPPATH --inputTXT $REFRAW $@ > $OUTPUT && $EVALCONLL $REF $OUTPUT || exit 1
-exit 0
+if [ ! -f "$OUTPUT" ]; then
+  macaon decode --model "$EXPPATH" "$INPUTARG" "$INPUT" "$@" > "$OUTPUT" || { rm -f -- "$OUTPUT"; exit 1; }
 fi
 
-print_usage_and_exit
+"$EVALCONLL" --mcd "$MCD" "$REF" "$OUTPUT" || exit 1
 
diff --git a/UD_any/launchBatches.py b/UD_any/launchBatches.py
index 3dd7ebb3b15397d8a89ba30fff99ae98ecb2a746..157d5d72e3df208830005e08cd1108fb84866038 100755
--- a/UD_any/launchBatches.py
+++ b/UD_any/launchBatches.py
@@ -26,10 +26,10 @@ def addNamesAndCommandsTrain(names, commands, mode, expName, arguments, seed, pr
 ###############################################################################
 
 ###############################################################################
-def addNamesAndCommandsDecode(names, commands, mode, expName, arguments, pretrained) :
+def addNamesAndCommandsDecode(names, commands, mode, expName, arguments) :
   names.append(expName)
   
-  commands.append("./evaluate.sh {} bin/{} pretrained={} --silent {}".format(mode, expName, pretrained,arguments))
+  commands.append("./evaluate.sh {} bin/{} --silent {}".format(mode, expName, arguments))
 ###############################################################################
 
 ###############################################################################
@@ -64,7 +64,7 @@ if __name__ == "__main__" :
           prepareExperiment(xp['lang'],xp['template'],xp['expName'])
           addNamesAndCommandsTrain(names, commands, xp['mode'],xp['expName'],xp['arguments'],seed=100+i, pretrained=pretrained)
         else :
-          addNamesAndCommandsDecode(names, commands, xp['mode'],xp['expName'],xp['evalArguments'], pretrained=pretrained)
+          addNamesAndCommandsDecode(names, commands, xp['mode'],xp['expName'],xp['evalArguments'])
 
   launchSlurmArray(names, commands, name, device, nbHours, limit, nbCPU)
 ###############################################################################
diff --git a/UD_any/templates/tagparser_incr/machine.rm b/UD_any/templates/tagparser_incr/machine.rm
index a4e542a35c65605cd5c113dc65586f8668efaee1..14406adc94137dc8a14fe7df6f8ae100afbe902b 100644
--- a/UD_any/templates/tagparser_incr/machine.rm
+++ b/UD_any/templates/tagparser_incr/machine.rm
@@ -1,31 +1,31 @@
 Name : Tagger, Morpho, Lemmatizer, Parser and Segmenter Machine
 Classifier : tagparser
 {
-	Transitions : {tagger,data/tagger.ts morpho,data/morpho_whole.ts lemmatizer_rules,data/lemmatizer_rules.ts lemmatizer_case,data/lemmatizer_case.ts parser,data/parser_eager_rel_strict.ts segmenter,data/segmenter.ts}
+	Transitions : {tagger,data/tagger.ts morpho,data/morpho_whole.ts lemmatizer_rules,data/lemmatizer_rules.ts parser,data/parser_eager_rel_strict.ts segmenter,data/segmenter.ts}
 	LossMultiplier : {}
 	Network type : Modular
-  Contextual : Window{-10 10} Columns{FORM} LSTM{1 1 0 1} In{64} Out{128} w2v{FORM,data/FORM.w2v} Targets{b.-3 b.-2 b.-1 b.0 b.1 b.2 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1}
-  Context : Targets{b.-3 b.-2 b.-1 b.0 b.1 b.2 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{EOS ID} LSTM{1 1 0 1} In{64} Out{64} w2v{}
-  Context : Targets{b.-3 b.-2 b.-1 b.0 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{UPOS FEATS DEPREL} LSTM{1 1 0 1} In{64} Out{64} w2v{}
+  Context : Targets{b.-3 b.-2 b.-1 b.0 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{FORM} LSTM{1 1 0.0 1} In{300} Out{128} w2v{FORM,data/W2V/fasttext.w2v}
+  Context : Targets{b.-3 b.-2 b.-1 b.0 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{EOS ID UPOS FEATS DEPREL} LSTM{1 1 0 1} In{128} Out{64} w2v{}
   Focused : Column{prefix3:FORM} NbElem{3} Buffer{0} Stack{} LSTM{1 1 0 1} In{64} Out{64} w2v{}
   Focused : Column{suffix3:FORM} NbElem{3} Buffer{0} Stack{} LSTM{1 1 0 1} In{64} Out{64} w2v{}
-  History : NbElem{10} LSTM{1 1 0 1} In{32} Out{32}
-	InputDropout : 0.3
-	MLP : {1600 0.3 1600 0.3}
+  History : NbElem{10} LSTM{1 1 0 1} In{128} Out{64}
+  HistoryMine : NbElem{4} LSTM{1 1 0 1} In{128} Out{64}
+  StateName : Out{64}
+  Distance : FromBuffer{} FromStack{0 1 2} ToBuffer{0} ToStack{} Threshold{15} LSTM{1 1 0.0 1} In{128} Out{64}
+	InputDropout : 0.5
+	MLP : {3200 0.4 1600 0.4}
 	End
   Optimizer : Adagrad {0.01 0.000001 0 0.0000000001}
   Type : classification
   Loss : crossentropy
 }
-Splitwords : data/splitwords.ts
 Predictions : UPOS FEATS LEMMA HEAD DEPREL EOS
 Strategy
 {
 	Block : End{cannotMove}
 	tagger morpho * 0
 	morpho lemmatizer_rules * 0
-  lemmatizer_rules lemmatizer_case * 0
-  lemmatizer_case parser * 0
+  lemmatizer_rules parser * 0
 	parser segmenter eager_SHIFT 0
 	parser segmenter eager_RIGHT_rel 0
 	parser parser * 0
diff --git a/UD_any/templates/tagparser_incr_nopretrained/machine.rm b/UD_any/templates/tagparser_incr_nopretrained/machine.rm
new file mode 100644
index 0000000000000000000000000000000000000000..3b513b45f4751e5c97d09aca65b04ad6eb2d5a01
--- /dev/null
+++ b/UD_any/templates/tagparser_incr_nopretrained/machine.rm
@@ -0,0 +1,33 @@
+Name : Tagger, Morpho, Lemmatizer, Parser and Segmenter Machine
+Classifier : tagparser
+{
+	Transitions : {tagger,data/tagger.ts morpho,data/morpho_whole.ts lemmatizer_rules,data/lemmatizer_rules.ts parser,data/parser_eager_rel_strict.ts segmenter,data/segmenter.ts}
+	LossMultiplier : {}
+	Network type : Modular
+  Context : Targets{b.-3 b.-2 b.-1 b.0 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{FORM} LSTM{1 1 0.0 1} In{128} Out{128} w2v{FORM,data/FORM.w2v}
+  Context : Targets{b.-3 b.-2 b.-1 b.0 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{EOS ID UPOS FEATS DEPREL} LSTM{1 1 0 1} In{128} Out{64} w2v{}
+  Focused : Column{prefix3:FORM} NbElem{3} Buffer{0} Stack{} LSTM{1 1 0 1} In{64} Out{64} w2v{}
+  Focused : Column{suffix3:FORM} NbElem{3} Buffer{0} Stack{} LSTM{1 1 0 1} In{64} Out{64} w2v{}
+  History : NbElem{10} LSTM{1 1 0 1} In{128} Out{64}
+  HistoryMine : NbElem{4} LSTM{1 1 0 1} In{128} Out{64}
+  StateName : Out{64}
+  Distance : FromBuffer{} FromStack{0 1 2} ToBuffer{0} ToStack{} Threshold{15} LSTM{1 1 0.0 1} In{128} Out{64}
+	InputDropout : 0.5
+	MLP : {3200 0.4 1600 0.4}
+	End
+  Optimizer : Adagrad {0.01 0.000001 0 0.0000000001}
+  Type : classification
+  Loss : crossentropy
+}
+Predictions : UPOS FEATS LEMMA HEAD DEPREL EOS
+Strategy
+{
+	Block : End{cannotMove}
+	tagger morpho * 0
+	morpho lemmatizer_rules * 0
+  lemmatizer_rules parser * 0
+	parser segmenter eager_SHIFT 0
+	parser segmenter eager_RIGHT_rel 0
+	parser parser * 0
+	segmenter tagger * 1
+}
diff --git a/UD_any/templates/tagparser_seq/machine.rm b/UD_any/templates/tagparser_seq/machine.rm
index 80d27294dc930200cd080c6e03ded2b830654e75..a605f226d4e686de0aa8ac013e1fc15d055afa0d 100644
--- a/UD_any/templates/tagparser_seq/machine.rm
+++ b/UD_any/templates/tagparser_seq/machine.rm
@@ -1,22 +1,24 @@
 Name : Tagger, Morpho, Lemmatizer, Parser and Segmenter Machine
 Classifier : tagparser
 {
-  Transitions : {tagger,data/tagger.ts morpho,data/morpho_whole.ts lemmatizer_rules,data/lemmatizer_rules.ts lemmatizer_case,data/lemmatizer_case.ts parser,data/parser_eager_rel_strict.ts segmenter,data/segmenter.ts}
-  LossMultiplier : {}
+	Transitions : {tagger,data/tagger.ts morpho,data/morpho_whole.ts lemmatizer_rules,data/lemmatizer_rules.ts parser,data/parser_eager_rel_strict.ts segmenter,data/segmenter.ts}
+	LossMultiplier : {}
 	Network type : Modular
-  Contextual : Window{-10 10} Columns{FORM} LSTM{1 1 0 1} In{64} Out{128} w2v{FORM,data/FORM.w2v} Targets{b.-3 b.-2 b.-1 b.0 b.1 b.2 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1}
-  Context : Targets{b.-3 b.-2 b.-1 b.0 b.1 b.2 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{EOS ID UPOS FEATS DEPREL} LSTM{1 1 0 1} In{64} Out{64} w2v{}
+  Context : Targets{b.-3 b.-2 b.-1 b.0 b.1 b.2 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{FORM} LSTM{1 1 0.0 1} In{300} Out{128} w2v{FORM,data/W2V/fasttext.w2v}
+  Context : Targets{b.-3 b.-2 b.-1 b.0 b.1 b.2 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{EOS ID UPOS FEATS DEPREL} LSTM{1 1 0 1} In{128} Out{64} w2v{}
   Focused : Column{prefix3:FORM} NbElem{3} Buffer{0} Stack{} LSTM{1 1 0 1} In{64} Out{64} w2v{}
   Focused : Column{suffix3:FORM} NbElem{3} Buffer{0} Stack{} LSTM{1 1 0 1} In{64} Out{64} w2v{}
-  History : NbElem{10} LSTM{1 1 0 1} In{32} Out{32}
-	InputDropout : 0.3
-	MLP : {1600 0.3 1600 0.3}
+  History : NbElem{10} LSTM{1 1 0 1} In{128} Out{64}
+  HistoryMine : NbElem{4} LSTM{1 1 0 1} In{128} Out{64}
+  StateName : Out{64}
+  Distance : FromBuffer{} FromStack{0 1 2} ToBuffer{0} ToStack{} Threshold{15} LSTM{1 1 0.0 1} In{128} Out{64}
+	InputDropout : 0.5
+	MLP : {3200 0.4 1600 0.4}
 	End
   Optimizer : Adagrad {0.01 0.000001 0 0.0000000001}
   Type : classification
   Loss : crossentropy
 }
-Splitwords : data/splitwords.ts
 Predictions : UPOS FEATS LEMMA HEAD DEPREL EOS
 Strategy
 {
@@ -25,8 +27,7 @@ Strategy
 	Block : End{cannotMove}
 	morpho morpho * 1
 	Block : End{cannotMove}
-	lemmatizer_rules lemmatizer_case * 0
-  lemmatizer_case lemmatizer_rules * 1
+  lemmatizer_rules lemmatizer_rules * 1
 	Block : End{cannotMove}
 	parser segmenter eager_SHIFT 0
 	parser segmenter eager_RIGHT_rel 0
diff --git a/UD_any/templates/tagparser_seq_nopretrained/machine.rm b/UD_any/templates/tagparser_seq_nopretrained/machine.rm
new file mode 100644
index 0000000000000000000000000000000000000000..0691aa7416b26563e241015823e3f8460a4939b7
--- /dev/null
+++ b/UD_any/templates/tagparser_seq_nopretrained/machine.rm
@@ -0,0 +1,36 @@
+Name : Tagger, Morpho, Lemmatizer, Parser and Segmenter Machine
+Classifier : tagparser
+{
+	Transitions : {tagger,data/tagger.ts morpho,data/morpho_whole.ts lemmatizer_rules,data/lemmatizer_rules.ts parser,data/parser_eager_rel_strict.ts segmenter,data/segmenter.ts}
+	LossMultiplier : {}
+	Network type : Modular
+  Context : Targets{b.-3 b.-2 b.-1 b.0 b.1 b.2 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{FORM} LSTM{1 1 0.0 1} In{128} Out{128} w2v{FORM,data/FORM.w2v}
+  Context : Targets{b.-3 b.-2 b.-1 b.0 b.1 b.2 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{EOS ID UPOS FEATS DEPREL} LSTM{1 1 0 1} In{128} Out{64} w2v{}
+  Focused : Column{prefix3:FORM} NbElem{3} Buffer{0} Stack{} LSTM{1 1 0 1} In{64} Out{64} w2v{}
+  Focused : Column{suffix3:FORM} NbElem{3} Buffer{0} Stack{} LSTM{1 1 0 1} In{64} Out{64} w2v{}
+  History : NbElem{10} LSTM{1 1 0 1} In{128} Out{64}
+  HistoryMine : NbElem{4} LSTM{1 1 0 1} In{128} Out{64}
+  StateName : Out{64}
+  Distance : FromBuffer{} FromStack{0 1 2} ToBuffer{0} ToStack{} Threshold{15} LSTM{1 1 0.0 1} In{128} Out{64}
+	InputDropout : 0.5
+	MLP : {3200 0.4 1600 0.4}
+	End
+  Optimizer : Adagrad {0.01 0.000001 0 0.0000000001}
+  Type : classification
+  Loss : crossentropy
+}
+Predictions : UPOS FEATS LEMMA HEAD DEPREL EOS
+Strategy
+{
+	Block : End{cannotMove}
+	tagger tagger * 1
+	Block : End{cannotMove}
+	morpho morpho * 1
+	Block : End{cannotMove}
+  lemmatizer_rules lemmatizer_rules * 1
+	Block : End{cannotMove}
+	parser segmenter eager_SHIFT 0
+	parser segmenter eager_RIGHT_rel 0
+	parser parser * 0
+	segmenter parser * 1
+}
diff --git a/UD_any/templates/tokeparser_incr/machine.rm b/UD_any/templates/tokeparser_incr/machine.rm
index ad72b25a70f6b6136fd9568f4c49573555634471..f7ea26a1b8cd4e77611a3f4b26608d9d6c746bb3 100644
--- a/UD_any/templates/tokeparser_incr/machine.rm
+++ b/UD_any/templates/tokeparser_incr/machine.rm
@@ -4,7 +4,7 @@ Classifier : tokeparser
 	Transitions : {tokenizer,data/tokenizer.ts tagger,data/tagger.ts morpho,data/morpho_whole.ts lemmatizer_rules,data/lemmatizer_rules.ts parser,data/parser_eager_rel_strict.ts segmenter,data/segmenter.ts}
 	LossMultiplier : {}
 	Network type : Modular
-  Context : Targets{b.-3 b.-2 b.-1 b.0 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{FORM} Concat{1 1 0.0 1} In{300} Out{64} w2v{FORM,data/W2V/fasttextcleanfiltered.w2v}
+  Context : Targets{b.-3 b.-2 b.-1 b.0 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{FORM} LSTM{1 1 0.0 1} In{300} Out{128} w2v{FORM,data/W2V/fasttext.w2v}
   Context : Targets{b.-3 b.-2 b.-1 b.0 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{EOS ID UPOS FEATS DEPREL} LSTM{1 1 0 1} In{128} Out{64} w2v{}
   Focused : Column{prefix3:FORM} NbElem{3} Buffer{0} Stack{} LSTM{1 1 0 1} In{64} Out{64} w2v{}
   Focused : Column{suffix3:FORM} NbElem{3} Buffer{0} Stack{} LSTM{1 1 0 1} In{64} Out{64} w2v{}
diff --git a/UD_any/templates/tokeparser_incr_nopretrained/machine.rm b/UD_any/templates/tokeparser_incr_nopretrained/machine.rm
new file mode 100644
index 0000000000000000000000000000000000000000..a4a37fc44beb88348b4e8fdc78b40761c2d6cdfc
--- /dev/null
+++ b/UD_any/templates/tokeparser_incr_nopretrained/machine.rm
@@ -0,0 +1,39 @@
+Name : Tokenizer, Tagger, Morpho, Lemmatizer, Parser and Segmenter Machine
+Classifier : tokeparser
+{
+	Transitions : {tokenizer,data/tokenizer.ts tagger,data/tagger.ts morpho,data/morpho_whole.ts lemmatizer_rules,data/lemmatizer_rules.ts parser,data/parser_eager_rel_strict.ts segmenter,data/segmenter.ts}
+	LossMultiplier : {}
+	Network type : Modular
+  Context : Targets{b.-3 b.-2 b.-1 b.0 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{FORM} LSTM{1 1 0.0 1} In{128} Out{128} w2v{FORM,data/FORM.w2v}
+  Context : Targets{b.-3 b.-2 b.-1 b.0 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{EOS ID UPOS FEATS DEPREL} LSTM{1 1 0 1} In{128} Out{64} w2v{}
+  Focused : Column{prefix3:FORM} NbElem{3} Buffer{0} Stack{} LSTM{1 1 0 1} In{64} Out{64} w2v{}
+  Focused : Column{suffix3:FORM} NbElem{3} Buffer{0} Stack{} LSTM{1 1 0 1} In{64} Out{64} w2v{}
+	RawInput : Left{5} Right{10} LSTM{1 1 0.0 1} In{32} Out{32}
+  History : NbElem{10} LSTM{1 1 0 1} In{128} Out{64}
+  HistoryMine : NbElem{4} LSTM{1 1 0 1} In{128} Out{64}
+  StateName : Out{64}
+  Distance : FromBuffer{} FromStack{0 1 2} ToBuffer{0} ToStack{} Threshold{15} LSTM{1 1 0.0 1} In{128} Out{64}
+	SplitTrans : LSTM{1 1 0.0 1} In{128} Out{64}
+	InputDropout : 0.5
+	MLP : {3200 0.4 1600 0.4}
+	End
+  Optimizer : Adagrad {0.01 0.000001 0 0.0000000001}
+  Type : classification
+  Loss : crossentropy
+}
+Splitwords : data/splitwords.ts
+Predictions : ID FORM UPOS FEATS LEMMA HEAD DEPREL EOS
+Strategy
+{
+	Block : End{cannotMove}
+	tokenizer tagger ENDWORD 0
+	tokenizer tagger SPLIT 0
+	tokenizer tokenizer * 0
+	tagger morpho * 0
+	morpho lemmatizer_rules * 0
+  lemmatizer_rules parser * 0
+	parser segmenter eager_SHIFT 0
+	parser segmenter eager_RIGHT_rel 0
+	parser parser * 0
+	segmenter tokenizer * 1
+}
diff --git a/UD_any/templates/tokeparser_seq/machine.rm b/UD_any/templates/tokeparser_seq/machine.rm
index f63c06ace318cee0098a9df03572ef34e1a151e0..d3e801049bb4947e91ade1a76632ff1e1159697d 100644
--- a/UD_any/templates/tokeparser_seq/machine.rm
+++ b/UD_any/templates/tokeparser_seq/machine.rm
@@ -4,7 +4,7 @@ Classifier : tokeparser
 	Transitions : {tokenizer,data/tokenizer.ts tagger,data/tagger.ts morpho,data/morpho_whole.ts lemmatizer_rules,data/lemmatizer_rules.ts parser,data/parser_eager_rel_strict.ts segmenter,data/segmenter.ts}
 	LossMultiplier : {}
 	Network type : Modular
-  Context : Targets{b.-3 b.-2 b.-1 b.0 b.1 b.2 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{FORM} Concat{1 1 0.0 1} In{300} Out{64} w2v{FORM,data/W2V/fasttextcleanfiltered.w2v}
+  Context : Targets{b.-3 b.-2 b.-1 b.0 b.1 b.2 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{FORM} LSTM{1 1 0.0 1} In{300} Out{128} w2v{FORM,data/W2V/fasttext.w2v}
   Context : Targets{b.-3 b.-2 b.-1 b.0 b.1 b.2 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{EOS ID UPOS FEATS DEPREL} LSTM{1 1 0 1} In{128} Out{64} w2v{}
   Focused : Column{prefix3:FORM} NbElem{3} Buffer{0} Stack{} LSTM{1 1 0 1} In{64} Out{64} w2v{}
   Focused : Column{suffix3:FORM} NbElem{3} Buffer{0} Stack{} LSTM{1 1 0 1} In{64} Out{64} w2v{}
diff --git a/UD_any/templates/tokeparser_seq_nopretrained/machine.rm b/UD_any/templates/tokeparser_seq_nopretrained/machine.rm
new file mode 100644
index 0000000000000000000000000000000000000000..eee655bbfda5f302908912b2a598660e9a8d81a2
--- /dev/null
+++ b/UD_any/templates/tokeparser_seq_nopretrained/machine.rm
@@ -0,0 +1,43 @@
+Name : Tokenizer, Tagger, Morpho, Lemmatizer, Parser and Segmenter Machine
+Classifier : tokeparser
+{
+	Transitions : {tokenizer,data/tokenizer.ts tagger,data/tagger.ts morpho,data/morpho_whole.ts lemmatizer_rules,data/lemmatizer_rules.ts parser,data/parser_eager_rel_strict.ts segmenter,data/segmenter.ts}
+	LossMultiplier : {}
+	Network type : Modular
+  Context : Targets{b.-3 b.-2 b.-1 b.0 b.1 b.2 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{FORM} LSTM{1 1 0.0 1} In{128} Out{128} w2v{FORM,data/FORM.w2v}
+  Context : Targets{b.-3 b.-2 b.-1 b.0 b.1 b.2 s.0 s.1 s.2 b.0.0 s.0.0 s.0.-1 s.1.0 s.1.-1 s.2.0 s.2.-1} Columns{EOS ID UPOS FEATS DEPREL} LSTM{1 1 0 1} In{128} Out{64} w2v{}
+  Focused : Column{prefix3:FORM} NbElem{3} Buffer{0} Stack{} LSTM{1 1 0 1} In{64} Out{64} w2v{}
+  Focused : Column{suffix3:FORM} NbElem{3} Buffer{0} Stack{} LSTM{1 1 0 1} In{64} Out{64} w2v{}
+	RawInput : Left{5} Right{10} LSTM{1 1 0.0 1} In{32} Out{32}
+  History : NbElem{10} LSTM{1 1 0 1} In{128} Out{64}
+  HistoryMine : NbElem{4} LSTM{1 1 0 1} In{128} Out{64}
+  StateName : Out{64}
+  Distance : FromBuffer{} FromStack{0 1 2} ToBuffer{0} ToStack{} Threshold{15} LSTM{1 1 0.0 1} In{128} Out{64}
+	SplitTrans : LSTM{1 1 0.0 1} In{128} Out{64}
+	InputDropout : 0.5
+	MLP : {3200 0.4 1600 0.4}
+	End
+  Optimizer : Adagrad {0.01 0.000001 0 0.0000000001}
+  Type : classification
+  Loss : crossentropy
+}
+Splitwords : data/splitwords.ts
+Predictions : ID FORM UPOS FEATS LEMMA HEAD DEPREL EOS
+Strategy
+{
+	Block : End{cannotMove}
+	tokenizer tokenizer ENDWORD 1
+	tokenizer tokenizer SPLIT 1
+	tokenizer tokenizer * 0
+	Block : End{cannotMove}
+	tagger tagger * 1
+	Block : End{cannotMove}
+	morpho morpho * 1
+	Block : End{cannotMove}
+  lemmatizer_rules lemmatizer_rules * 1
+	Block : End{cannotMove}
+	parser segmenter eager_SHIFT 0
+	parser segmenter eager_RIGHT_rel 0
+	parser parser * 0
+	segmenter parser * 1
+}