diff --git a/Impact_of_Grammar_on_Language_Model_Comprehension.pdf b/Impact_of_Grammar_on_Language_Model_Comprehension.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..ccf09325834b7f7e51b65f7c5e180ea13e120605
Binary files /dev/null and b/Impact_of_Grammar_on_Language_Model_Comprehension.pdf differ
diff --git a/_OceanofPDF.com_Hands-On_Large_Language_Models_-_Jay_Alammar.pdf b/_OceanofPDF.com_Hands-On_Large_Language_Models_-_Jay_Alammar.pdf
index 7ea8d1d1e1e72fa3fa55aa0670d549d17048c2e7..3bb2259bdb3c37a26a59a9aa76399f1aeae655fd 100644
Binary files a/_OceanofPDF.com_Hands-On_Large_Language_Models_-_Jay_Alammar.pdf and b/_OceanofPDF.com_Hands-On_Large_Language_Models_-_Jay_Alammar.pdf differ
diff --git a/amu-literature.bib b/amu-literature.bib
index 342e5ebfa40ae3792f4b249e3f60fb55cc6108a5..378d8dd5934f9a8d1f71c0ec35e3e8ff4ffc67d9 100644
--- a/amu-literature.bib
+++ b/amu-literature.bib
@@ -1066,6 +1066,20 @@
   url       = {https://books.google.ru/books?id=iE8hEQAAQBAJ},
 }
 
+@InProceedings{10074239,
+  author    = {Ameri, Kimia and Hempel, Michael and Sharif, Hamid and Lopez, Juan and Perumalla, Kalyan},
+  booktitle = {2023 International Conference on Computing, Networking and Communications (ICNC)},
+  title     = {Impact of Grammar on Language Model Comprehension},
+  year      = {2023},
+  month     = {Feb},
+  pages     = {19--24},
+  abstract  = {Machine Learning and Natural Language Processing are playing an increasingly vital role in many different areas, including cybersecurity in Information Technology and Operational Technology networking, with many associated research challenges. In this paper, we introduce a new language model based on transformers with the addition of syntactical information into the embedding process. We show that our proposed Structurally Enriched Transformer (SET) language model outperforms baselines on a number of downstream tasks from the GLUE benchmark. Our model improved CoLA classification by 11 points over the BERT-Base model. The performance of attention-based models has been demonstrated to be significantly better than that of traditional algorithms in several NLP tasks. Transformers are composed of multiple attention heads stacked on top of each other. A Transformer is capable of generating abstract representations of tokens input to an encoder based on their relationship to all tokens in a sequence. Despite the fact that such models can learn syntactic features from examples alone, researchers have found that explicitly feeding this information to deep learning models can significantly boost their performance. A complex model like the Transformer may benefit from leveraging syntactic information such as part of speech (POS).},
+  doi       = {10.1109/ICNC57223.2023.10074239},
+  file      = {:Impact_of_Grammar_on_Language_Model_Comprehension.pdf:PDF},
+  groups    = {ml-architechtures},
+  keywords  = {Deep learning;Computational modeling;Syntactics;Transformers;Natural language processing;Magnetic heads;Grammar;Natural Language Processing;Transfer Learning;Transformers;BERT;Part of Speech;Grammar Enriched},
+}
+
 @Comment{jabref-meta: databaseType:bibtex;}
 
 @Comment{jabref-meta: grouping: