{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,18]],"date-time":"2026-03-18T07:24:47Z","timestamp":1773818687122,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":55,"publisher":"ACM","license":[{"start":{"date-parts":[[2022,8,14]],"date-time":"2022-08-14T00:00:00Z","timestamp":1660435200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/linproxy.fan.workers.dev:443\/https\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2022,8,14]]},"DOI":"10.1145\/3534678.3539173","type":"proceedings-article","created":{"date-parts":[[2022,8,12]],"date-time":"2022-08-12T19:06:41Z","timestamp":1660331201000},"page":"2893-2902","update-policy":"https:\/\/linproxy.fan.workers.dev:443\/https\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":11,"title":["Alexa Teacher Model"],"prefix":"10.1145","author":[{"given":"Jack","family":"FitzGerald","sequence":"first","affiliation":[{"name":"Amazon, Denver, CO, USA"}]},{"given":"Shankar","family":"Ananthakrishnan","sequence":"additional","affiliation":[{"name":"Amazon, Cambridge, MA, USA"}]},{"given":"Konstantine","family":"Arkoudas","sequence":"additional","affiliation":[{"name":"Amazon, New York, NY, USA"}]},{"given":"Davide","family":"Bernardi","sequence":"additional","affiliation":[{"name":"Amazon, Turin, AA, USA"}]},{"given":"Abhishek","family":"Bhagia","sequence":"additional","affiliation":[{"name":"Amazon, Seattle, WA, USA"}]},{"given":"Claudio","family":"Delli Bovi","sequence":"additional","affiliation":[{"name":"Amazon, Aachen, Germany"}]},{"given":"Jin","family":"Cao","sequence":"additional","affiliation":[{"name":"Amazon, New York, NY, USA"}]},{"given":"Rakesh","family":"Chada","sequence":"additional","affiliation":[{"name":"Amazon, Seattle, WA, USA"}]},{"given":"Amit","family":"Chauhan","sequence":"additional","affiliation":[{"name":"Amazon, Seattle, WA, USA"}]},{"given":"Luoxin","family":"Chen","sequence":"additional","affiliation":[{"name":"Amazon, Cambridge, MA, USA"}]},{"given":"Anurag","family":"Dwarakanath","sequence":"additional","affiliation":[{"name":"Amazon, Bangalore, India"}]},{"given":"Satyam","family":"Dwivedi","sequence":"additional","affiliation":[{"name":"Amazon, Bangalore, India"}]},{"given":"Turan","family":"Gojayev","sequence":"additional","affiliation":[{"name":"Amazon, Aachen, Germany"}]},{"given":"Karthik","family":"Gopalakrishnan","sequence":"additional","affiliation":[{"name":"Amazon, Santa Clara, CA, USA"}]},{"given":"Thomas","family":"Gueudre","sequence":"additional","affiliation":[{"name":"Amazon, Aachen, Germany"}]},{"given":"Dilek","family":"Hakkani-Tur","sequence":"additional","affiliation":[{"name":"Amazon, Sunnyvale, CA, USA"}]},{"given":"Wael","family":"Hamza","sequence":"additional","affiliation":[{"name":"Amazon, New York, NY, USA"}]},{"given":"Jonathan J.","family":"H\u00fcser","sequence":"additional","affiliation":[{"name":"Amazon, Aachen, Germany"}]},{"given":"Kevin Martin","family":"Jose","sequence":"additional","affiliation":[{"name":"Amazon, Aachen, Germany"}]},{"given":"Haidar","family":"Khan","sequence":"additional","affiliation":[{"name":"Amazon, New York, NY, USA"}]},{"given":"Beiye","family":"Liu","sequence":"additional","affiliation":[{"name":"Amazon, New York, NY, 
USA"}]},{"given":"Jianhua","family":"Lu","sequence":"additional","affiliation":[{"name":"Amazon, Cambridge, MA, USA"}]},{"given":"Alessandro","family":"Manzotti","sequence":"additional","affiliation":[{"name":"Amazon, Turin, Italy"}]},{"given":"Pradeep","family":"Natarajan","sequence":"additional","affiliation":[{"name":"Amazon, Chicago, IL, USA"}]},{"given":"Karolina","family":"Owczarzak","sequence":"additional","affiliation":[{"name":"Amazon, Cambridge, MA, USA"}]},{"given":"Gokmen","family":"Oz","sequence":"additional","affiliation":[{"name":"Amazon, Cambridge, MA, USA"}]},{"given":"Enrico","family":"Palumbo","sequence":"additional","affiliation":[{"name":"Spotify, Turin, Italy"}]},{"given":"Charith","family":"Peris","sequence":"additional","affiliation":[{"name":"Amazon, Cambridge, MA, USA"}]},{"given":"Chandana Satya","family":"Prakash","sequence":"additional","affiliation":[{"name":"Amazon, Cambridge, MA, USA"}]},{"given":"Stephen","family":"Rawls","sequence":"additional","affiliation":[{"name":"Amazon, New York, NY, USA"}]},{"given":"Andy","family":"Rosenbaum","sequence":"additional","affiliation":[{"name":"Amazon, Cambridge, MA, USA"}]},{"given":"Anjali","family":"Shenoy","sequence":"additional","affiliation":[{"name":"Amazon, Bangalore, India"}]},{"given":"Saleh","family":"Soltan","sequence":"additional","affiliation":[{"name":"Amazon, New York, NY, USA"}]},{"given":"Mukund Harakere","family":"Sridhar","sequence":"additional","affiliation":[{"name":"Amazon, Cambridge, MA, USA"}]},{"given":"Lizhen","family":"Tan","sequence":"additional","affiliation":[{"name":"Amazon, Cambridge, MA, USA"}]},{"given":"Fabian","family":"Triefenbach","sequence":"additional","affiliation":[{"name":"Amazon, Aachen, Germany"}]},{"given":"Pan","family":"Wei","sequence":"additional","affiliation":[{"name":"Amazon, Cambridge, MA, USA"}]},{"given":"Haiyang","family":"Yu","sequence":"additional","affiliation":[{"name":"Amazon, Cambridge, MA, USA"}]},{"given":"Shuai","family":"Zheng","sequence":"additional","affiliation":[{"name":"Amazon, Seattle, WA, USA"}]},{"given":"Gokhan","family":"Tur","sequence":"additional","affiliation":[{"name":"Amazon, Sunnyvale, CA, USA"}]},{"given":"Prem","family":"Natarajan","sequence":"additional","affiliation":[{"name":"Amazon, Los Angeles, CA, USA"}]}],"member":"320","published-online":{"date-parts":[[2022,8,14]]},"reference":[{"key":"e_1_3_2_2_1_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_2_1","volume-title":"Weinberger (Eds.)","volume":"27","author":"Ba Jimmy","year":"2014","unstructured":"Jimmy Ba and Rich Caruana. 2014. Do Deep Nets Really Need to be Deep?. In Advances in Neural Information Processing Systems, Z. Ghahramani, M. Welling, C. Cortes, N. Lawrence, and K. Q. Weinberger (Eds.), Vol. 27. Curran Associates, Inc. https:\/\/linproxy.fan.workers.dev:443\/https\/proceedings.neurips.cc\/paper\/2014\/file\/ea8fcd92d59581717e06eb187f10666d-Paper.pdf"},{"key":"e_1_3_2_2_3_1","volume-title":"Language Models are Few-Shot Learners","author":"Brown Tom","year":"2020","unstructured":"Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 
2020. Language Models are Few-Shot Learners. , Vol. 33 (2020), 1877--1901. https:\/\/proceedings.neurips.cc\/paper\/2020\/file\/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf"},{"key":"e_1_3_2_2_4_1","doi-asserted-by":"publisher","DOI":"10.1145\/1150402.1150464"},{"key":"e_1_3_2_2_5_1","doi-asserted-by":"crossref","unstructured":"Jin Cao Jun Wang Wael Hamza Kelly Vanee and Shang-Wen Li. 2020. Style Attuned Pre-training and Parameter Efficient Fine-tuning for Spoken Language Understanding. (2020).","DOI":"10.21437\/Interspeech.2020-2907"},{"key":"e_1_3_2_2_6_1","volume-title":"Bert for joint intent classification and slot filling. arXiv preprint arXiv:1902.10909","author":"Chen Qian","year":"2019","unstructured":"Qian Chen, Zhu Zhuo, and Wen Wang. 2019. Bert for joint intent classification and slot filling. arXiv preprint arXiv:1902.10909 (2019)."},{"key":"e_1_3_2_2_7_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00489"},{"key":"e_1_3_2_2_8_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_9_1","volume-title":"Garnett (Eds.)","volume":"32","author":"Conneau Alexis","year":"2019","unstructured":"Alexis Conneau and Guillaume Lample. 2019. Cross-lingual Language Model Pretraining. In Advances in Neural Information Processing Systems, H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alch\u00e9-Buc, E. Fox, and R. Garnett (Eds.), Vol. 32. Curran Associates, Inc. https:\/\/proceedings.neurips.cc\/paper\/2019\/file\/c04c19c2c2474dbf5f7ac4372c5b9af1-Paper.pdf"},{"key":"e_1_3_2_2_10_1","volume-title":"XNLI: Evaluating Cross-lingual Sentence Representations.","author":"Conneau Alexis","year":"2018","unstructured":"Alexis Conneau, Guillaume Lample, Ruty Rinott, Adina Williams, Samuel R. Bowman, Holger Schwenk, and Veselin Stoyanov. 2018. XNLI: Evaluating Cross-lingual Sentence Representations."},{"key":"e_1_3_2_2_11_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_12_1","volume-title":"Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity. , 39 pages","author":"Fedus William","year":"2022","unstructured":"William Fedus, Barret Zoph, and Noam Shazeer. 2022. Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity. , 39 pages. http:\/\/jmlr.org\/papers\/v23\/21-0998.html"},{"key":"e_1_3_2_2_13_1","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.2204.08582"},{"key":"e_1_3_2_2_14_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_15_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_16_1","volume-title":"NIPS Deep Learning and Representation Learning Workshop . http:\/\/arxiv.org\/abs\/1503","author":"Hinton Geoffrey","year":"2015","unstructured":"Geoffrey Hinton, Oriol Vinyals, and Jeffrey Dean. 2015. Distilling the Knowledge in a Neural Network. In NIPS Deep Learning and Representation Learning Workshop . 
http:\/\/arxiv.org\/abs\/1503.02531"},{"key":"e_1_3_2_2_17_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_18_1","unstructured":"Dhiraj Kalamkar Dheevatsa Mudigere Naveen Mellempudi Dipankar Das Kunal Banerjee Sasikanth Avancha Dharma Teja Vooturi Nataraj Jammalamadaka Jianyu Huang Hector Yuen Jiyan Yang Jongsoo Park Alexander Heinecke Evangelos Georganas Sudarshan Srinivasan Abhisek Kundu Misha Smelyanskiy Bharat Kaul and Pradeep Dubey. 2019. A Study of BFLOAT16 for Deep Learning Training. arxiv: 1905.12322 [cs.LG]"},{"key":"e_1_3_2_2_19_1","volume-title":"Scaling laws for neural language models. arxiv","author":"Kaplan Jared","year":"2020","unstructured":"Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B. Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. 2020. Scaling laws for neural language models. arxiv: 2001.08361 [cs.LG]"},{"key":"e_1_3_2_2_20_1","volume-title":"Smith","author":"Keung Phillip","year":"2020","unstructured":"Phillip Keung, Y. Lu, Gy\u00f6rgy Szarvas, and Noah A. Smith. 2020. The Multilingual Amazon Reviews Corpus."},{"key":"e_1_3_2_2_21_1","volume-title":"Alexandre Muzio, Andres Felipe Cruz Salinas, Liyang Lu, Amr Hendy, Samyam Rajbhandari, Yuxiong He, and Hany Hassan Awadalla.","author":"Kim Young Jin","year":"2021","unstructured":"Young Jin Kim, Ammar Ahmad Awan, Alexandre Muzio, Andres Felipe Cruz Salinas, Liyang Lu, Amr Hendy, Samyam Rajbhandari, Yuxiong He, and Hany Hassan Awadalla. 2021. Scalable and Efficient MoE Training for Multitask Multilingual Models. arxiv: 2109.10465 [cs.CL]"},{"key":"e_1_3_2_2_22_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_23_1","unstructured":"Mike Lewis Shruti Bhosale Tim Dettmers Naman Goyal and Luke Zettlemoyer. 2021. BASE Layers: Simplifying Training of Large Sparse Models."},{"key":"e_1_3_2_2_24_1","volume-title":"RoBERTa: A Robustly Optimized BERT Pretraining Approach. arxiv","author":"Liu Yinhan","year":"2019","unstructured":"Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A Robustly Optimized BERT Pretraining Approach. arxiv: 1907.11692 [cs.CL]"},{"key":"e_1_3_2_2_25_1","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2014.2383614"},{"key":"e_1_3_2_2_26_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5963"},{"key":"e_1_3_2_2_27_1","doi-asserted-by":"publisher","unstructured":"Deepak Narayanan Aaron Harlap Amar Phanishayee Vivek Seshadri Nikhil R. Devanur Gregory R. Ganger Phillip B. Gibbons and Matei Zaharia. 2019. PipeDream: Generalized Pipeline Parallelism for DNN Training. (2019) 1--15. https:\/\/doi.org\/10.1145\/3341301.3359646","DOI":"10.1145\/3341301.3359646"},{"key":"e_1_3_2_2_28_1","volume-title":"Prashan Wanigasekara, and Haidar Khan.","author":"Peris Charith","year":"2020","unstructured":"Charith Peris, Gokmen Oz, Khadige Abboud, Venkata sai Varada Varada, Prashan Wanigasekara, and Haidar Khan. 2020. Using multiple ASR hypotheses to boost i18n NLU performance. (Dec. 2020), 30--39. https:\/\/aclanthology.org\/2020.icon-main.5"},{"key":"e_1_3_2_2_29_1","volume-title":"Manning","author":"Qi Peng","year":"2020","unstructured":"Peng Qi, Yuhao Zhang, Yuhui Zhang, Jason Bolton, and Christopher D. Manning. 2020. Stanza: A Python Natural Language Processing Toolkit for Many Human Languages. 
In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations . https:\/\/nlp.stanford.edu\/pubs\/qi2020stanza.pdf"},{"key":"e_1_3_2_2_30_1","unstructured":"A. Radford Jeffrey Wu R. Child David Luan Dario Amodei and Ilya Sutskever. 2019. Language Models are Unsupervised Multitask Learners."},{"key":"e_1_3_2_2_31_1","volume-title":"Liu","author":"Raffel Colin","year":"2020","unstructured":"Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer. , 67 pages. http:\/\/jmlr.org\/papers\/v21\/20-074.html"},{"key":"e_1_3_2_2_32_1","volume-title":"Ammar Ahmad Awan, Jeff Rasley, and Yuxiong He.","author":"Rajbhandari Samyam","year":"2022","unstructured":"Samyam Rajbhandari, Conglong Li, Zhewei Yao, Minjia Zhang, Reza Yazdani Aminabadi, Ammar Ahmad Awan, Jeff Rasley, and Yuxiong He. 2022. DeepSpeed-MoE: Advancing Mixture-of-Experts Inference and Training to Power Next-Generation AI Scale. arxiv: 2201.05596 [cs.LG]"},{"key":"e_1_3_2_2_33_1","volume-title":"ZeRO: Memory Optimization Towards Training A Trillion Parameter Models. CoRR","author":"Rajbhandari Samyam","year":"2019","unstructured":"Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, and Yuxiong He. 2019. ZeRO: Memory Optimization Towards Training A Trillion Parameter Models. CoRR , Vol. abs\/1910.02054 (2019). arXiv:1910.02054 http:\/\/arxiv.org\/abs\/1910.02054"},{"key":"e_1_3_2_2_34_1","volume-title":"ZeRO: Memory Optimizations Toward Training Trillion Parameter Models. arxiv","author":"Rajbhandari Samyam","year":"2020","unstructured":"Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, and Yuxiong He. 2020. ZeRO: Memory Optimizations Toward Training Trillion Parameter Models. arxiv: 1910.02054 [cs.LG]"},{"key":"e_1_3_2_2_35_1","doi-asserted-by":"crossref","unstructured":"Milind Rao Pranav Dheram Gautam Tiwari Anirudh Raju Jasha Droppo Ariya Rastrow and Andreas Stolcke. 2021. DO as I Mean Not as I Say: Sequence Loss Training for Spoken Language Understanding. 7473--7477 pages.","DOI":"10.1109\/ICASSP39728.2021.9414566"},{"key":"e_1_3_2_2_36_1","volume-title":"a distilled version of BERT: smaller, faster, cheaper and lighter. arxiv","author":"Sanh Victor","year":"2020","unstructured":"Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2020. DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. arxiv: 1910.01108 [cs.CL]"},{"key":"e_1_3_2_2_37_1","volume-title":"Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism. CoRR","author":"Shoeybi Mohammad","year":"2019","unstructured":"Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. 2019. Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism. CoRR , Vol. abs\/1909.08053 (2019). 
arXiv:1909.08053 http:\/\/arxiv.org\/abs\/1909.08053"},{"key":"e_1_3_2_2_38_1","volume-title":"Julie Bernauer, Xia Song, Mohammad Shoeybi, Yuxiong He, Michael Houston, Saurabh Tiwary, and Bryan Catanzaro.","author":"Smith Shaden","year":"2022","unstructured":"Shaden Smith, Mostofa Patwary, Brandon Norick, Patrick LeGresley, Samyam Rajbhandari, Jared Casper, Zhun Liu, Shrimai Prabhumoye, George Zerveas, Vijay Korthikanti, Elton Zhang, Rewon Child, Reza Yazdani Aminabadi, Julie Bernauer, Xia Song, Mohammad Shoeybi, Yuxiong He, Michael Houston, Saurabh Tiwary, and Bryan Catanzaro. 2022. Using DeepSpeed and Megatron to Train Megatron-Turing NLG 530B, A Large-Scale Generative Language Model. arxiv: 2201.11990 [cs.CL]"},{"key":"e_1_3_2_2_39_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_40_1","doi-asserted-by":"crossref","unstructured":"Gokhan Tur and Renato De Mori. 2011. Spoken Language Understanding: Systems for Extracting Semantic Information from Speech.","DOI":"10.1002\/9781119992691"},{"key":"e_1_3_2_2_41_1","doi-asserted-by":"crossref","unstructured":"G\u00f6khan T\u00fcr Jerry H Wright Allen L Gorin Giuseppe Riccardi and Dilek Hakkani-T\u00fcr. 2002. Improving spoken language understanding using word confusion networks.. In Interspeech .","DOI":"10.21437\/ICSLP.2002-374"},{"key":"e_1_3_2_2_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461905"},{"key":"e_1_3_2_2_43_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_44_1","volume-title":"2020 a. MiniLM: Deep Self-Attention Distillation for Task-Agnostic Compression of Pre-Trained Transformers. arxiv","author":"Wang Wenhui","year":"2020","unstructured":"Wenhui Wang, Furu Wei, Li Dong, Hangbo Bao, Nan Yang, and Ming Zhou. 2020 a. MiniLM: Deep Self-Attention Distillation for Task-Agnostic Compression of Pre-Trained Transformers. arxiv: 2002.10957 [cs.CL]"},{"key":"e_1_3_2_2_45_1","volume-title":"Advances in Neural Information Processing Systems","author":"Wang Wenhui","year":"2020","unstructured":"Wenhui Wang, Furu Wei, Li Dong, Hangbo Bao, Nan Yang, and Ming Zhou. 2020 b. MiniLM: Deep Self-Attention Distillation for Task-Agnostic Compression of Pre-Trained Transformers. In Advances in Neural Information Processing Systems, H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin (Eds.), Vol. 33. Curran Associates, Inc., 5776--5788. https:\/\/proceedings.neurips.cc\/paper\/2020\/file\/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf"},{"key":"e_1_3_2_2_46_1","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2005.1511821"},{"key":"e_1_3_2_2_47_1","volume-title":"Proceedings of the 12th Language Resources and Evaluation Conference . European Language Resources Association","author":"Wenzek Guillaume","year":"2020","unstructured":"Guillaume Wenzek, Marie-Anne Lachaux, Alexis Conneau, Vishrav Chaudhary, Francisco Guzm\u00e1n, Armand Joulin, and Edouard Grave. 2020. CCNet: Extracting High Quality Monolingual Datasets from Web Crawl Data. In Proceedings of the 12th Language Resources and Evaluation Conference . European Language Resources Association, Marseille, France, 4003--4012. https:\/\/aclanthology.org\/2020.lrec-1.494"},{"key":"e_1_3_2_2_48_1","volume-title":"The Free Encyclopedia. 
https:\/\/en.wikipedia.org\/w\/index.php?title=J%C5%8Dy%C5%8D_kanji&oldid=1039289460 [Online","author":"Wikipedia contributors","year":"2022","unstructured":"Wikipedia contributors. 2021. Wikipedia, The Free Encyclopedia. https:\/\/en.wikipedia.org\/w\/index.php?title=J%C5%8Dy%C5%8D_kanji&oldid=1039289460 [Online; accessed 28-January-2022]."},{"key":"e_1_3_2_2_49_1","doi-asserted-by":"publisher","unstructured":"Chien-Sheng Wu Steven C.H. Hoi Richard Socher and Caiming Xiong. 2020. TOD-BERT: Pre-trained Natural Language Understanding for Task-Oriented Dialogue. 917--929 pages. https:\/\/doi.org\/10.18653\/v1\/2020.emnlp-main.66","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_50_1","volume-title":"Multilingual Neural Semantic Parsing for Low-Resourced Languages. In The Tenth Joint Conference on Lexical and Computational Semantics .","author":"Xia Menglin","year":"2021","unstructured":"Menglin Xia and Emilio Monti. 2021. Multilingual Neural Semantic Parsing for Low-Resourced Languages. In The Tenth Joint Conference on Lexical and Computational Semantics ."},{"key":"e_1_3_2_2_51_1","unstructured":"Ruibin Xiong Yunchang Yang Di He Kai Zheng Shuxin Zheng Chen Xing Huishuai Zhang Yanyan Lan Liwei Wang and Tieyan Liu. 2020. On Layer Normalization in the Transformer Architecture. 10524--10533 pages. https:\/\/proceedings.mlr.press\/v119\/xiong20b.html"},{"key":"e_1_3_2_2_52_1","doi-asserted-by":"publisher","unstructured":"Weijia Xu Batool Haider and Saab Mansour. 2020. End-to-End Slot Alignment and Recognition for Cross-Lingual NLU . 5052--5063 pages. https:\/\/doi.org\/10.18653\/v1\/2020.emnlp-main.410","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_53_1","doi-asserted-by":"publisher","unstructured":"Linting Xue Noah Constant Adam Roberts Mihir Kale Rami Al-Rfou Aditya Siddhant Aditya Barua and Colin Raffel. 2021. mT5: A Massively Multilingual Pre-trained Text-to-Text Transformer. 483--498 pages. https:\/\/doi.org\/10.18653\/v1\/2021.naacl-main.41","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_54_1","doi-asserted-by":"crossref","unstructured":"Yinfei Yang Yuan Zhang Chris Tar and Jason Baldridge. 2019. PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification.","DOI":"10.18653\/v1\/D19-1382"},{"key":"e_1_3_2_2_55_1","doi-asserted-by":"crossref","unstructured":"Steve J. Young. 2002. Talking to machines (statistically speaking). 
In INTERSPEECH .","DOI":"10.21437\/ICSLP.2002-2"}],"event":{"name":"KDD '22: The 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining","location":"Washington DC USA","acronym":"KDD '22","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data"]},"container-title":["Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3534678.3539173","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3534678.3539173","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T19:02:58Z","timestamp":1750186978000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3534678.3539173"}},"subtitle":["Pretraining and Distilling Multi-Billion-Parameter Encoders for Natural Language Understanding Systems"],"short-title":[],"issued":{"date-parts":[[2022,8,14]]},"references-count":55,"alternative-id":["10.1145\/3534678.3539173","10.1145\/3534678"],"URL":"https:\/\/doi.org\/10.1145\/3534678.3539173","relation":{},"subject":[],"published":{"date-parts":[[2022,8,14]]},"assertion":[{"value":"2022-08-14","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
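
The record above is a Crossref work object for DOI 10.1145/3534678.3539173. As a minimal sketch of how such a record can be fetched and read, assuming the public Crossref REST API (api.crossref.org, which is not named in the record itself) and the Python requests package:

    # Fetch the Crossref work record for this DOI and read a few of its fields.
    import requests

    DOI = "10.1145/3534678.3539173"
    resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
    resp.raise_for_status()
    work = resp.json()["message"]  # same structure as the "message" object above

    print(work["title"][0])                        # "Alexa Teacher Model"
    print(work["subtitle"][0])                     # "Pretraining and Distilling ..."
    print(work["DOI"], work["page"])               # DOI and page range 2893-2902
    print(len(work["author"]), "authors")          # each entry has given/family/affiliation
    print(work["references-count"], "references")  # 55 deposited references

All field names used here (title, subtitle, DOI, page, author, references-count) appear in the record above; any other endpoint or client library would need the same unwrapping of the top-level "message" object.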