@inproceedings{7ac3e39165464c3689ecf6402830d50f,
  title     = {{Transformer}-based {Hebrew} {NLP} models for Short Answer Scoring in Biology},
  abstract  = {Pre-trained large language models (PLMs) are adaptable to a wide range of downstream tasks by fine-tuning their rich contextual embeddings to the task, often without requiring much task-specific data. In this paper, we explore the use of a recently developed Hebrew PLM – alephBERT – for automated short answer grading of high school biology items. We show that the alephBERT-based system outperforms a strong CNN-based baseline, and that it generalizes unexpectedly well in a zero-shot paradigm to items on an unseen topic that address the same underlying biological concepts, opening up the possibility of automatically assessing new items without item-specific fine-tuning.},
  author    = {Schleifer, Abigail Gurin and Beigman Klebanov, Beata and Ariely, Moriah and Alexandron, Giora},
  note      = {Publisher Copyright: {\textcopyright} 2023 Association for Computational Linguistics.; 18th Workshop on Innovative Use of NLP for Building Educational Applications, BEA 2023 ; Conference date: 13-07-2023},
  year      = {2023},
  language  = {English},
  series    = {Proceedings of the Annual Meeting of the Association for Computational Linguistics},
  publisher = {Association for Computational Linguistics (ACL)},
  pages     = {550--555},
  editor    = {Kochmar, Ekaterina and Burstein, Jill and Horbach, Andrea and Laarmann-Quante, Ronja and Madnani, Nitin and Tack, Anais and Yaneva, Victoria and Yuan, Zheng and Zesch, Torsten},
  booktitle = {BEA 2023 - 18th Workshop on Innovative Use of NLP for Building Educational Applications, Proceedings of the Workshop},
  address   = {United States},
}