@inproceedings{44ac180bda1c4704b10e54fdd5778eab,
title = "On making reading comprehension more comprehensive",
abstract = "Machine reading comprehension, the task of evaluating a machine's ability to comprehend a passage of text, has seen a surge in popularity in recent years. There are many datasets that are targeted at reading comprehension, and many systems that perform as well as humans on some of these datasets. Despite all of this interest, there is no work that systematically defines what reading comprehension is. In this work, we justify a question answering approach to reading comprehension and describe the various kinds of questions one might use to more fully test a system's comprehension of a passage, moving beyond questions that only probe local predicate-argument structures. The main pitfall of this approach is that questions can easily have surface cues or other biases that allow a model to shortcut the intended reasoning process. We discuss ways proposed in current literature to mitigate these shortcuts, and we conclude with recommendations for future dataset collection efforts.",
author = "Matt Gardner and Jonathan Berant and Hannaneh Hajishirzi and Alon Talmor and Sewon Min",
note = "Publisher Copyright: {\textcopyright} 2019 MRQA@EMNLP 2019 - Proceedings of the 2nd Workshop on Machine Reading for Question Answering. All rights reserved.; 2nd Workshop on Machine Reading for Question Answering, MRQA@EMNLP 2019 ; Conference date: 04-11-2019",
year = "2019",
language = "English",
series = "MRQA@EMNLP 2019 - Proceedings of the 2nd Workshop on Machine Reading for Question Answering",
publisher = "Association for Computational Linguistics (ACL)",
pages = "105--112",
booktitle = "MRQA@EMNLP 2019 - Proceedings of the 2nd Workshop on Machine Reading for Question Answering",
address = "United States",
}