@inproceedings{e39cb11c898d4270a0d15f900225e912,
  title     = {{CROW}: Benchmarking Commonsense Reasoning in Real-World Tasks},
  abstract  = {Recent efforts in natural language processing (NLP) commonsense reasoning research have yielded a considerable number of new datasets and benchmarks. However, most of these datasets formulate commonsense reasoning challenges in artificial scenarios that are not reflective of the tasks which real-world NLP systems are designed to solve. In this work, we present CROW, a manually-curated, multitask benchmark that evaluates the ability of models to apply commonsense reasoning in the context of six real-world NLP tasks. CROW is constructed using a multi-stage data collection pipeline that rewrites examples from existing datasets using commonsense-violating perturbations. We use CROW to study how NLP systems perform across different dimensions of commonsense knowledge, such as physical, temporal, and social reasoning. We find a significant performance gap when NLP systems are evaluated on CROW compared to humans, showcasing that commonsense reasoning is far from being solved in real-world task settings. We make our dataset and leaderboard available to the research community.},
  author    = {Ismayilzada, Mete and Paul, Debjit and Montariol, Syrielle and Geva, Mor and Bosselut, Antoine},
  note      = {Publisher Copyright: {\textcopyright} 2023 Association for Computational Linguistics.; 2023 Conference on Empirical Methods in Natural Language Processing, EMNLP 2023 ; Conference date: 06-12-2023 Through 10-12-2023},
  year      = {2023},
  doi       = {10.18653/v1/2023.emnlp-main.607},
  language  = {English},
  series    = {EMNLP 2023 - 2023 Conference on Empirical Methods in Natural Language Processing, Proceedings},
  publisher = {Association for Computational Linguistics (ACL)},
  pages     = {9785--9821},
  editor    = {Bouamor, Houda and Pino, Juan and Bali, Kalika},
  booktitle = {EMNLP 2023 - 2023 Conference on Empirical Methods in Natural Language Processing, Proceedings},
  address   = {United States},
}