@inproceedings{5d056b4bac1a420c8c89784a1c192cdb,
  title     = {Neighboring Words Affect Human Interpretation of Saliency Explanations},
  abstract  = {Word-level saliency explanations (“heat maps over words”) are often used to communicate feature-attribution in text-based models. Recent studies found that superficial factors such as word length can distort human interpretation of the communicated saliency scores. We conduct a user study to investigate how the marking of a word's neighboring words affect the explainee's perception of the word's importance in the context of a saliency explanation. We find that neighboring words have significant effects on the word's importance rating. Concretely, we identify that the influence changes based on neighboring direction (left vs. right) and a-priori linguistic and computational measures of phrases and collocations (vs. unrelated neighboring words). Our results question whether text-based saliency explanations should be continued to be communicated at word level, and inform future research on alternative saliency explanation methods.},
  author    = {Jacovi, Alon and Schuff, Hendrik and Adel, Heike and Vu, {Ngoc Thang} and Goldberg, Yoav},
  note      = {Publisher Copyright: {\textcopyright} 2023 Association for Computational Linguistics.; 61st Annual Meeting of the Association for Computational Linguistics, ACL 2023 ; Conference date: 09-07-2023 Through 14-07-2023},
  year      = {2023},
  month     = jan,
  day       = {1},
  doi       = {10.18653/v1/2023.findings-acl.750},
  language  = {English},
  series    = {Proceedings of the Annual Meeting of the Association for Computational Linguistics},
  publisher = {Association for Computational Linguistics (ACL)},
  pages     = {11816--11833},
  booktitle = {Findings of the Association for Computational Linguistics, ACL 2023},
  address   = {United States},
}