@inproceedings{40e992905a4443d4adf825fb6312f468,
  title     = {Contrastive Explanations for Model Interpretability},
  author    = {Jacovi, Alon and Swayamdipta, Swabha and Ravfogel, Shauli and Elazar, Yanai and Choi, Yejin and Goldberg, Yoav},
  booktitle = {EMNLP 2021 - 2021 Conference on Empirical Methods in Natural Language Processing, Proceedings},
  series    = {EMNLP 2021 - 2021 Conference on Empirical Methods in Natural Language Processing, Proceedings},
  publisher = {Association for Computational Linguistics (ACL)},
  address   = {United States},
  year      = {2021},
  month     = nov,
  pages     = {1597--1611},
  doi       = {10.18653/v1/2021.emnlp-main.120},
  language  = {English},
  abstract  = {Contrastive explanations clarify why an event occurred in contrast to another. They are inherently intuitive to humans to both produce and comprehend. We propose a method to produce contrastive explanations in the latent space, via a projection of the input representation, such that only the features that differentiate two potential decisions are captured. Our modification allows model behavior to consider only contrastive reasoning, and uncover which aspects of the input are useful for and against particular decisions. Additionally, for a given input feature, our contrastive explanations can answer for which label, and against which alternative label, is the feature useful. We produce contrastive explanations via both high-level abstract concept attribution and low-level input token/span attribution for two NLP classification benchmarks. Our findings demonstrate the ability of label-contrastive explanations to provide fine-grained interpretability of model decisions.},
  note      = {Publisher Copyright: {\textcopyright} 2021 Association for Computational Linguistics; 2021 Conference on Empirical Methods in Natural Language Processing, EMNLP 2021 ; Conference date: 07-11-2021 Through 11-11-2021},
}