@inproceedings{fd6a0b15843a4469bb6137281df18c6a,
title = "Grad-SAM: Explaining Transformers via Gradient Self-Attention Maps",
abstract = "Transformer-based language models have significantly advanced the state-of-the-art in many linguistic tasks. As this revolution continues, the ability to explain model predictions has become a major area of interest for the NLP community. In this work, we present Gradient Self-Attention Maps (Grad-SAM) - a novel gradient-based method that analyzes self-attention units and identifies the input elements that best explain the model's prediction. Extensive evaluations on various benchmarks show that Grad-SAM obtains significant improvements over state-of-the-art alternatives.",
keywords = "bert, deep learning, explainable & interpretable ai, nlp, self-attention, transformers, transparent machine learning",
author = "Oren Barkan and Edan Hauon and Avi Caciularu and Ori Katz and Itzik Malkiel and Omri Armstrong and Noam Koenigstein",
note = "Publisher Copyright: {\textcopyright} 2021 ACM; 30th ACM International Conference on Information and Knowledge Management, CIKM 2021; Conference date: 01-11-2021 through 05-11-2021",
year = "2021",
month = oct,
day = "26",
doi = "10.1145/3459637.3482126",
language = "English",
isbn = "9781450384469",
series = "International Conference on Information and Knowledge Management, Proceedings",
pages = "2882--2887",
booktitle = "CIKM 2021 - Proceedings of the 30th ACM International Conference on Information and Knowledge Management",
}