% Cattan et al., *SEM 2021 (pp. 143-151). Cleaned from a research-system auto-export:
% locale-artifact Arabic values in `language`/`address` replaced with their English
% equivalents, names normalized to "Last, First", quotes changed to braces.
@inproceedings{18eece0ba9004f9aa453d73ed1be5fb4,
  title     = {Realistic Evaluation Principles for Cross-document Coreference Resolution},
  abstract  = {We point out that common evaluation practices for cross-document coreference resolution have been unrealistically permissive in their assumed settings, yielding inflated results. We propose addressing this issue via two evaluation methodology principles. First, as in other tasks, models should be evaluated on predicted mentions rather than on gold mentions. Doing this raises a subtle issue regarding singleton coreference clusters, which we address by decoupling the evaluation of mention detection from that of coreference linking. Second, we argue that models should not exploit the synthetic topic structure of the standard ECB+ dataset, forcing models to confront the lexical ambiguity challenge, as intended by the dataset creators. We demonstrate empirically the drastic impact of our more realistic evaluation principles on a competitive model, yielding a score which is 33 F1 lower compared to evaluating by prior lenient practices.},
  author    = {Cattan, Arie and Eirew, Alon and Stanovsky, Gabriel and Joshi, Mandar and Dagan, Ido},
  note      = {Publisher Copyright: {\textcopyright} 2021 Lexical and Computational Semantics; 10th Conference on Lexical and Computational Semantics, *SEM 2021 ; Conference date: 05-08-2021 Through 06-08-2021},
  year      = {2021},
  language  = {English},
  series    = {*SEM 2021 - 10th Conference on Lexical and Computational Semantics, Proceedings of the Conference},
  publisher = {Association for Computational Linguistics (ACL)},
  pages     = {143--151},
  editor    = {Ku, Lun-Wei and Nastase, Vivi and Vuli{\'c}, Ivan},
  booktitle = {*SEM 2021 - 10th Conference on Lexical and Computational Semantics, Proceedings of the Conference},
  address   = {United States},
}