@inproceedings{fddef95d7a25468f8a1caea1067134a0,
  title     = {Creating a Large Benchmark for Open Information Extraction},
  abstract  = {Open information extraction (Open IE) was presented as an unrestricted variant of traditional information extraction. It has been gaining substantial attention, manifested by a large number of automatic Open IE extractors and downstream applications. In spite of this broad attention, the Open IE task definition has been lacking - there are no formal guidelines and no large scale gold standard annotation. Subsequently, the various implementations of Open IE resorted to small scale post-hoc evaluations, inhibiting an objective and reproducible cross-system comparison. In this work, we develop a methodology that leverages the recent QA-SRL annotation to create a first independent and large scale Open IE annotation, and use it to automatically compare the most prominent Open IE systems.},
  author    = {Stanovsky, Gabriel and Dagan, Ido},
  note      = {Publisher Copyright: {\textcopyright} 2016 Association for Computational Linguistics; 2016 Conference on Empirical Methods in Natural Language Processing, EMNLP 2016 ; Conference date: 01-11-2016 Through 05-11-2016},
  year      = {2016},
  doi       = {10.18653/v1/D16-1252},
  language  = {English},
  series    = {EMNLP 2016 - Conference on Empirical Methods in Natural Language Processing, Proceedings},
  publisher = {Association for Computational Linguistics (ACL)},
  pages     = {2300--2305},
  booktitle = {EMNLP 2016 - Conference on Empirical Methods in Natural Language Processing, Proceedings},
  address   = {United States},
}