@inproceedings{3ef766917ac14a1382186099d6b95948,
title = "Conformal Nucleus Sampling",
abstract = "Language models generate text based on successively sampling the next word. A decoding procedure based on nucleus (top-p) sampling chooses from the smallest possible set of words whose cumulative probability exceeds the probability p. In this work, we assess whether a top-p set is indeed aligned with its probabilistic meaning in various linguistic contexts. We employ conformal prediction, a calibration procedure that focuses on the construction of minimal prediction sets according to a desired confidence level, to calibrate the parameter p as a function of the entropy of the next word distribution. We find that OPT models are overconfident, and that calibration shows a moderate inverse scaling with model size. https://github.com/shauli-ravfogel/conformal-prediction.",
author = "Shauli Ravfogel and Yoav Goldberg and Jacob Goldberger",
note = "Publisher Copyright: {\textcopyright} 2023 Association for Computational Linguistics.; 61st Annual Meeting of the Association for Computational Linguistics, ACL 2023 ; Conference date: 09-07-2023 Through 14-07-2023",
year = "2023",
month = jan,
day = "1",
doi = "10.18653/v1/2023.findings-acl.3",
language = "English",
series = "Proceedings of the Annual Meeting of the Association for Computational Linguistics",
publisher = "Association for Computational Linguistics (ACL)",
pages = "27--34",
booktitle = "Findings of the Association for Computational Linguistics, ACL 2023",
address = "United States",
}