@inproceedings{35053f249af144498020a37f014ae20d,
title = "Same Task, More Tokens: the Impact of Input Length on the Reasoning Performance of Large Language Models",
abstract = "This paper explores the impact of extending input lengths on the capabilities of Large Language Models (LLMs). Despite LLMs' advancements in recent times, their performance consistency across different input lengths is not well understood. We investigate this aspect by introducing a novel QA reasoning framework, specifically designed to assess the impact of input length. We isolate the effect of input length using multiple versions of the same sample, each being extended with padding of different lengths, types and locations. Our findings show a notable degradation in LLMs' reasoning performance at much shorter input lengths than their technical maximum. We show that the degradation trend appears in every version of our dataset, although at different intensities. Additionally, our study reveals that the traditional metric of next word prediction correlates negatively with the performance of LLMs on our reasoning dataset. We analyse our results and identify failure modes that can serve as useful guides for future research, potentially informing strategies to address the limitations observed in LLMs.",
author = "Mosh Levy and Alon Jacoby and Yoav Goldberg",
note = "Publisher Copyright: {\textcopyright} 2024 Association for Computational Linguistics; 62nd Annual Meeting of the Association for Computational Linguistics, ACL 2024; Conference date: 11-08-2024 through 16-08-2024",
year = "2024",
language = "English",
series = "Proceedings of the Annual Meeting of the Association for Computational Linguistics",
publisher = "Association for Computational Linguistics (ACL)",
pages = "15339--15353",
editor = "Lun-Wei Ku and Andre F. T. Martins and Vivek Srikumar",
booktitle = "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
address = "United States",
}