@inbook{c0c8ead28a784ea8a3dea6fe54a3b2f1,
title = "Refined Algorithms for Infinitely Many-Armed Bandits with Deterministic Rewards",
abstract = "We consider a variant of the Multi-Armed Bandit problem which involves a large pool of a priori identical arms (or items). Each arm is associated with a deterministic value, which is sampled from a probability distribution with unknown maximal value, and is revealed once that arm is chosen. At each time instant the agent may choose a new arm (with unknown value), or a previously-chosen arm whose value is already revealed. The goal is to minimize the cumulative regret relative to the best arm in the pool. Previous work has established a lower bound on the regret for this model, depending on the functional form of the tail of the sample distribution, as well as algorithms that attain this bound up to logarithmic terms. Here, we present a more refined algorithm that attains the same order as the lower bound. We further consider several variants of the basic model, involving an anytime algorithm and the case of non-retainable arms. Numerical experiments demonstrate the superior performance of the suggested algorithms.",
keywords = "Many-armed bandits, Regret minimization",
author = "Yahel David and Nahum Shimkin",
note = "Publisher Copyright: {\textcopyright} Springer International Publishing Switzerland 2015; European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases, ECML PKDD 2015; Conference date: 07-09-2015 through 11-09-2015",
year = "2015",
doi = "10.1007/978-3-319-23528-8_29",
language = "English",
isbn = "978-3-319-23527-1",
volume = "9284",
series = "Lecture Notes in Artificial Intelligence",
pages = "464--479",
editor = "Annalisa Appice and Jo{\~a}o Gama and Costa, {V{\'i}tor Santos} and Al{\'i}pio Jorge and Rodrigues, {Pedro Pereira} and Carlos Soares",
booktitle = "Machine Learning and Knowledge Discovery in Databases, ECML PKDD 2015, Part I",
}