@inproceedings{b3283490ead949f7a15108d29ec532c5,
title = "Near-Optimal Regret for Adversarial MDP with Delayed Bandit Feedback",
abstract = "The standard assumption in reinforcement learning (RL) is that agents observe feedback for their actions immediately. However, in practice feedback is often observed in delay. This paper studies online learning in episodic Markov decision process (MDP) with unknown transitions, adversarially changing costs, and unrestricted delayed bandit feedback. More precisely, the feedback for the agent in episode k is revealed only in the end of episode k + dk, where the delay dk can be changing over episodes and chosen by an oblivious adversary.",
author = "Tiancheng Jin and Haipeng Luo and Tal Lancewicki and Yishay Mansour and Aviv Rosenberg",
note = "Publisher Copyright: {\textcopyright} 2022 Neural information processing systems foundation. All rights reserved.; 36th Conference on Neural Information Processing Systems, NeurIPS 2022 ; Conference date: 28-11-2022 Through 09-12-2022",
year = "2022",
language = "English",
series = "Advances in Neural Information Processing Systems",
publisher = "Neural information processing systems foundation",
editor = "S. Koyejo and S. Mohamed and A. Agarwal and D. Belgrave and K. Cho and A. Oh",
booktitle = "Advances in Neural Information Processing Systems 35 - 36th Conference on Neural Information Processing Systems, NeurIPS 2022",
address = "United States",
}