@inproceedings{594fc3deeff940a9ae41b3090513c28f,
title = "Learning Under Delayed Feedback: Implicitly Adapting to Gradient Delays",
abstract = "We consider stochastic convex optimization problems, where several machines act asynchronously in parallel while sharing a common memory. We propose a robust training method for the constrained setting and derive non-asymptotic convergence guarantees that do not depend on prior knowledge of update delays, objective smoothness, or gradient variance. In contrast, existing methods for this setting crucially rely on this prior knowledge, which renders them unsuitable for essentially all shared-resources computational environments, such as clouds and data centers. Concretely, existing approaches are unable to accommodate changes in the delays resulting from dynamic allocation of the machines, while our method implicitly adapts to such changes.",
author = "Aviv, {Rotem Zamir} and Ido Hakimi and Assaf Schuster and Levy, {Kfir Y.}",
note = "Publisher Copyright: Copyright {\textcopyright} 2021 by the author(s); 38th International Conference on Machine Learning, ICML 2021; Conference date: 18-07-2021 through 24-07-2021",
year = "2021",
language = "English",
series = "Proceedings of Machine Learning Research",
publisher = "ML Research Press",
pages = "436--445",
booktitle = "Proceedings of the 38th International Conference on Machine Learning, ICML 2021",
}