@inproceedings{097c7623fdaa464d977b42f5c9de9323,
  title     = {Never Go Full Batch (in Stochastic Convex Optimization)},
  abstract  = {We study the generalization performance of full-batch optimization algorithms for stochastic convex optimization: these are first-order methods that only access the exact gradient of the empirical risk (rather than gradients with respect to individual data points), that include a wide range of algorithms such as gradient descent, mirror descent, and their regularized and/or accelerated variants. We provide a new separation result showing that, while algorithms such as stochastic gradient descent can generalize and optimize the population risk to within {$\epsilon$} after {$O(1/\epsilon^2)$} iterations, full-batch methods either need at least {$\Omega(1/\epsilon^4)$} iterations or exhibit a dimension-dependent sample complexity.},
  author    = {Amir, Idan and Koren, Tomer and Carmon, Yair and Livni, Roi},
  note      = {Publisher Copyright: {\textcopyright} 2021 Neural information processing systems foundation. All rights reserved.; 35th Conference on Neural Information Processing Systems, NeurIPS 2021 ; Conference date: 06-12-2021 Through 14-12-2021},
  year      = {2021},
  language  = {English},
  series    = {Advances in Neural Information Processing Systems},
  publisher = {Neural information processing systems foundation},
  pages     = {25033--25043},
  editor    = {Ranzato, Marc'Aurelio and Beygelzimer, Alina and Dauphin, Yann and Liang, {Percy S.} and {Wortman Vaughan}, Jenn},
  booktitle = {Advances in Neural Information Processing Systems 34 - 35th Conference on Neural Information Processing Systems, {NeurIPS} 2021},
  address   = {United States},
}