@inproceedings{15333fe1b3bd453ba1f26dd67ee8050b,
title = "Initialization-Dependent Sample Complexity of Linear Predictors and Neural Networks",
abstract = "We provide several new results on the sample complexity of vector-valued linear predictors (parameterized by a matrix), and more generally neural networks. Focusing on size-independent bounds, where only the Frobenius norm distance of the parameters from some fixed reference matrix W0 is controlled, we show that the sample complexity behavior can be surprisingly different from what we might expect given the well-studied setting of scalar-valued linear predictors. This also leads to new sample complexity bounds for feed-forward neural networks, tackling some open questions in the literature, and establishing a new convex linear prediction problem that is provably learnable without uniform convergence.",
author = "Roey Magen and Ohad Shamir",
note = "37th Conference on Neural Information Processing Systems, NeurIPS 2023; Conference date: December 10-16, 2023",
publisher = "Neural Information Processing Systems Foundation",
year = "2023",
language = "English",
volume = "36",
series = "Advances in Neural Information Processing Systems",
pages = "7632--7658",
editor = "A. Oh and T. Naumann and A. Globerson and K. Saenko and M. Hardt and S. Levine",
booktitle = "Advances in Neural Information Processing Systems",
}