@inproceedings{127fcd7d950144dfa12573cee338bfd0,
title = "Shampoo: Preconditioned stochastic tensor optimization",
abstract = "Preconditioned gradient methods are among the most general and powerful tools in optimization. However, preconditioning requires storing and manipulating prohibitively large matrices. We describe and analyze a new structure-aware preconditioning algorithm, called Shampoo, for stochastic optimization over tensor spaces. Shampoo maintains a set of preconditioning matrices, each of which operates on a single dimension, contracting over the remaining dimensions. We establish convergence guarantees in the stochastic convex setting, the proof of which builds upon matrix trace inequalities. Our experiments with state- of-the-art deep learning models show that Shampoo is capable of converging considerably faster than commonly used optimizers. Surprisingly, although it involves a more complex update rule, Shampoo's runtime per step is comparable in practice to that of simple gradient methods such as SGD, AdaGrad, and Adam.",
author = "Vineet Gupta and Tomer Koren and Yoram Singer",
note = "Publisher Copyright: {\textcopyright} 2018 35th International Conference on Machine Learning, ICML 2018. All rights reserved.; 35th International Conference on Machine Learning, ICML 2018 ; Conference date: 10-07-2018 Through 15-07-2018",
year = "2018",
language = "الإنجليزيّة",
series = "35th International Conference on Machine Learning, ICML 2018",
pages = "2956--2964",
editor = "Jennifer Dy and Andreas Krause",
booktitle = "35th International Conference on Machine Learning, ICML 2018",
}
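
The abstract's per-dimension preconditioning can be made concrete for the order-2 (matrix) case described in the paper: Shampoo accumulates a left statistic L += G G^T and a right statistic R += G^T G from each stochastic gradient G, then updates W <- W - lr * L^{-1/4} G R^{-1/4}. The Python/NumPy sketch below illustrates only this matrix case; the function and variable names, learning rate, and the eigendecomposition-based inverse root are illustrative assumptions, not the authors' reference implementation.

import numpy as np

def shampoo_matrix_step(W, L, R, G, lr=0.1, eps=1e-4):
    # Accumulate per-dimension second-moment statistics:
    # L contracts the gradient over its columns, R over its rows.
    L = L + G @ G.T
    R = R + G.T @ G

    def inv_root(M, p):
        # Inverse p-th root of a symmetric PSD matrix via eigendecomposition.
        # The eps floor on eigenvalues is an assumed numerical-stability guard.
        vals, vecs = np.linalg.eigh(M)
        return (vecs * np.clip(vals, eps, None) ** (-1.0 / p)) @ vecs.T

    # Matrix-case Shampoo update: W <- W - lr * L^{-1/4} G R^{-1/4}.
    W = W - lr * inv_root(L, 4) @ G @ inv_root(R, 4)
    return W, L, R

# Example: one step on a 20x10 weight matrix, with the preconditioner
# statistics started at eps * I as in the paper's initialization.
rng = np.random.default_rng(0)
W = rng.normal(size=(20, 10))
L = 1e-4 * np.eye(20)
R = 1e-4 * np.eye(10)
G = rng.normal(size=(20, 10))  # stand-in for a stochastic gradient
W, L, R = shampoo_matrix_step(W, L, R, G)

For a general order-k tensor the paper keeps one preconditioner per mode and uses exponent -1/(2k); the sketch above covers only k = 2.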