@inproceedings{4a19d546224940638836d18382f07d66,
title = "Brief announcement: Strong scaling of matrix multiplication algorithms and memory-independent communication lower bounds",
abstract = "A parallel algorithm has perfect strong scaling if its running time on P processors is linear in 1/P, including all communication costs. Distributed-memory parallel algorithms for matrix multiplication with perfect strong scaling have only recently been found. One is based on classical matrix multiplication (Solomonik and Demmel, 2011), and one is based on Strassen's fast matrix multiplication (Ballard, Demmel, Holtz, Lipshitz, and Schwartz, 2012). Both algorithms scale perfectly, but only up to some number of processors where the inter-processor communication no longer scales. We obtain a memory-independent communication cost lower bound on classical and Strassen-based distributed-memory matrix multiplication algorithms. These bounds imply that no classical or Strassen-based parallel matrix multiplication algorithm can strongly scale perfectly beyond the ranges already attained by the two parallel algorithms mentioned above. The memory-independent bounds and the strong scaling bounds generalize to other algorithms. Copyright is held by the author/owner(s).",
keywords = "Communication-avoiding algorithms, Fast matrix multiplication, Strong scaling",
author = "Grey Ballard and James Demmel and Olga Holtz and Benjamin Lipshitz and Oded Schwartz",
year = "2012",
doi = "10.1145/2312005.2312021",
language = "الإنجليزيّة",
isbn = "9781450312134",
series = "Annual ACM Symposium on Parallelism in Algorithms and Architectures",
pages = "77--79",
booktitle = "SPAA'12 - Proceedings of the 24th ACM Symposium on Parallelism in Algorithms and Architectures",
note = "24th ACM Symposium on Parallelism in Algorithms and Architectures, SPAA'12 ; Conference date: 25-06-2012 Through 27-06-2012",
}