@inproceedings{189f9b7d540a440c80a60e19d94c6fef,
title = "RoMA: A Method for Neural Network Robustness Measurement and Assessment",
abstract = "Neural network models have become the leading solution for a large variety of tasks, such as classification, natural language processing, and others. However, their reliability is heavily plagued by adversarial inputs: inputs generated by adding tiny perturbations to correctly-classified inputs, and for which the neural network produces erroneous results. In this paper, we present a new method called Robustness Measurement and Assessment (RoMA), which measures the robustness of a neural network model against such adversarial inputs. Specifically, RoMA determines the probability that a random input perturbation might cause misclassification. The method allows us to provide formal guarantees regarding the expected frequency of errors that a trained model will encounter after deployment. The type of robustness assessment afforded by RoMA is inspired by state-of-the-art certification practices, and could constitute an important step toward integrating neural networks in safety-critical systems.",
keywords = "Adversarial examples, Certification, Neural networks, Robustness",
author = "Natan Levy and Guy Katz",
note = "Publisher Copyright: {\textcopyright} 2023, The Author(s), under exclusive license to Springer Nature Singapore Pte Ltd.; 29th International Conference on Neural Information Processing, ICONIP 2022 ; Conference date: 22-11-2022 Through 26-11-2022",
year = "2023",
doi = "10.1007/978-981-99-1639-9\_8",
language = "الإنجليزيّة",
isbn = "9789819916382",
series = "Communications in Computer and Information Science",
publisher = "Springer Science and Business Media Deutschland GmbH",
pages = "92--105",
editor = "Mohammad Tanveer and Sonali Agarwal and Seiichi Ozawa and Asif Ekbal and Adam Jatowt",
booktitle = "Neural Information Processing - 29th International Conference, ICONIP 2022, Proceedings",
address = "ألمانيا",
}