@inproceedings{03f62e5d34494519a1b3b1b55d32446f,
title = "Alias-Free Convnets: Fractional Shift Invariance via Polynomial Activations",
abstract = "Although CNNs are believed to be invariant to translations, recent works have shown this is not the case due to aliasing effects that stem from down-sampling layers. The existing architectural solutions to prevent the aliasing effects are partial since they do not solve those effects that originate in non-linearities. We propose an extended anti-aliasing method that tackles both down-sampling and non-linear layers, thus creating truly alias-free, shift-invariant CNNs (our code is available at github.com/hmichaeli/alias-free-convnets/). We show that the presented model is invariant to integer as well as fractional (i.e., sub-pixel) translations, thus outperforming other shift-invariant methods in terms of robustness to adversarial translations.",
keywords = "Deep learning architectures and techniques",
author = "Hagay Michaeli and Tomer Michaeli and Daniel Soudry",
note = "Publisher Copyright: {\textcopyright} 2023 IEEE; 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2023; Conference date: 18-06-2023 through 22-06-2023",
year = "2023",
doi = "10.1109/CVPR52729.2023.01567",
language = "English",
series = "Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
pages = "16333--16342",
booktitle = "Proceedings - 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2023",
}