@inproceedings{5a00485ae3444d2ebee3026be6c0e3ec,
  title     = {Discussion Paper: The Integrity of Medical {AI}},
  abstract  = {Deep learning has proven itself to be an incredible asset to the medical community. However, with offensive AI, the technology can be turned against medical community; adversarial samples can be used to cause misdiagnosis and medical deepfakes can be used fool both radiologists and machines alike. In this short discussion paper, we talk about the issue of offensive AI and from the perspective of healthcare. We discuss how defense researchers in this domain have responded to the threat and their the current challenges. We conclude by arguing that conventional security mechanisms are a better approach towards mitigating these threats over algorithm based solutions.},
  keywords  = {adversarial examples, adversarial machine learning, deep fake, deepfake, medical deepfake, medicine, offensive ai, radiology, security},
  author    = {Mirsky, Yisroel},
  note      = {Publisher Copyright: {\textcopyright} 2022 ACM.; 1st ACM Workshop on Security Implications of Deepfakes and Cheapfakes, WDC 2022, co-located with ACM AsiaCCS 2022 ; Conference date: 30-05-2022},
  year      = {2022},
  month     = may,
  day       = {30},
  doi       = {10.1145/3494109.3527191},
  language  = {American English},
  series    = {WDC 2022 - Proceedings of the 1st Workshop on Security Implications of Deepfakes and Cheapfakes},
  pages     = {31--33},
  booktitle = {WDC 2022 - Proceedings of the 1st Workshop on Security Implications of Deepfakes and Cheapfakes},
  publisher = {ACM},
}