@inproceedings{380e6ec50b4d4ee3bec3c63353f593a3,
title = "DISBELIEVE: Distance Between Client Models Is Very Essential for Effective Local Model Poisoning Attacks",
abstract = "Federated learning is a promising direction to tackle the privacy issues related to sharing patients{\textquoteright} sensitive data. Often, federated systems in the medical image analysis domain assume that the participating local clients are honest. Several studies report mechanisms through which a set of malicious clients can be introduced that can poison the federated setup, hampering the performance of the global model. To overcome this, robust aggregation methods have been proposed that defend against those attacks. We observe that most of the state-of-the-art robust aggregation methods are heavily dependent on the distance between the parameters or gradients of malicious clients and benign clients, which makes them prone to local model poisoning attacks when the parameters or gradients of malicious and benign clients are close. Leveraging this, we introduce DISBELIEVE, a local model poisoning attack that creates malicious parameters or gradients such that their distance to benign clients{\textquoteright} parameters or gradients is low respectively but at the same time their adverse effect on the global model{\textquoteright}s performance is high. Experiments on three publicly available medical image datasets demonstrate the efficacy of the proposed DISBELIEVE attack as it significantly lowers the performance of the state-of-the-art robust aggregation methods for medical image analysis. Furthermore, compared to state-of-the-art local model poisoning attacks, DISBELIEVE attack is also effective on natural images where we observe a severe drop in classification performance of the global model for multi-class classification on benchmark dataset CIFAR-10.",
keywords = "Deep Learning, Federated Learning, Model Poisoning Attacks",
author = "Indu Joshi and Priyank Upadhya and Nayak, {Gaurav Kumar} and Peter Sch{\"u}ffler and Nassir Navab",
note = "Publisher Copyright: {\textcopyright} The Author(s), under exclusive license to Springer Nature Switzerland AG 2023.; 26th International Conference on Medical Image Computing and Computer-Assisted Intervention , MICCAI 2023 ; Conference date: 08-10-2023 Through 12-10-2023",
year = "2023",
doi = "10.1007/978-3-031-47401-9_29",
language = "English",
isbn = "9783031474002",
series = "Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)",
publisher = "Springer Science and Business Media Deutschland GmbH",
pages = "297--310",
editor = "Celebi, {M. Emre} and Salekin, {Md Sirajus} and Hyunwoo Kim and Shadi Albarqouni",
booktitle = "Medical Image Computing and Computer Assisted Intervention – MICCAI 2023 Workshops - ISIC 2023, Care-AI 2023, MedAGI 2023, DeCaF 2023, Held in Conjunction with MICCAI 2023, Proceedings",
}