@inproceedings{10.1145/3568444.3568469,
title = {{ExplAInable} Pixels: Investigating One-Pixel Attacks on Deep Learning Models with Explainable Visualizations},
author = {Jonas Keppel and Jonathan Liebers and Jonas Auda and Uwe Gruenefeld and Stefan Schneegass},
url = {https://doi.org/10.1145/3568444.3568469},
doi = {10.1145/3568444.3568469},
isbn = {9781450398206},
year = {2022},
booktitle = {Proceedings of the 21st International Conference on Mobile and Ubiquitous Multimedia},
pages = {231--242},
publisher = {Association for Computing Machinery},
address = {Lisbon, Portugal},
series = {MUM '22},
abstract = {Deep learning models now enable numerous safety-critical applications, such as biometric authentication, medical diagnosis support, and self-driving cars. However, previous studies have repeatedly demonstrated that these models are vulnerable to slight modifications of their inputs, so-called adversarial attacks. Hence, researchers have proposed investigating examples of these attacks with explainable artificial intelligence to understand them better. Following this line of work, we developed an expert tool for exploring adversarial attacks and defenses against them. To demonstrate the capabilities of our visualization tool, we worked with the publicly available CIFAR-10 dataset and generated one-pixel attacks. We then conducted an online evaluation with 16 experts. We found that our tool is usable and practical, providing evidence that it can support understanding, explaining, and preventing adversarial examples.},
keywords = {Deep Learning, Security},
}