Machine Learning (ML) is crucial in many sectors, including computer vision. However, ML models trained on sensitive data face security challenges, as they can be attacked and leak information. Privacy-Preserving Machine Learning (PPML) addresses this by using Differential Privacy (DP) to balance utility and privacy. This study identifies image dataset characteristics that affect the utility and vulnerability of private and non-private Convolutional Neural Network (CNN) models. Through analyzing multiple datasets and privacy budgets, we find that imbalanced datasets increase vulnerability in minority classes, but DP mitigates this issue. Data…(more)
Please log in to take part in the discussion (add own reviews or comments).
Cite this publication
More citation styles
- please select -
%0 Journal Article
%1 lange2024assessing
%A Lange, Lucas
%A Heykeroth, Maurice-Maximilian
%A Rahm, Erhard
%D 2024
%I arXiv
%J arXiv preprint arXiv:2409.01329
%K area_responsibleai area_bigdata ep, Cryptography and Security (cs.CR), Computer Vision and Pattern Recognition (cs.CV), Machine Learning (cs.LG), FOS: Computer and information sciences
%R 10.48550/ARXIV.2409.01329
%T Assessing the Impact of Image Dataset Features on Privacy-Preserving Machine Learning
%X Machine Learning (ML) is crucial in many sectors, including computer vision. However, ML models trained on sensitive data face security challenges, as they can be attacked and leak information. Privacy-Preserving Machine Learning (PPML) addresses this by using Differential Privacy (DP) to balance utility and privacy. This study identifies image dataset characteristics that affect the utility and vulnerability of private and non-private Convolutional Neural Network (CNN) models. Through analyzing multiple datasets and privacy budgets, we find that imbalanced datasets increase vulnerability in minority classes, but DP mitigates this issue. Datasets with fewer classes improve both model utility and privacy, while high entropy or low Fisher Discriminant Ratio (FDR) datasets deteriorate the utility-privacy trade-off. These insights offer valuable guidance for practitioners and researchers in estimating and optimizing the utility-privacy trade-off in image datasets, helping to inform data and privacy modifications for better outcomes based on dataset characteristics.
@article{lange2024assessing,
  abstract      = {Machine Learning (ML) is crucial in many sectors, including computer vision. However, ML models trained on sensitive data face security challenges, as they can be attacked and leak information. Privacy-Preserving Machine Learning (PPML) addresses this by using Differential Privacy (DP) to balance utility and privacy. This study identifies image dataset characteristics that affect the utility and vulnerability of private and non-private Convolutional Neural Network (CNN) models. Through analyzing multiple datasets and privacy budgets, we find that imbalanced datasets increase vulnerability in minority classes, but DP mitigates this issue. Datasets with fewer classes improve both model utility and privacy, while high entropy or low Fisher Discriminant Ratio (FDR) datasets deteriorate the utility-privacy trade-off. These insights offer valuable guidance for practitioners and researchers in estimating and optimizing the utility-privacy trade-off in image datasets, helping to inform data and privacy modifications for better outcomes based on dataset characteristics.},
  added-at      = {2024-11-28T13:27:37.000+0100},
  archiveprefix = {arXiv},
  author        = {Lange, Lucas and Heykeroth, Maurice-Maximilian and Rahm, Erhard},
  biburl        = {https://puma.scadsai.uni-leipzig.de/bibtex/2e48fc10e3401466a928a16ecc5ef942b/scadsfct},
  copyright     = {arXiv.org perpetual, non-exclusive license},
  doi           = {10.48550/ARXIV.2409.01329},
  eprint        = {2409.01329},
  file          = {:lange2024assessing.pdf:PDF:http\://arxiv.org/pdf/2409.01329v1},
  interhash     = {8d45c7ea77cad4618d75f45d037672b6},
  intrahash     = {e48fc10e3401466a928a16ecc5ef942b},
  journal       = {arXiv preprint arXiv:2409.01329},
  keywords      = {area_responsibleai area_bigdata ep, Cryptography and Security (cs.CR), Computer Vision and Pattern Recognition (cs.CV), Machine Learning (cs.LG), FOS: Computer and information sciences},
  month         = sep,
  primaryclass  = {cs.LG},
  publisher     = {arXiv},
  timestamp     = {2024-11-28T17:40:57.000+0100},
  title         = {Assessing the Impact of Image Dataset Features on {Privacy-Preserving Machine Learning}},
  year          = 2024,
}