2025
Abourayya, Amr; Kleesiek, Jens; Rao, Kanishka; Ayday, Erman; Rao, Bharat; Webb, Geoffrey I.; Kamp, Michael Little is Enough: Boosting Privacy by Sharing Only Hard Labels in Federated Semi-Supervised Learning (Proceedings Article) In: Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), AAAI, Forthcoming. @inproceedings{abourayya2025little,
title = {Little is Enough: Boosting Privacy by Sharing Only Hard Labels in Federated Semi-Supervised Learning},
author = {Amr Abourayya and Jens Kleesiek and Kanishka Rao and Erman Ayday and Bharat Rao and Geoffrey I. Webb and Michael Kamp},
year = {2025},
date = {2025-02-27},
urldate = {2025-02-27},
booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence (AAAI)},
publisher = {AAAI},
keywords = {aimhi, FedCT, federated learning, semi-supervised},
pubstate = {forthcoming},
tppubtype = {inproceedings}
}
Dalleiger, Sebastian; Vreeken, Jilles; Kamp, Michael Federated Binary Matrix Factorization using Proximal Optimization (Proceedings Article) In: Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), AAAI, Forthcoming. @inproceedings{dalleiger2025federated,
title = {Federated Binary Matrix Factorization using Proximal Optimization},
author = {Sebastian Dalleiger and Jilles Vreeken and Michael Kamp},
year = {2025},
date = {2025-02-27},
urldate = {2025-02-27},
booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence (AAAI)},
publisher = {AAAI},
keywords = {},
pubstate = {forthcoming},
tppubtype = {inproceedings}
}
2024
Salazer, Thomas L; Sheth, Naitik; Masud, Avais; Serur, David; Hidalgo, Guillermo; Aqeel, Iram; Adilova, Linara; Kamp, Michael; Fitzpatrick, Tim; Krishnan, Sriram; Rao, Kanishka; Rao, Bharat Artificial Intelligence (AI)-Driven Screening for Undiscovered CKD (Journal Article) In: Journal of the American Society of Nephrology, vol. 35, iss. 10S, pp. 10.1681, 2024. @article{salazer2024artificial,
title = {Artificial Intelligence (AI)-Driven Screening for Undiscovered CKD},
author = {Thomas L Salazer and Naitik Sheth and Avais Masud and David Serur and Guillermo Hidalgo and Iram Aqeel and Linara Adilova and Michael Kamp and Tim Fitzpatrick and Sriram Krishnan and Kanishka Rao and Bharat Rao},
year = {2024},
date = {2024-10-01},
journal = {Journal of the American Society of Nephrology},
volume = {35},
issue = {10S},
pages = {10.1681},
publisher = {LWW},
keywords = {CKD, healthcare, medicine, nephrology},
pubstate = {published},
tppubtype = {article}
}
Singh, Sidak Pal; Adilova, Linara; Kamp, Michael; Fischer, Asja; Schölkopf, Bernhard; Hofmann, Thomas Landscaping Linear Mode Connectivity (Proceedings Article) In: ICML Workshop on High-dimensional Learning Dynamics: The Emergence of Structure and Reasoning, 2024. @inproceedings{singh2024landscaping,
title = {Landscaping Linear Mode Connectivity},
author = {Sidak Pal Singh and Linara Adilova and Michael Kamp and Asja Fischer and Bernhard Sch\"{o}lkopf and Thomas Hofmann},
year = {2024},
date = {2024-09-01},
booktitle = {ICML Workshop on High-dimensional Learning Dynamics: The Emergence of Structure and Reasoning},
keywords = {deep learning, linear mode connectivity, theory of deep learning},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Siming; Gou, Liang; Kamp, Michael; Sun, Dong Visual Computing for Autonomous Driving (Journal Article) In: IEEE Computer Graphics and Applications, vol. 44, iss. 3, pp. 11-13, 2024. @article{chen2024visual,
title = {Visual Computing for Autonomous Driving},
author = {Siming Chen and Liang Gou and Michael Kamp and Dong Sun},
year = {2024},
date = {2024-06-21},
urldate = {2024-06-21},
journal = {IEEE Computer Graphics and Applications},
volume = {44},
issue = {3},
pages = {11-13},
publisher = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Adilova, Linara; Andriushchenko, Maksym; Kamp, Michael; Fischer, Asja; Jaggi, Martin Layer-wise Linear Mode Connectivity (Proceedings Article) In: International Conference on Learning Representations (ICLR), Curran Associates, Inc, 2024. @inproceedings{adilova2024layerwise,
title = {Layer-wise Linear Mode Connectivity},
author = {Linara Adilova and Maksym Andriushchenko and Michael Kamp and Asja Fischer and Martin Jaggi},
url = {https://openreview.net/pdf?id=LfmZh91tDI},
year = {2024},
date = {2024-05-07},
urldate = {2024-05-07},
booktitle = {International Conference on Learning Representations (ICLR)},
publisher = {Curran Associates, Inc},
abstract = {Averaging neural network parameters is an intuitive method for fusing the knowledge of two independent models. It is most prominently used in federated learning. If models are averaged at the end of training, this can only lead to a well-performing model if the loss surface of interest is very particular, i.e., the loss in the exact middle between the two models needs to be sufficiently low. This is impossible to guarantee for the non-convex losses of state-of-the-art networks. For averaging models trained on vastly different datasets, it was proposed to average only the parameters of particular layers or combinations of layers, resulting in better-performing models. To get a better understanding of the effect of layer-wise averaging, we analyse the performance of the models that result from averaging single layers or groups of layers. Based on our empirical and theoretical investigation, we introduce a novel notion of layer-wise linear connectivity and show that deep networks do not have layer-wise barriers between them. We additionally analyze layer-wise personalized averaging and conjecture that in this particular problem setup all partial aggregations result in approximately the same performance.},
keywords = {deep learning, layer-wise, linear mode connectivity},
pubstate = {published},
tppubtype = {inproceedings}
}
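To make the layer-wise averaging concrete, the following minimal PyTorch sketch averages a single named layer between two trained models while keeping all other parameters from the first model; a layer-wise barrier can then be estimated by comparing the merged model's loss at the midpoint against the endpoint losses. Function and layer names here are illustrative assumptions, not the authors' code.

import copy
import torch

def average_single_layer(model_a, model_b, layer_prefix, alpha=0.5):
    # Interpolate only parameters whose names start with layer_prefix
    # (e.g. "layer3." in a torchvision ResNet); everything else is
    # taken from model_a. Assumes floating-point parameter tensors.
    sd_a, sd_b = model_a.state_dict(), model_b.state_dict()
    merged_sd = copy.deepcopy(sd_a)
    for name in merged_sd:
        if name.startswith(layer_prefix) and merged_sd[name].is_floating_point():
            merged_sd[name] = (1 - alpha) * sd_a[name] + alpha * sd_b[name]
    merged = copy.deepcopy(model_a)
    merged.load_state_dict(merged_sd)
    return merged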
Yang, Fan; Le Bodic, Pierre; Kamp, Michael; Boley, Mario Orthogonal Gradient Boosting for Interpretable Additive Rule Ensembles (Proceedings Article) In: Proceedings of the 27th International Conference on Artificial Intelligence and Statistics (AISTATS), 2024. @inproceedings{yang2024orthogonal,
title = {Orthogonal Gradient Boosting for Interpretable Additive Rule Ensembles},
author = {Fan Yang and Pierre Le Bodic and Michael Kamp and Mario Boley},
url = {http://trustworthyml.de/wp-content/uploads/2024/12/yang24b.pdf},
year = {2024},
date = {2024-05-02},
urldate = {2024-05-02},
booktitle = {Proceedings of the 27th International Conference on Artificial Intelligence and Statistics (AISTATS)},
abstract = {Gradient boosting of prediction rules is an efficient approach to learning potentially interpretable yet accurate probabilistic models. However, actual interpretability requires limiting the number and size of the generated rules, and existing boosting variants are not designed for this purpose. Though corrective boosting refits all rule weights in each iteration to minimise prediction risk, the included rule conditions tend to be sub-optimal, because commonly used objective functions fail to anticipate this refitting. Here, we address this issue with a new objective function that measures the angle between the risk gradient vector and the projection of the condition output vector onto the orthogonal complement of the already selected conditions. This approach correctly approximates the ideal update of adding the risk gradient itself to the model and favours the inclusion of more general and thus shorter rules. As we demonstrate using a wide range of prediction tasks, this significantly improves the comprehensibility/accuracy trade-off of the fitted ensemble. Additionally, we show how objective values for related rule conditions can be computed incrementally to avoid any substantial computational overhead of the new method.},
keywords = {complexity, explainability, interpretability, interpretable, machine learning, rule ensemble, rule mining, XAI},
pubstate = {published},
tppubtype = {inproceedings}
}
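The objective described in this abstract fits in a few lines of NumPy. The sketch below is one plausible reading of it (hypothetical function name, not the authors' implementation): score a candidate rule by the cosine of the angle between the risk gradient and the component of the rule's output vector that is orthogonal to the outputs of the already selected conditions.

import numpy as np

def orthogonal_gradient_score(g, q, Q=None):
    # g: risk gradient over the n training examples, shape (n,)
    # q: 0/1 output vector of the candidate rule condition, shape (n,)
    # Q: output vectors of already selected conditions, shape (n, k), or None
    q = q.astype(float)
    if Q is not None and Q.size > 0:
        # Project q onto the orthogonal complement of the span of the
        # selected conditions' output vectors.
        coeffs, *_ = np.linalg.lstsq(Q, q, rcond=None)
        q = q - Q @ coeffs
    q_norm = np.linalg.norm(q)
    if q_norm == 0.0:
        return 0.0  # candidate is linearly redundant with selected conditions
    return abs(g @ q) / (np.linalg.norm(g) * q_norm)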
2023
Kamp, Michael; Fischer, Jonas; Vreeken, Jilles Federated Learning from Small Datasets (Proceedings Article) In: International Conference on Learning Representations (ICLR), 2023. @inproceedings{kamp2023federated,
title = {Federated Learning from Small Datasets},
author = {Michael Kamp and Jonas Fischer and Jilles Vreeken},
year = {2023},
date = {2023-05-01},
urldate = {2023-05-01},
booktitle = {International Conference on Learning Representations (ICLR)},
note = {arXiv preprint arXiv:2110.03469},
keywords = {black-box, black-box parallelization, daisy, daisy-chaining, FedDC, federated learning, small, small datasets},
pubstate = {published},
tppubtype = {inproceedings}
}
Mian, Osman; Kaltenpoth, David; Kamp, Michael Nothing but Regrets - Privacy-Preserving Federated Causal Discovery (Proceedings Article) In: International Conference on Artificial Intelligence and Statistics (AISTATS), 2023. @inproceedings{mian2022nothing,
title = {Nothing but Regrets - Privacy-Preserving Federated Causal Discovery},
author = {Osman Mian and David Kaltenpoth and Michael Kamp},
year = {2023},
date = {2023-04-25},
urldate = {2023-04-25},
booktitle = {International Conference on Artificial Intelligence and Statistics (AISTATS)},
keywords = {causal discovery, causality, explainable, federated, federated causal discovery, federated learning, interpretable},
pubstate = {published},
tppubtype = {inproceedings}
}
Mian, Osman; Kamp, Michael; Vreeken, Jilles Information-Theoretic Causal Discovery and Intervention Detection over Multiple Environments (Proceedings Article) In: Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2023. @inproceedings{mian2023informationb,
title = {Information-Theoretic Causal Discovery and Intervention Detection over Multiple Environments},
author = {Osman Mian and Michael Kamp and Jilles Vreeken},
year = {2023},
date = {2023-02-07},
urldate = {2023-02-07},
booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence (AAAI)},
keywords = {causal discovery, causality, federated, federated causal discovery, federated learning, intervention},
pubstate = {published},
tppubtype = {inproceedings}
}
Adilova, Linara; Chen, Siming; Kamp, Michael Informed Novelty Detection in Sequential Data by Per-Cluster Modeling (Proceedings Article) In: ICML Workshop on Artificial Intelligence & Human Computer Interaction, 2023. @inproceedings{adilova2023informed,
title = {Informed Novelty Detection in Sequential Data by Per-Cluster Modeling},
author = {Linara Adilova and Siming Chen and Michael Kamp},
year = {2023},
date = {2023-01-01},
booktitle = {ICML Workshop on Artificial Intelligence \& Human Computer Interaction},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Adilova, Linara; Abourayya, Amr; Li, Jianning; Dada, Amin; Petzka, Henning; Egger, Jan; Kleesiek, Jens; Kamp, Michael FAM: Relative Flatness Aware Minimization (Proceedings Article) In: ICML Workshop on Topology, Algebra, and Geometry in Machine Learning, 2023. @inproceedings{adilova2023fam,
title = {FAM: Relative Flatness Aware Minimization},
author = {Linara Adilova and Amr Abourayya and Jianning Li and Amin Dada and Henning Petzka and Jan Egger and Jens Kleesiek and Michael Kamp},
year = {2023},
date = {2023-01-01},
booktitle = {ICML Workshop on Topology, Algebra, and Geometry in Machine Learning},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Adilova, Linara; Kamp, Michael; Andrienko, Gennady; Andrienko, Natalia Re-interpreting rules interpretability (Journal Article) In: International Journal of Data Science and Analytics, pp. 1–21, 2023. @article{adilova2023re,
title = {Re-interpreting rules interpretability},
author = {Linara Adilova and Michael Kamp and Gennady Andrienko and Natalia Andrienko},
year = {2023},
date = {2023-01-01},
journal = {International Journal of Data Science and Analytics},
pages = {1--21},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Li, Jianning; Ferreira, André; Puladi, Behrus; Alves, Victor; Kamp, Michael; Kim, Moon; Nensa, Felix; Kleesiek, Jens; Ahmadi, Seyed-Ahmad; Egger, Jan Open-source skull reconstruction with MONAI (Journal Article) In: SoftwareX, vol. 23, pp. 101432, 2023. @article{li2023open,
title = {Open-source skull reconstruction with MONAI},
author = {Jianning Li and Andr\'{e} Ferreira and Behrus Puladi and Victor Alves and Michael Kamp and Moon Kim and Felix Nensa and Jens Kleesiek and Seyed-Ahmad Ahmadi and Jan Egger},
year = {2023},
date = {2023-01-01},
journal = {SoftwareX},
volume = {23},
pages = {101432},
publisher = {Elsevier},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2022
Abourayya, Amr; Kamp, Michael; Ayday, Erman; Kleesiek, Jens; Rao, Kanishka; Webb, Geoffrey I.; Rao, Bharat AIMHI: Protecting Sensitive Data through Federated Co-Training (Workshop) 2022. @workshop{abourayya2022aimhi,
title = {AIMHI: Protecting Sensitive Data through Federated Co-Training},
author = {Amr Abourayya and Michael Kamp and Erman Ayday and Jens Kleesiek and Kanishka Rao and Geoffrey I. Webb and Bharat Rao},
url = {http://trustworthyml.de/wp-content/uploads/2022/12/45_aimhi_protecting_sensitive_dat.pdf},
year = {2022},
date = {2022-12-02},
urldate = {2022-12-02},
howpublished = {FL-NeurIPS22},
keywords = {aimhi, co-training, deep learning, federated learning, privacy},
pubstate = {published},
tppubtype = {workshop}
}
Mian, Osman; Kaltenpoth, David; Kamp, Michael Regret-based Federated Causal Discovery (Proceedings Article) In: The KDD'22 Workshop on Causal Discovery, pp. 61–69, PMLR 2022. @inproceedings{mian2022regret,
title = {Regret-based Federated Causal Discovery},
author = {Osman Mian and David Kaltenpoth and Michael Kamp},
year = {2022},
date = {2022-01-01},
booktitle = {The KDD'22 Workshop on Causal Discovery},
pages = {61--69},
organization = {PMLR},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Jianning; Ferreira, André; Puladi, Behrus; Alves, Victor; Kamp, Michael; Kim, Moon-Sung; Nensa, Felix; Kleesiek, Jens; Ahmadi, Seyed-Ahmad; Egger, Jan Open-Source Skull Reconstruction with MONAI (Journal Article) In: arXiv preprint arXiv:2211.14051, 2022. @article{li2022open,
title = {Open-Source Skull Reconstruction with MONAI},
author = {Jianning Li and Andr\'{e} Ferreira and Behrus Puladi and Victor Alves and Michael Kamp and Moon-Sung Kim and Felix Nensa and Jens Kleesiek and Seyed-Ahmad Ahmadi and Jan Egger},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {arXiv preprint arXiv:2211.14051},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wang, Junhong; Li, Yun; Zhou, Zhaoyu; Wang, Chengshun; Hou, Yijie; Zhang, Li; Xue, Xiangyang; Kamp, Michael; Zhang, Xiaolong; Chen, Siming When, Where and How does it fail? A Spatial-temporal Visual Analytics Approach for Interpretable Object Detection in Autonomous Driving (Journal Article) In: IEEE Transactions on Visualization and Computer Graphics, 2022. @article{wang2022and,
title = {When, Where and How does it fail? A Spatial-temporal Visual Analytics Approach for Interpretable Object Detection in Autonomous Driving},
author = {Junhong Wang and Yun Li and Zhaoyu Zhou and Chengshun Wang and Yijie Hou and Li Zhang and Xiangyang Xue and Michael Kamp and Xiaolong Zhang and Siming Chen},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Visualization and Computer Graphics},
publisher = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2021
Linsner, Florian; Adilova, Linara; Däubener, Sina; Kamp, Michael; Fischer, Asja Approaches to Uncertainty Quantification in Federated Deep Learning (Workshop) Machine Learning and Principles and Practice of Knowledge Discovery in Databases: International Workshops of ECML PKDD 2021, vol. 2, Springer, 2021. @workshop{linsner2021uncertainty,
title = {Approaches to Uncertainty Quantification in Federated Deep Learning},
author = {Florian Linsner and Linara Adilova and Sina D\"{a}ubener and Michael Kamp and Asja Fischer},
url = {https://michaelkamp.org/wp-content/uploads/2022/04/federatedUncertainty.pdf},
year = {2021},
date = {2021-09-17},
urldate = {2021-09-17},
booktitle = {Machine Learning and Principles and Practice of Knowledge Discovery in Databases: International Workshops of ECML PKDD 2021},
issuetitle = {Workshop on Parallel, Distributed, and Federated Learning},
volume = {2},
pages = {128-145},
publisher = {Springer},
keywords = {federated learning, uncertainty},
pubstate = {published},
tppubtype = {workshop}
}
Li, Xiaoxiao; Jiang, Meirui; Zhang, Xiaofei; Kamp, Michael; Dou, Qi FedBN: Federated Learning on Non-IID Features via Local Batch Normalization (Proceedings Article) In: Proceedings of the 9th International Conference on Learning Representations (ICLR), 2021. @inproceedings{li2021fedbn,
title = {FedBN: Federated Learning on Non-IID Features via Local Batch Normalization},
author = {Xiaoxiao Li and Meirui Jiang and Xiaofei Zhang and Michael Kamp and Qi Dou},
url = {https://michaelkamp.org/wp-content/uploads/2021/05/fedbn_federated_learning_on_non_iid_features_via_local_batch_normalization.pdf
https://michaelkamp.org/wp-content/uploads/2021/05/FedBN_appendix.pdf},
year = {2021},
date = {2021-05-03},
urldate = {2021-05-03},
booktitle = {Proceedings of the 9th International Conference on Learning Representations (ICLR)},
abstract = {The emerging paradigm of federated learning (FL) strives to enable collaborative training of deep models on the network edge without centrally aggregating raw data and hence improving data privacy. In most cases, the assumption of independent and identically distributed samples across local clients does not hold for federated learning setups. Under this setting, neural network training performance may vary significantly according to the data distribution and even hurt training convergence. Most of the previous work has focused on a difference in the distribution of labels or client shifts. Unlike those settings, we address an important problem of FL, e.g., different scanners/sensors in medical imaging, different scenery distribution in autonomous driving (highway vs. city), where local clients store examples with different distributions compared to other clients, which we denote as feature shift non-iid. In this work, we propose an effective method that uses local batch normalization to alleviate the feature shift before averaging models. The resulting scheme, called FedBN, outperforms both classical FedAvg, as well as the state-of-the-art for non-iid data (FedProx) on our extensive experiments. These empirical results are supported by a convergence analysis that shows in a simplified setting that FedBN has a faster convergence rate than FedAvg. Code is available at https://github.com/med-air/FedBN.},
keywords = {batch normalization, black-box parallelization, deep learning, federated learning},
pubstate = {published},
tppubtype = {inproceedings}
}
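The mechanism FedBN proposes is compact enough to sketch: aggregate as in FedAvg, but exclude batch-normalization parameters and running statistics from averaging so that each client keeps its own. The PyTorch sketch below is a simplified illustration under that reading; the name-based BN detection is an assumption, and the authors' reference implementation is in the repository linked in the abstract.

import copy
import torch

def fedbn_aggregate(client_models):
    # Average all parameters across clients except batch-norm entries,
    # which stay local (affine weights/biases and running statistics).
    state_dicts = [m.state_dict() for m in client_models]

    def is_bn(name):
        # Heuristic: assumes BN modules are named with "bn"; a robust
        # version would inspect module types instead of names.
        return ("bn" in name or "running_mean" in name
                or "running_var" in name or "num_batches_tracked" in name)

    averaged = {
        name: torch.stack([sd[name].float() for sd in state_dicts]).mean(dim=0)
        for name in state_dicts[0] if not is_bn(name)
    }
    updated = []
    for model in client_models:
        local_sd = model.state_dict()
        local_sd.update({k: v.to(local_sd[k].dtype) for k, v in averaged.items()})
        new_model = copy.deepcopy(model)
        new_model.load_state_dict(local_sd)
        updated.append(new_model)
    return updated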
Petzka, Henning; Kamp, Michael; Adilova, Linara; Sminchisescu, Cristian; Boley, Mario Relative flatness and generalization (Journal Article) In: Advances in Neural Information Processing Systems, vol. 34, pp. 18420–18432, 2021. @article{petzka2021relative,
title = {Relative flatness and generalization},
author = {Henning Petzka and Michael Kamp and Linara Adilova and Cristian Sminchisescu and Mario Boley},
year = {2021},
date = {2021-01-01},
journal = {Advances in Neural Information Processing Systems},
volume = {34},
pages = {18420--18432},
keywords = {},
pubstate = {published},
tppubtype = {article}
}