2023

Sullivan, Emily
Do Machine Learning Models Represent Their Targets? Journal Article
In: Philosophy of Science, pp. 1-11, 2023.
Abstract | Links | BibTeX | Tags: Epistemology, Machine learning
@article{Sullivan2023,
title = {Do Machine Learning Models Represent Their Targets?},
author = {Emily Sullivan},
url = {https://www.esdit.nl/do-machine-learning-models-represent-their-targets/},
doi = {10.1017/psa.2023.151},
year = {2023},
date = {2023-10-20},
urldate = {2023-10-20},
journal = {Philosophy of Science},
pages = {1-11},
abstract = {I argue that machine learning (ML) models used in science function as highly idealized toy models. If we treat ML models as a type of highly idealized toy model, then we can deploy standard representational and epistemic strategies from the toy model literature to explain why ML models can still provide epistemic success despite their lack of similarity to their targets.},
keywords = {Epistemology, Machine learning},
pubstate = {published},
tppubtype = {article}
}

Malinverno, Luca; Barros, Vesna; Ghisoni, Francesco; Visonà, Giovanni; Kern, Roman; Nickel, Philip; Ventura, Barbara; Šimić, Ilija; Stryeck, Sarah; Manni, Francesca; Ferri, Cesar; Jean-Quartier, Claire; Genga, Laura; Schweikert, Gabriele; Lovrić, Mario; Rosen-Zvi, Michal
A historical perspective of biomedical explainable AI research Journal Article
In: Patterns, vol. 4, iss. 9, pp. 100830, 2023.
Abstract | Links | BibTeX | Tags: Artificial intelligence, Corona virus, COVID-19, Decision-making, Explainability, Foundation models, Machine learning, Meta-review, PRISMA, Trustworthiness
@article{Malinverno2023,
title = {A historical perspective of biomedical explainable AI research},
author = {Luca Malinverno and Vesna Barros and Francesco Ghisoni and Giovanni Visonà and Roman Kern and Philip Nickel and Barbara Ventura and Ilija Šimić and Sarah Stryeck and Francesca Manni and Cesar Ferri and Claire Jean-Quartier and Laura Genga and Gabriele Schweikert and Mario Lovrić and Michal Rosen-Zvi},
url = {https://www.esdit.nl/1-s2-0-s266638992300199x-main/},
doi = {10.1016/j.patter.2023.100830},
year = {2023},
date = {2023-09-08},
urldate = {2023-09-08},
journal = {Patterns},
volume = {4},
issue = {9},
pages = {100830},
abstract = {The black-box nature of most artificial intelligence (AI) models encourages the development of explainability methods to engender trust into the AI decision-making process. Such methods can be broadly categorized into two main types: post hoc explanations and inherently interpretable algorithms. We aimed at analyzing the possible associations between COVID-19 and the push of explainable AI (XAI) to the forefront of biomedical research. We automatically extracted from the PubMed database biomedical XAI studies related to concepts of causality or explainability and manually labeled 1,603 papers with respect to XAI categories. To compare the trends pre- and post-COVID-19, we fit a change point detection model and evaluated significant changes in publication rates. We show that the advent of COVID-19 in the beginning of 2020 could be the driving factor behind an increased focus concerning XAI, playing a crucial role in accelerating an already evolving trend. Finally, we present a discussion with future societal use and impact of XAI technologies and potential future directions for those who pursue fostering clinical trust with interpretable machine learning models.},
keywords = {Artificial intelligence, Corona virus, COVID-19, Decision-making, Explainability, Foundation models, Machine learning, Meta-review, PRISMA, Trustworthiness},
pubstate = {published},
tppubtype = {article}
}

van de Poel, Ibo
AI, Control and Unintended Consequences: The Need for Meta-Values Book Section
In: Fritzsche, Albrecht; Santa-María, Andrés (Ed.): pp. 117-129, Springer, 2023.
Abstract | Links | BibTeX | Tags: Artificial intelligence, Control, Experimentation, Machine ethics, Machine learning, Unintended consequences, Value sensitive design, Values
@incollection{vandePoel2023,
title = {AI, Control and Unintended Consequences: The Need for Meta-Values},
author = {Ibo van de Poel},
editor = {Albrecht Fritzsche and Andrés Santa-María},
url = {https://www.esdit.nl/ai-control-and-unintended/},
doi = {10.1007/978-3-031-25233-4_9},
year = {2023},
date = {2023-04-29},
pages = {117-129},
publisher = {Springer},
chapter = {9},
abstract = {Due to their self-learning and evolutionary character, AI (Artificial Intelligence) systems are more prone to unintended consequences and more difficult to control than traditional sociotechnical systems. To deal with this, machine ethicists have proposed to build moral (reasoning) capacities into AI systems by designing artificial moral agents. I argue that this may well lead to more, rather than less, unintended consequences and may decrease, rather than increase, human control over such systems. Instead, I suggest, we should bring AI systems under meaningful human control by formulating a number of meta-values for their evolution. Amongst others, this requires responsible experimentation with AI systems, which may neither guarantee full control nor the prevention of all undesirable consequences, but nevertheless ensures that AI systems, and their evolution, do not get out of control.},
keywords = {Artificial intelligence, Control, Experimentation, Machine ethics, Machine learning, Unintended consequences, Value sensitive design, Values},
pubstate = {published},
tppubtype = {incollection}
}

2022

Alfrink, Kars; Keller, Ianus; Kortuem, Gerd; Doorn, Neelke
Contestable AI by Design: Towards a Framework Journal Article
In: Minds and Machines, 2022.
Links | BibTeX | Tags: Artificial intelligence, Automated decision-making, Contestability, Design, Human-computer interaction, Machine learning, Sociotechnical systems
@article{Alfrink2022,
title = {Contestable AI by Design: Towards a Framework},
author = {Kars Alfrink and Ianus Keller and Gerd Kortuem and Neelke Doorn},
doi = {10.1007/s11023-022-09611-z},
year = {2022},
date = {2022-08-13},
journal = {Minds and Machines},
keywords = {Artificial intelligence, Automated decision-making, Contestability, Design, Human-computer interaction, Machine learning, Sociotechnical systems},
pubstate = {published},
tppubtype = {article}
}

Sullivan, Emily
Link Uncertainty, Implementation, and ML Opacity Book Chapter
In: Lawler, Insa; Khalifa, Kareem; Shech, Elay (Ed.): Scientific Understanding and Representation, 2022.
Links | BibTeX | Tags: Link uncertainty, Machine learning
@inbook{Sullivan2022,
title = {Link Uncertainty, Implementation, and ML Opacity},
author = {Emily Sullivan},
editor = {Insa Lawler and Kareem Khalifa and Elay Shech},
url = {https://philpapers.org/archive/SULLUI.pdf},
year = {2022},
date = {2022-01-01},
booktitle = {Scientific Understanding and Representation},
keywords = {Link uncertainty, Machine learning},
pubstate = {published},
tppubtype = {inbook}
}