2023
Malinverno, Luca; Barros, Vesna; Ghisoni, Francesco; Visonà, Giovanni; Kern, Roman; Nickel, Philip; Ventura, Barbara; Šimić, Ilija; Stryeck, Sarah; Manni, Francesca; Ferri, Cesar; Jean-Quartier, Claire; Genga, Laura; Schweikert, Gabriele; Lovrić, Mario; Rosen-Zvi, Michal
A historical perspective of biomedical explainable AI research (Journal Article)
In: Patterns, vol. 4, iss. 9, art. no. 100830, 2023.
@article{Malinverno2023,
title = {A historical perspective of biomedical explainable AI research},
author = {Luca Malinverno and Vesna Barros and Francesco Ghisoni and Giovanni Visonà and Roman Kern and Philip Nickel and Barbara Ventura and Ilija Šimić and Sarah Stryeck and Francesca Manni and Cesar Ferri and Claire Jean-Quartier and Laura Genga and Gabriele Schweikert and Mario Lovrić and Michal Rosen-Zvi},
url = {https://www.esdit.nl/1-s2-0-s266638992300199x-main/},
doi = {10.1016/j.patter.2023.100830},
year = {2023},
date = {2023-09-08},
urldate = {2023-09-08},
journal = {Patterns},
volume = {4},
issue = {9},
pages = {100830},
abstract = {The black-box nature of most artificial intelligence (AI) models encourages the development of explainability methods to engender trust into the AI decision-making process. Such methods can be broadly categorized into two main types: post hoc explanations and inherently interpretable algorithms. We aimed at analyzing the possible associations between COVID-19 and the push of explainable AI (XAI) to the forefront of biomedical research. We automatically extracted from the PubMed database biomedical XAI studies related to concepts of causality or explainability and manually labeled 1,603 papers with respect to XAI categories. To compare the trends pre- and post-COVID-19, we fit a change point detection model and evaluated significant changes in publication rates. We show that the advent of COVID-19 in the beginning of 2020 could be the driving factor behind an increased focus concerning XAI, playing a crucial role in accelerating an already evolving trend. Finally, we present a discussion with future societal use and impact of XAI technologies and potential future directions for those who pursue fostering clinical trust with interpretable machine learning models.},
keywords = {Artificial intelligence, Corona virus, COVID-19, Decision-making, Explainability, Foundation models, Machine learning, Meta-review, PRISMA, Trustworthiness},
pubstate = {published},
tppubtype = {article}
}
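The abstract notes that the authors fit a change point detection model to compare pre- and post-COVID-19 publication rates. The paper's exact model is not reproduced here; the following is a minimal illustrative sketch, assuming a single mean-shift change point in a monthly publication-count series. The `counts` data and the 2018–2021 window are synthetic placeholders, not values from the study.

```python
# Minimal sketch (not the authors' code): single change-point detection on
# monthly publication counts via a brute-force two-segment constant fit.
import numpy as np

def single_change_point(counts: np.ndarray) -> int:
    """Return the split index that best divides `counts` into two constant-mean
    segments, minimizing the total within-segment squared error."""
    best_idx, best_cost = 1, np.inf
    for k in range(1, len(counts)):
        left, right = counts[:k], counts[k:]
        cost = ((left - left.mean()) ** 2).sum() + ((right - right.mean()) ** 2).sum()
        if cost < best_cost:
            best_idx, best_cost = k, cost
    return best_idx

# Hypothetical monthly counts, 2018-01 through 2021-12 (48 months): a lower
# baseline rate before 2020 and an elevated rate afterwards.
rng = np.random.default_rng(0)
pre = rng.poisson(5, size=24)
post = rng.poisson(14, size=24)
counts = np.concatenate([pre, post]).astype(float)

cp = single_change_point(counts)
print(f"Estimated change point at month index {cp} "
      f"(index 24 corresponds to January 2020 in this toy series)")
```

In practice a dedicated change point library would typically be used instead of this brute-force search, but the two-segment fit conveys the idea of testing for a shift in publication rate around early 2020.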