2023
Malinverno, Luca; Barros, Vesna; Ghisoni, Francesco; Visonà, Giovanni; Kern, Roman; Nickel, Philip; Ventura, Barbara; Šimić, Ilija; Stryeck, Sarah; Manni, Francesca; Ferri, Cesar; Jean-Quartier, Claire; Genga, Laura; Schweikert, Gabriele; Lovrić, Mario; Rosen-Zvi, Michal
A historical perspective of biomedical explainable AI research Journal Article
In: Patterns, vol. 4, iss. 9, pp. 9, 2023.
Abstract | Links | BibTeX | Tags: Artificial intelligence, Corona virus, COVID-19, Decision-making, Explainability, Foundation models, Machine learning, Meta-review, PRISMA, Trustworthiness
@article{Malinverno2023,
title = {A historical perspective of biomedical explainable {AI} research},
author = {Luca Malinverno and Vesna Barros and Francesco Ghisoni and Giovanni Visonà and Roman Kern and Philip Nickel and Barbara Ventura and Ilija Šimić and Sarah Stryeck and Francesca Manni and Cesar Ferri and Claire Jean-Quartier and Laura Genga and Gabriele Schweikert and Mario Lovrić and Michal Rosen-Zvi},
url = {https://www.esdit.nl/1-s2-0-s266638992300199x-main/},
doi = {10.1016/j.patter.2023.100830},
year = {2023},
date = {2023-09-08},
urldate = {2023-09-08},
journal = {Patterns},
volume = {4},
issue = {9},
pages = {9},
abstract = {The black-box nature of most artificial intelligence (AI) models encourages the development of explainability methods to engender trust into the AI decision-making process. Such methods can be broadly categorized into two main types: post hoc explanations and inherently interpretable algorithms. We aimed at analyzing the possible associations between COVID-19 and the push of explainable AI (XAI) to the forefront of biomedical research. We automatically extracted from the PubMed database biomedical XAI studies related to concepts of causality or explainability and manually labeled 1,603 papers with respect to XAI categories. To compare the trends pre- and post-COVID-19, we fit a change point detection model and evaluated significant changes in publication rates. We show that the advent of COVID-19 in the beginning of 2020 could be the driving factor behind an increased focus concerning XAI, playing a crucial role in accelerating an already evolving trend. Finally, we present a discussion with future societal use and impact of XAI technologies and potential future directions for those who pursue fostering clinical trust with interpretable machine learning models.},
keywords = {Artificial intelligence, Corona virus, COVID-19, Decision-making, Explainability, Foundation models, Machine learning, Meta-review, PRISMA, Trustworthiness},
pubstate = {published},
tppubtype = {article}
}

van de Poel, Ibo; Frank, Lily; Hermann, Julia; Hopster, Jeroen; Lenzi, Dominic; Nyholm, Sven; Taebi, Behnam; Ziliotti, Elena (Ed.)
Ethics of Socially Disruptive Technologies: An Introduction Book
Open Book Publishers, 2023.
Abstract | Links | BibTeX | Tags: Artificial intelligence, artificial wombs, Climate engineering, Social media, Social robots, Society, Technology
@book{vandePoel2023,
title = {Ethics of Socially Disruptive Technologies: An Introduction},
editor = {Ibo van de Poel and Lily Frank and Julia Hermann and Jeroen Hopster and Dominic Lenzi and Sven Nyholm and Behnam Taebi and Elena Ziliotti},
doi = {10.11647/OBP.0366},
year = {2023},
date = {2023-09-05},
urldate = {2023-09-05},
publisher = {Open Book Publishers},
abstract = {Technologies shape who we are, how we organize our societies and how we relate to nature. For example, social media challenges democracy; artificial intelligence raises the question of what is unique to humans; and the possibility to create artificial wombs may affect notions of motherhood and birth. Some have suggested that we address global warming by engineering the climate, but how does this impact our responsibility to future generations and our relation to nature?
This book shows how technologies can be socially and conceptually disruptive and investigates how to come to terms with this disruptive potential.
Four technologies are studied: social media, social robots, climate engineering and artificial wombs. The authors highlight the disruptive potential of these technologies, and the new questions this raises. The book also discusses responses to conceptual disruption, like conceptual engineering, the deliberate revision of concepts.},
keywords = {Artificial intelligence, artificial wombs, Climate engineering, Social media, Social robots, Society, Technology},
pubstate = {published},
tppubtype = {book}
}
This book shows how technologies can be socially and conceptually disruptive and investigates how to come to terms with this disruptive potential.
Four technologies are studied: social media, social robots, climate engineering and artificial wombs. The authors highlight the disruptive potential of these technologies, and the new questions this raises. The book also discusses responses to conceptual disruption, like conceptual engineering, the deliberate revision of concepts.

Hopster, Jeroen; Maas, Matthijs
The technology triad: disruptive AI, regulatory gaps and value change Journal Article
In: AI and Ethics, 2023.
Abstract | Links | BibTeX | Tags: Artificial intelligence, Regulation, Social disruptive technologies, Technology ethics, Technology law, Value change
@article{Hopster2023triad,
title = {The technology triad: disruptive {AI}, regulatory gaps and value change},
author = {Jeroen Hopster and Matthijs Maas},
url = {https://www.esdit.nl/the-technology-triad/},
doi = {10.1007/s43681-023-00305-5},
year = {2023},
date = {2023-06-28},
urldate = {2023-06-28},
journal = {AI and Ethics},
abstract = {Disruptive technologies can have far-reaching impacts on society. They may challenge or destabilize cherished ethical values and disrupt legal systems. There is a convergent interest among ethicists and legal scholars in such “second-order disruptions” to norm systems. Thus far, however, ethical and legal approaches to technological norm-disruption have remained largely siloed. In this paper, we propose to integrate the existing ‘dyadic’ models of disruptive change in the ethical and legal spheres, and shift focus to the relations between and mutual shaping of values, technology, and law. We argue that a ‘triadic’ values-technology-regulation model—“the technology triad”—is more descriptively accurate, as it allows a better mapping of second-order impacts of technological changes (on values and norms, through changes in legal systems—or on legal systems, through changes in values and norms). Simultaneously, a triadic model serves to highlight a broader portfolio of ethical, technical, or regulatory interventions that can enable effective ethical triage of—and a more resilient response to—such Socially Disruptive Technologies. We illustrate the application of the triadic framework with two cases, one historical (how the adoption of the GDPR channeled and redirected the evolution of the ethical value of ‘privacy’ when that had been put under pressure by digital markets), and one anticipatory (looking at anticipated disruptions caused by the ongoing wave of generative AI systems).},
keywords = {Artificial intelligence, Regulation, Social disruptive technologies, Technology ethics, Technology law, Value change},
pubstate = {published},
tppubtype = {article}
}

Löhr, Guido
If conceptual engineering is a new method in the ethics of AI, what method is it exactly? Journal Article
In: AI and Ethics, 2023.
Abstract | Links | BibTeX | Tags: AI Ethics, Artificial intelligence, Conceptual engineering, Conceptual ethics, Pragmatism, Representationalism
@article{Loehr2023conceptual,
title = {If conceptual engineering is a new method in the ethics of {AI}, what method is it exactly?},
author = {Guido Löhr},
doi = {10.1007/s43681-023-00295-4},
year = {2023},
date = {2023-05-16},
urldate = {2023-05-16},
journal = {AI and Ethics},
abstract = {Can a machine be a person? Can a robot think, be our friend or colleague? These familiar questions in the ethics of AI have recently become much more urgent than many philosophers anticipated. However, they also seem as intractable as ever. For this reason, several philosophers of AI have recently turned their attention to an arguably new method: conceptual engineering. The idea is to stop searching for the real essence of friendship or our ordinary concept of the person. Instead, ethicists of AI should engineer concepts of friend or person we should apply. But what exactly is this method? There is currently no consensus on what the target object of conceptual engineers is or should be. In this paper, I reject a number of popular options and then argue for a pragmatist way of thinking about the target object of conceptual engineering in the ethics of AI. I conclude that in this pragmatist picture, conceptual engineering is probably what we have been doing all along. So, is it all just hype? No, the idea that the ethics of AI has been dominated by conceptual engineers all along constitutes an important meta-philosophical insight. We can build on this insight to develop a more rigorous and thorough methodology in the ethics of AI.},
keywords = {AI Ethics, Artificial intelligence, Conceptual engineering, Conceptual ethics, Pragmatism, Representationalism},
pubstate = {published},
tppubtype = {article}
}
van de Poel, Ibo
AI, Control and Unintended Consequences: The Need for Meta-Values Book Section
In: Fritzsche, Albrecht; Santa-María, Andrés (Ed.): pp. 117-129, Springer, 2023.
Abstract | Links | BibTeX | Tags: Artificial intelligence, Control, Experimentation, Machine ethics, Machine learning, Unintended consequences, Value sensitive design, Values
@incollection{vandePoel2023control,
title = {{AI}, Control and Unintended Consequences: The Need for Meta-Values},
author = {Ibo van de Poel},
editor = {Albrecht Fritzsche and Andrés Santa-María},
url = {https://www.esdit.nl/ai-control-and-unintended/},
doi = {10.1007/978-3-031-25233-4_9},
year = {2023},
date = {2023-04-29},
pages = {117--129},
publisher = {Springer},
chapter = {9},
abstract = {Due to their self-learning and evolutionary character, AI (Artificial Intelligence) systems are more prone to unintended consequences and more difficult to control than traditional sociotechnical systems. To deal with this, machine ethicists have proposed to build moral (reasoning) capacities into AI systems by designing artificial moral agents. I argue that this may well lead to more, rather than less, unintended consequences and may decrease, rather than increase, human control over such systems. Instead, I suggest, we should bring AI systems under meaningful human control by formulating a number of meta-values for their evolution. Amongst others, this requires responsible experimentation with AI systems, which may neither guarantee full control nor the prevention of all undesirable consequences, but nevertheless ensures that AI systems, and their evolution, do not get out of control.},
keywords = {Artificial intelligence, Control, Experimentation, Machine ethics, Machine learning, Unintended consequences, Value sensitive design, Values},
pubstate = {published},
tppubtype = {incollection}
}

Nyholm, Sven
Artificial Intelligence, Humanoid Robots, and Old and New Control Problems Book Chapter
In: Hakli, R.; Mäkelä, P.; Seibt, J. (Ed.): pp. 3-12, IOS Press, 2023.
Abstract | Links | BibTeX | Tags: Artificial intelligence, Control, Humanoid robots, The control problem
@inbook{Nyholm2023humanoid,
title = {Artificial Intelligence, Humanoid Robots, and Old and New Control Problems},
author = {Sven Nyholm},
editor = {R. Hakli and P. Mäkelä and J. Seibt},
doi = {10.3233/FAIA220594},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
pages = {3--12},
publisher = {IOS Press},
abstract = {In this paper, I discuss what I call a new control problem related to AI in the form of humanoid robots, and I compare it to what I call the old control problem related to AI more generally. The old control problem – discussed by authors such as Alan Turing, Norbert Wiener, and Roman Yampolskiy – concerns a worry that we might lose control over advanced AI technologies, which is seen as something that would be instrumentally bad. The new control problem is that there might be certain types of AI technologies – in particular, AI technologies in the form of lifelike humanoid robots – where there might be something problematic, at least from a symbolic point of view, about wanting to completely control these robots. The reason for this is that such robots might be seen as symbolizing human persons and because wanting to control such robots might therefore be seen as symbolizing something non-instrumentally bad: persons controlling other persons. A more general statement of the new control problem is to say that it is the problem of describing under what circumstances having complete control over AI technologies is unambiguously good from an ethical point of view. This paper sketches an answer to this by also discussing AI technologies that do not take the form of humanoid robots and that are such that control over them can be conceptualized as a form of extended self-control.},
keywords = {Artificial intelligence, Control, Humanoid robots, The control problem},
pubstate = {published},
tppubtype = {inbook}
}
2022

Nyholm, Sven
A new control problem? Humanoid robots, artificial intelligence, and the value of control Journal Article
In: AI and Ethics, 2022.
Links | BibTeX | Tags: Artificial intelligence, Control, Extended agency, Humanoid robots, Self-control, The control problem
@article{Nyholm2022control,
title = {A new control problem? Humanoid robots, artificial intelligence, and the value of control},
author = {Sven Nyholm},
doi = {10.1007/s43681-022-00231-y},
year = {2022},
date = {2022-11-07},
urldate = {2022-11-07},
journal = {AI and Ethics},
keywords = {Artificial intelligence, Control, Extended agency, Humanoid robots, Self-control, The control problem},
pubstate = {published},
tppubtype = {article}
}
O'Neill, Elizabeth; Klincewicz, Michal; Kemmer, Michiel
Ethical Issues with Artificial Ethics Assistants Book Chapter
In: Véliz, Carissa (Ed.): Oxford Academic, 2022.
Links | BibTeX | Tags: Artificial ethics advisor, Artificial ethics assistant, Artificial intelligence, Moral cognition, Moral decision-making, Moral enhancement
@inbook{ONeill2022assistants,
title = {Ethical Issues with Artificial Ethics Assistants},
author = {Elizabeth O'Neill and Michal Klincewicz and Michiel Kemmer},
editor = {Carissa Véliz},
doi = {10.1093/oxfordhb/9780198857815.013.17},
year = {2022},
date = {2022-10-20},
urldate = {2022-10-20},
publisher = {Oxford Academic},
keywords = {Artificial ethics advisor, Artificial ethics assistant, Artificial intelligence, Moral cognition, Moral decision-making, Moral enhancement},
pubstate = {published},
tppubtype = {inbook}
}
Royakkers, Lambèr; Huigsloot, Nathalie
‘De mens wordt steeds meer een robot’ Journal Article
In: HP De Tijd, 2022.
Links | BibTeX | Tags: Artificial intelligence
@article{Royakkers2022mens,
title = {‘De mens wordt steeds meer een robot’},
author = {Lambèr Royakkers and Nathalie Huigsloot},
url = {https://www.hpdetijd.nl/2022-08-28/de-mens-wordt-steeds-meer-een-robot/},
year = {2022},
date = {2022-08-28},
journal = {HP De Tijd},
keywords = {Artificial intelligence},
pubstate = {published},
tppubtype = {article}
}

Alfrink, Kars; Keller, Ianus; Kortuem, Gerd; Doorn, Neelke
Contestable AI by Design: Towards a Framework Journal Article
In: Minds and Machines, 2022.
Links | BibTeX | Tags: Artificial intelligence, Automated decision-making, Contestability, Design, Human-computer interaction, Machine learning, Sociotechnical systems
@article{Alfrink2022contestable,
title = {Contestable {AI} by Design: Towards a Framework},
author = {Kars Alfrink and Ianus Keller and Gerd Kortuem and Neelke Doorn},
doi = {10.1007/s11023-022-09611-z},
year = {2022},
date = {2022-08-13},
journal = {Minds and Machines},
keywords = {Artificial intelligence, Automated decision-making, Contestability, Design, Human-computer interaction, Machine learning, Sociotechnical systems},
pubstate = {published},
tppubtype = {article}
}
Kazim, Emre; Fenoglio, Enzo; Hilliard, Airlie; Koshiyama, Adriano; Mulligan, Catherine; Trengove, Markus; Gilbert, Abigail; Gwagwa, Arthur; Almeida, Denise; Gosdiff, Phil; Porayska-Pomsta, Kaska
On the sui generis value capture of new digital technologies: The case of AI Journal Article
In: Patterns, vol. 3, iss. 7, no. 100526, 2022.
Links | BibTeX | Tags: Artificial intelligence, Digital assets, Information theory, Ontology, Value theory
@article{Kazim2022value,
title = {On the sui generis value capture of new digital technologies: The case of {AI}},
author = {Emre Kazim and Enzo Fenoglio and Airlie Hilliard and Adriano Koshiyama and Catherine Mulligan and Markus Trengove and Abigail Gilbert and Arthur Gwagwa and Denise Almeida and Phil Gosdiff and Kaska Porayska-Pomsta},
doi = {10.1016/j.patter.2022.100526},
year = {2022},
date = {2022-07-08},
journal = {Patterns},
volume = {3},
number = {100526},
issue = {7},
keywords = {Artificial intelligence, Digital assets, Information theory, Ontology, Value theory},
pubstate = {published},
tppubtype = {article}
}
Lee, Minha; Frank, Lily; de Kort, Yvonne A. W.; IJsselsteijn, Wijnand A.
Where is Vincent? Expanding our emotional selves with AI Conference
CUI '22: Proceedings of the 4th Conference on Conversational User Interfaces, no. 19, 2022.
Links | BibTeX | Tags: Artificial intelligence, Emotional selves
@inproceedings{Lee2022vincent,
title = {Where is {Vincent}? Expanding our emotional selves with {AI}},
author = {Minha Lee and Lily Frank and Yvonne A. W. de Kort and Wijnand A. IJsselsteijn},
doi = {10.1145/3543829.3543835},
year = {2022},
date = {2022-07-01},
booktitle = {CUI '22: Proceedings of the 4th Conference on Conversational User Interfaces},
number = {19},
pages = {1--11},
keywords = {Artificial intelligence, Emotional selves},
pubstate = {published},
tppubtype = {conference}
}

Siebert, Luciano Cavalcante; Lupetti, Maria Luce; Aizenberg, Evgeni; Beckers, Niek; Zgonnikov, Arkady; Veluwenkamp, Herman; Abbink, David; Giaccardi, Elisa; Houben, Geert-Jan; Jonker, Catholijn; van den Hoven, Jeroen; Forster, Deborah; Lagendijk, Reginald L.
Meaningful human control: Actionable properties for AI system development Journal Article
In: AI and Ethics, vol. 3, pp. 241-255, 2022.
Links | BibTeX | Tags: AI Ethics, Artificial intelligence, Meaningful human control, Moral responsibility, Socio-technical systems
@article{Siebert2022meaningful,
title = {Meaningful human control: Actionable properties for {AI} system development},
author = {Luciano Cavalcante Siebert and Maria Luce Lupetti and Evgeni Aizenberg and Niek Beckers and Arkady Zgonnikov and Herman Veluwenkamp and David Abbink and Elisa Giaccardi and Geert-Jan Houben and Catholijn Jonker and Jeroen van den Hoven and Deborah Forster and Reginald L. Lagendijk},
doi = {10.1007/s43681-022-00167-3},
year = {2022},
date = {2022-05-18},
urldate = {2022-05-18},
journal = {AI and Ethics},
volume = {3},
pages = {241--255},
keywords = {AI Ethics, Artificial intelligence, Meaningful human control, Moral responsibility, Socio-technical systems},
pubstate = {published},
tppubtype = {article}
}

Gwagwa, Arthur; Kazim, Emre; Hilliard, Airlie
The role of the African value of Ubuntu in global AI inclusion discourse: A normative ethics perspective Journal Article
In: Patterns, vol. 3, iss. 4, no. 100462, 2022.
Links | BibTeX | Tags: African ethics and values, AI Ethics, Artificial intelligence, Ubuntu philosophy, Utilitarianism
@article{Gwagwa2022ubuntu,
title = {The role of the African value of {Ubuntu} in global {AI} inclusion discourse: A normative ethics perspective},
author = {Arthur Gwagwa and Emre Kazim and Airlie Hilliard},
doi = {10.1016/j.patter.2022.100462},
year = {2022},
date = {2022-04-08},
urldate = {2022-04-08},
journal = {Patterns},
volume = {3},
number = {100462},
issue = {4},
keywords = {African ethics and values, AI Ethics, Artificial intelligence, Ubuntu philosophy, Utilitarianism},
pubstate = {published},
tppubtype = {article}
}

Alfrink, Kars; Keller, Ianus; Doorn, Neelke; Kortuem, Gerd
Tensions in transparent urban AI: designing a smart electric vehicle charge point Journal Article
In: AI & Society, 2022.
Links | BibTeX | Tags: Artificial intelligence, Electric vehicles, Transparency, Urban AI
@article{Alfrink2022tensions,
title = {Tensions in transparent urban {AI}: designing a smart electric vehicle charge point},
author = {Kars Alfrink and Ianus Keller and Neelke Doorn and Gerd Kortuem},
doi = {10.1007/s00146-022-01436-9},
year = {2022},
date = {2022-03-31},
journal = {AI & Society},
keywords = {Artificial intelligence, Electric vehicles, Transparency, Urban AI},
pubstate = {published},
tppubtype = {article}
}

Nickel, Philip
Trust in medical artificial intelligence: a discretionary account Journal Article
In: Ethics and Information Technology, vol. 24, no. 7, 2022.
Links | BibTeX | Tags: Artificial intelligence, Discretion, Future of medicine, Normative expectations, Trust in AI
@article{Nickel2022trust,
title = {Trust in medical artificial intelligence: a discretionary account},
author = {Philip Nickel},
doi = {10.1007/s10676-022-09630-5},
year = {2022},
date = {2022-01-24},
journal = {Ethics and Information Technology},
volume = {24},
number = {7},
keywords = {Artificial intelligence, Discretion, Future of medicine, Normative expectations, Trust in AI},
pubstate = {published},
tppubtype = {article}
}
2021

Gwagwa, Arthur; Kazim, Emre; Kachidza, Patti; Hilliard, Airlie; Siminyu, Kathleen; Smith, Matthew; Shawe-Taylor, John
Road map for research on responsible artificial intelligence for development (AI4D) in African countries: The case study of agriculture Journal Article
In: Perspective, vol. 2, iss. 12, no. 100381, 2021.
Links | BibTeX | Tags: Africa, AI Ethics, Artificial intelligence, Development, Responsible AI
@article{Gwagwa2021,
title = {Road map for research on responsible artificial intelligence for development ({AI4D}) in African countries: The case study of agriculture},
author = {Arthur Gwagwa and Emre Kazim and Patti Kachidza and Airlie Hilliard and Kathleen Siminyu and Matthew Smith and John Shawe-Taylor},
doi = {10.1016/j.patter.2021.100381},
year = {2021},
date = {2021-12-10},
journal = {Patterns},
volume = {2},
number = {100381},
issue = {12},
keywords = {Africa, AI Ethics, Artificial intelligence, Development, Responsible AI},
pubstate = {published},
tppubtype = {article}
}

Nyholm, Sven
The World’s Most Dangerous Idea? Transhumanism in the Age of Artificial Intelligence, Climate Change, and Existential Risk. Some Comments on Stefan Lorenz Sorgner’s On Transhumanism Journal Article
In: Deliberatio Studies in Contemporary Philosophical Challenges, vol. 1, iss. 1, pp. 77-86, 2021, ISSN: 2810-5532.
BibTeX | Tags: Artificial intelligence, Climate change, Existential risks, Transhumanism, Values
@article{Nyholm2021c,
title = {The World’s Most Dangerous Idea? Transhumanism in the Age of Artificial Intelligence, Climate Change, and Existential Risk. Some Comments on {Stefan Lorenz Sorgner}’s On Transhumanism},
author = {Sven Nyholm},
issn = {2810-5532},
year = {2021},
date = {2021-12-01},
journal = {Deliberatio Studies in Contemporary Philosophical Challenges},
volume = {1},
issue = {1},
pages = {77--86},
keywords = {Artificial intelligence, Climate change, Existential risks, Transhumanism, Values},
pubstate = {published},
tppubtype = {article}
}

Danaher, John; Nyholm, Sven
Automation, work and the achievement gap Journal Article
In: AI and Ethics, vol. 1, iss. 3, pp. 227-237, 2021.
Links | BibTeX | Tags: Achievement, Artificial intelligence, Automation, Autonomy, Community, Mastery, Meaningful work, Responsibility gap
@article{Danaher2021,
title = {Automation, work and the achievement gap},
author = {John Danaher and Sven Nyholm},
doi = {10.1007/s43681-020-00028-x},
year = {2021},
date = {2021-08-01},
journal = {AI and Ethics},
volume = {1},
issue = {3},
pages = {227--237},
keywords = {Achievement, Artificial intelligence, Automation, Autonomy, Community, Mastery, Meaningful work, Responsibility gap},
pubstate = {published},
tppubtype = {article}
}

Rutenberg, Isaac; Gwagwa, Arthur; Omino, Melissa
Use and Impact of Artificial Intelligence on Climate Change Adaptation in Africa Journal Article
In: African Handbook of Climate Change Adaptation, pp. 1107-1126, 2021.
Links | BibTeX | Tags: Adaptation, Africa, Algorithms, Artificial intelligence, Climate change, Data, Migration
@incollection{Rutenberg2021,
title = {Use and Impact of Artificial Intelligence on Climate Change Adaptation in Africa},
author = {Isaac Rutenberg and Arthur Gwagwa and Melissa Omino},
editor = {Walter Leal Filho and Nicholas Oguge and Desalegn Ayal and Lydia Adeleke and Izael da Silva},
doi = {10.1007/978-3-030-45106-6_80},
year = {2021},
date = {2021-06-01},
booktitle = {African Handbook of Climate Change Adaptation},
pages = {1107--1126},
keywords = {Adaptation, Africa, Algorithms, Artificial intelligence, Climate change, Data, Migration},
pubstate = {published},
tppubtype = {incollection}
}

Doorn, Neelke
Artificial Intelligence in the Water Domain. Opportunities for Responsible Use Journal Article
In: Science of The Total Environment, vol. 755, iss. 1, no. 142561, 2021.
Links | BibTeX | Tags: Artificial intelligence, Data science, Ethics, Many-objective optimisation, Responsible AI, Water domain
@article{Doorn2021,
  author    = {Neelke Doorn},
  title     = {Artificial Intelligence in the Water Domain. Opportunities for Responsible Use},
  journal   = {Science of The Total Environment},
  volume    = {755},
  number    = {142561},
  issue     = {1},
  year      = {2021},
  date      = {2021-02-10},
  urldate   = {2021-02-10},
  doi       = {10.1016/j.scitotenv.2020.142561},
  keywords  = {Artificial intelligence, Data science, Ethics, Many-objective optimisation, Responsible AI, Water domain},
  pubstate  = {published},
  tppubtype = {article}
}

Gordon, John-Stewart; Nyholm, Sven
Ethics of Artificial Intelligence Online
2021.
Links | BibTeX | Tags: Artificial intelligence, Ethics
@online{Gordon2021ethics,
title = {Ethics of Artificial Intelligence},
author = {John-Stewart Gordon and Sven Nyholm},
url = {https://iep.utm.edu/ethics-of-artificial-intelligence/},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
issue = {Internet Encyclopedia of Philosophy},
keywords = {Artificial intelligence, Ethics},
pubstate = {published},
tppubtype = {online}
}

Stahl, Bernd Carsten; Andreou, Andreas; Brey, Philip; Hatzakis, Tally; Kirichenko, Alexey; Macnisch, Kevin; Shaelou, S. Laulhé; Patel, Andrew; Ryan, Mark; Wright, David
Artificial intelligence for human flourishing – Beyond principles for machine learning Journal Article
In: Journal of Business Research, vol. 124, iss. Artificial Intelligence, pp. 374-388, 2021.
Links | BibTeX | Tags: Artificial intelligence, Big data, Ethics, Governance, Human rights
@article{Stahl2021,
title = {Artificial intelligence for human flourishing – Beyond principles for machine learning},
author = {Bernd Carsten Stahl and Andreas Andreou and Philip Brey and Tally Hatzakis and Alexey Kirichenko and Kevin Macnisch and S. Laulhé Shaelou and Andrew Patel and Mark Ryan and David Wright},
doi = {10.1016/j.jbusres.2020.11.030},
year = {2021},
date = {2021-01-01},
journal = {Journal of Business Research},
volume = {124},
issue = {Artificial Intelligence},
pages = {374--388},
keywords = {Artificial intelligence, Big data, Ethics, Governance, Human rights},
pubstate = {published},
tppubtype = {article}
}
2020
van de Poel, Ibo
Three philosophical perspectives on the relation between technology and society, and how they affect the current debate about artificial intelligence Journal Article
In: Human Affairs, vol. 30, iss. 4, pp. 499-511, 2020.
Links | BibTeX | Tags: Artificial intelligence, Co-evolution, Philosophy, Society, Technological determinism, Technology, Values
@article{vandePoel2020three,
title = {Three philosophical perspectives on the relation between technology and society, and how they affect the current debate about artificial intelligence},
author = {Ibo van de Poel},
doi = {10.1515/humaff-2020-0042},
year = {2020},
date = {2020-10-09},
urldate = {2020-10-09},
journal = {Human Affairs},
volume = {30},
issue = {4},
pages = {499--511},
keywords = {Artificial intelligence, Co-evolution, Philosophy, Society, Technological determinism, Technology, Values},
pubstate = {published},
tppubtype = {article}
}