2023

Nyholm, Sven
Artificial Intelligence, Humanoid Robots, and Old and New Control Problems Book Chapter
In: Hakli, R.; Mäkelä, P.; Seibt, J. (Ed.): pp. 3-12, IOS Press, 2023.
Abstract | Links | BibTeX | Tags: Artificial intelligence, Control, Humanoid robots, The control problem
@inbook{nyholm2023control,
  title     = {Artificial Intelligence, Humanoid Robots, and Old and New Control Problems},
  author    = {Nyholm, Sven},
  editor    = {Hakli, R. and Mäkelä, P. and Seibt, J.},
  doi       = {10.3233/FAIA220594},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-01-01},
  pages     = {3--12},
  publisher = {IOS Press},
  abstract  = {In this paper, I discuss what I call a new control problem related to AI in the form of humanoid robots, and I compare it to what I call the old control problem related to AI more generally. The old control problem – discussed by authors such as Alan Turing, Norbert Wiener, and Roman Yampolskiy – concerns a worry that we might lose control over advanced AI technologies, which is seen as something that would be instrumentally bad. The new control problem is that there might be certain types of AI technologies – in particular, AI technologies in the form of lifelike humanoid robots – where there might be something problematic, at least from a symbolic point of view, about wanting to completely control these robots. The reason for this is that such robots might be seen as symbolizing human persons and because wanting to control such robots might therefore be seen as symbolizing something non-instrumentally bad: persons controlling other persons. A more general statement of the new control problem is to say that it is the problem of describing under what circumstances having complete control over AI technologies is unambiguously good from an ethical point of view. This paper sketches an answer to this by also discussing AI technologies that do not take the form of humanoid robots and that are such that control over them can be conceptualized as a form of extended self-control.},
  keywords  = {Artificial intelligence, Control, Humanoid robots, The control problem},
  pubstate  = {published},
  tppubtype = {inbook},
}

Friedman, Cindy
Granting Negative Rights to Humanoid Robots Journal Article
In: Frontiers in Artificial Intelligence and Applications, vol. 366, pp. 145-154, 2023.
Abstract | Links | BibTeX | Tags: Humanoid robots, Negative rights
@article{friedman2023rights,
  title     = {Granting Negative Rights to Humanoid Robots},
  author    = {Friedman, Cindy},
  url       = {https://ebooks.iospress.nl/doi/10.3233/FAIA220613},
  doi       = {10.3233/FAIA220613},
  year      = {2023},
  date      = {2023-01-01},
  journal   = {Frontiers in Artificial Intelligence and Applications},
  volume    = {366},
  pages     = {145--154},
  abstract  = {The paper argues that we should grant negative rights to humanoid robots. These are rights that relate to non-interference e.g., freedom from violence, or freedom from discrimination. Doing so will prevent moral degradation to our human society. The consideration of robot moral status has seen a progression towards the consideration of robot rights. This is a controversial debate, with many scholars seeing the consideration of robot rights in black and white. It is, however, valuable to take a nuanced approach. This paper highlights the value of taking a nuanced approach by arguing that we should consider negative rights for humanoid robots. Where a lot of discussion about robot rights centres around the possibility of robot consciousness which would warrant robots being protected by rights for their own moral sakes, the paper takes a human-centred approach. It argues that we should, at least, grant negative rights to humanoid robots for the sake of human beings and not necessarily only for the sake of robots. This is because, given the human-likeness of humanoid robots, we relate to them in a human-like way. Should we, in the context of these relations, treat these robots immorally, there is concern that we may damage our own moral fibre or, more collectively, society’s moral fibre. Thus, inhibiting the immoral treatment of robots, protects the moral fibre of society, thereby preventing moral degradation in our human society.},
  keywords  = {Humanoid robots, Negative rights},
  pubstate  = {published},
  tppubtype = {article},
}
2022

Nyholm, Sven
A new control problem? Humanoid robots, artificial intelligence, and the value of control Journal Article
In: AI and Ethics, 2022.
Links | BibTeX | Tags: Artificial intelligence, Control, Extended agency, Humanoid robots, Self-control, The control problem
@article{nyholm2022newcontrol,
  title     = {A new control problem? Humanoid robots, artificial intelligence, and the value of control},
  author    = {Nyholm, Sven},
  doi       = {10.1007/s43681-022-00231-y},
  year      = {2022},
  date      = {2022-11-07},
  urldate   = {2022-11-07},
  journal   = {AI and Ethics},
  keywords  = {Artificial intelligence, Control, Extended agency, Humanoid robots, Self-control, The control problem},
  pubstate  = {published},
  tppubtype = {article},
}

Friedman, Cindy
Ethical concerns with replacing human relations with humanoid robots: an ubuntu perspective Journal Article
In: AI and Ethics, 2022.
Links | BibTeX | Tags: African philosophy, Human-robot interaction, Humanoid robots, Robot ethics, Ubuntu philosophy
@article{friedman2022ubuntu,
  title     = {Ethical concerns with replacing human relations with humanoid robots: an ubuntu perspective},
  author    = {Friedman, Cindy},
  doi       = {10.1007/s43681-022-00186-0},
  year      = {2022},
  date      = {2022-06-20},
  journal   = {AI and Ethics},
  keywords  = {African philosophy, Human-robot interaction, Humanoid robots, Robot ethics, Ubuntu philosophy},
  pubstate  = {published},
  tppubtype = {article},
}

Nyholm, Sven
The Ethics of Humanoid Sex Robots Book Chapter
In: Earp, Brian D.; Chambers, Clare; Watson, Lori (Ed.): pp. 574-585, Routledge, 2022.
Links | BibTeX | Tags: Ethics, Humanoid robots, Sex robots
@inbook{nyholm2022sexrobots,
  title         = {The Ethics of Humanoid Sex Robots},
  author        = {Nyholm, Sven},
  editor        = {Earp, Brian D. and Chambers, Clare and Watson, Lori},
  doi           = {10.4324/9781003286523-48},
  internal-note = {doi corrected from malformed export value 10.4324/9781003286523-48s; verify against publisher record},
  year          = {2022},
  date          = {2022-05-24},
  pages         = {574--585},
  publisher     = {Routledge},
  keywords      = {Ethics, Humanoid robots, Sex robots},
  pubstate      = {published},
  tppubtype     = {inbook},
}
Perugia, Giulia; Guidi, Stefano; Bicchi, Margherita; Parlangeli, Oronzo
The Shape of Our Bias: Perceived Age and Gender in the Humanoid Robots of the ABOT Database Conference
ACM/IEEE International Conference on Human-Robot Interaction (HRI), vol. 17, IEEE, 2022.
Links | BibTeX | Tags: Bias, Human-robot interaction, Humanoid robots
@inproceedings{perugia2022shape,
  title     = {The Shape of Our Bias: Perceived Age and Gender in the Humanoid Robots of the ABOT Database},
  author    = {Perugia, Giulia and Guidi, Stefano and Bicchi, Margherita and Parlangeli, Oronzo},
  doi       = {10.1109/HRI53351.2022.9889366},
  year      = {2022},
  date      = {2022-03-10},
  urldate   = {2022-03-10},
  booktitle = {ACM/IEEE International Conference on Human-Robot Interaction (HRI)},
  volume    = {17},
  pages     = {110--119},
  publisher = {IEEE},
  keywords  = {Bias, Human-robot interaction, Humanoid robots},
  pubstate  = {published},
  tppubtype = {conference},
}