@incollection{FreyerKempt2023,
  author    = {Freyer, Nils and Kempt, Hendrik},
  title     = {AI-DSS in healthcare and their power over health-insecure collectives},
  booktitle = {Justice in global health},
  series    = {Justice in global health},
  editor    = {Bhakuni, Himani and Miotto, Lucas},
  publisher = {Routledge},
  address   = {London},
  isbn      = {9781003399933},
  doi       = {10.4324/9781003399933-4},
  pages     = {38--55},
  year      = {2023},
  abstract  = {AI-based systems are nearing ubiquity not only in everyday low-stakes activities but also in medical procedures. To protect patients and physicians alike, explainability requirements have been proposed for the operation of AI-based decision support systems (AI-DSS), which adds hurdles to the productive use of AI in clinical contexts. This raises two questions: Who decides these requirements? And how should access to AI-DSS be provided to communities that reject these standards (particularly when such communities are expert-scarce)? This chapter investigates a dilemma that emerges from the implementation of global AI governance. While rejecting global AI governance limits the ability to help communities in need, global AI governance risks undermining and subjecting health-insecure communities to the force of the neo-colonial world order. For this, this chapter first surveys the current landscape of AI governance and introduces the approach of relational egalitarianism as key to (global health) justice. To discuss the two horns of the referred dilemma, the core power imbalances faced by health-insecure collectives (HICs) are examined. The chapter argues that only strong demands of a dual strategy towards health-secure collectives can both remedy the immediate needs of HICs and enable them to become healthcare independent.},
  language  = {en}
}

@article{KemptFreyerNagel2022,
  author    = {Kempt, Hendrik and Freyer, Nils and Nagel, Saskia K.},
  title     = {Justice and the normative standards of explainability in healthcare},
  journal   = {Philosophy \& Technology},
  series    = {Philosophy \& Technology},
  volume    = {35},
  number    = {Article number: 100},
  publisher = {Springer Nature},
  address   = {Berlin},
  doi       = {10.1007/s13347-022-00598-0},
  pages     = {1--19},
  year      = {2022},
  abstract  = {Providing healthcare services frequently involves cognitively demanding tasks, including diagnoses and analyses as well as complex decisions about treatments and therapy. From a global perspective, ethically significant inequalities exist between regions where the expert knowledge required for these tasks is scarce or abundant. One possible strategy to diminish such inequalities and increase healthcare opportunities in expert-scarce settings is to provide healthcare solutions involving digital technologies that do not necessarily require the presence of a human expert, e.g., in the form of artificial intelligent decision-support systems (AI-DSS). Such algorithmic decision-making, however, is mostly developed in resource- and expert-abundant settings to support healthcare experts in their work. As a practical consequence, the normative standards and requirements for such algorithmic decision-making in healthcare require the technology to be at least as explainable as the decisions made by the experts themselves. The goal of providing healthcare in settings where resources and expertise are scarce might come with a normative pull to lower the normative standards of using digital technologies in order to provide at least some healthcare in the first place. We scrutinize this tendency to lower standards in particular settings from a normative perspective, distinguish between different types of absolute and relative, local and global standards of explainability, and conclude by defending an ambitious and practicable standard of local relative explainability.},
  language  = {en}
}