To increase trust in systems, engineers strive to create explanations that are as accurate as possible. However, if the system’s accuracy is compromised, providing explanations for its incorrect behavior may inadvertently lead to misleading explanations. This concern is particularly pertinent when the correctness of the system is difficult for users to judge. In an online survey experiment with 162 participants, we analyze the impact of misleading explanations on users’ perceived and demonstrated trust in a system that performs a hardly assessable task in an unreliable manner. Participants who used a system that provided potentially misleading explanations rated their trust significantly higher than participants who saw the system’s prediction alone. They also aligned their initial prediction with the system’s prediction significantly more often. Our findings underscore the importance of exercising caution when generating explanations, especially in tasks that are inherently difficult to evaluate. The paper and supplementary materials are available at https://doi.org/10.17605/osf.io/azu72
%0 Conference Paper
%1 10.1145/3627043.3659573
%A Sadeghi, Mersedeh
%A Pöttgen, Daniel
%A Ebel, Patrick
%A Vogelsang, Andreas
%B Proceedings of the 32nd ACM Conference on User Modeling, Adaptation and Personalization
%C New York, NY, USA
%D 2024
%I Association for Computing Machinery
%K topic_visualcomputing XAI, explainability, machine learning, trust
%P 36–46
%R 10.1145/3627043.3659573
%T Explaining the Unexplainable: The Impact of Misleading Explanations on Trust in Unreliable Predictions for Hardly Assessable Tasks
%U https://doi.org/10.1145/3627043.3659573
%X To increase trust in systems, engineers strive to create explanations that are as accurate as possible. However, if the system’s accuracy is compromised, providing explanations for its incorrect behavior may inadvertently lead to misleading explanations. This concern is particularly pertinent when the correctness of the system is difficult for users to judge. In an online survey experiment with 162 participants, we analyze the impact of misleading explanations on users’ perceived and demonstrated trust in a system that performs a hardly assessable task in an unreliable manner. Participants who used a system that provided potentially misleading explanations rated their trust significantly higher than participants who saw the system’s prediction alone. They also aligned their initial prediction with the system’s prediction significantly more often. Our findings underscore the importance of exercising caution when generating explanations, especially in tasks that are inherently difficult to evaluate. The paper and supplementary materials are available at https://doi.org/10.17605/osf.io/azu72
%@ 9798400704338
@inproceedings{10.1145/3627043.3659573,
abstract = {To increase trust in systems, engineers strive to create explanations that are as accurate as possible. However, if the system’s accuracy is compromised, providing explanations for its incorrect behavior may inadvertently lead to misleading explanations. This concern is particularly pertinent when the correctness of the system is difficult for users to judge. In an online survey experiment with 162 participants, we analyze the impact of misleading explanations on users’ perceived and demonstrated trust in a system that performs a hardly assessable task in an unreliable manner. Participants who used a system that provided potentially misleading explanations rated their trust significantly higher than participants who saw the system’s prediction alone. They also aligned their initial prediction with the system’s prediction significantly more often. Our findings underscore the importance of exercising caution when generating explanations, especially in tasks that are inherently difficult to evaluate. The paper and supplementary materials are available at https://doi.org/10.17605/osf.io/azu72},
address = {New York, NY, USA},
author = {Sadeghi, Mersedeh and P\"{o}ttgen, Daniel and Ebel, Patrick and Vogelsang, Andreas},
biburl = {https://puma.scadsai.uni-leipzig.de/bibtex/2724d9689e43c4f686bcca6b8f97e6166/scadsfct},
booktitle = {Proceedings of the 32nd ACM Conference on User Modeling, Adaptation and Personalization},
doi = {10.1145/3627043.3659573},
isbn = {9798400704338},
keywords = {topic_visualcomputing XAI, explainability, machine learning, trust},
location = {Cagliari, Italy},
numpages = {11},
pages = {36--46},
publisher = {Association for Computing Machinery},
series = {UMAP '24},
title = {Explaining the Unexplainable: The Impact of Misleading Explanations on Trust in Unreliable Predictions for Hardly Assessable Tasks},
url = {https://doi.org/10.1145/3627043.3659573},
year = 2024
}