@article{10.21494/ISTE.OP.2024.1147,
  author   = {Bollaert, Mathias and Coppin, Gilles},
  title    = {Toward Semantic {XAI} -- The Third Wave of Explainable Artificial Intelligence},
  journal  = {Cognitive Engineering},
  volume   = {7},
  number   = {2},
  year     = {2024},
  doi      = {10.21494/ISTE.OP.2024.1147},
  url      = {https://openscience.fr/Toward-semantic-XAI-the-third-wave-of-explainable-artificial-intelligence},
  issn     = {2517-6978},
  abstract = {To respond to the problems posed by the growing use of AI models in high stakes applications, explainable artificial intelligence (XAI) has experienced significant growth in recent years. Initially dedicated to the search for technical solutions making it possible to produce explanations automatically, it encountered several difficulties, in particular when these solutions were confronted with non-expert end users. The XAI then sought to draw inspiration from the social sciences to produce explanations that were easier to understand. Despite some encouraging results, this new approach has not brought as much as hoped. This article analyzes the evolution of the XAI through these two periods. It discusses possible reasons for the difficulties encountered, and then proposes a new approach to improve the automated production of explanations. This approach, called semantic explainability or S-XAI, focuses on user cognition. While previous methods are oriented towards algorithms or causality, S-XAI starts from the principle that understanding relies above all on the user's ability to appropriate the meaning of what is explained.},
}