@article{10.21494/ISTE.OP.2026.1405,
  author   = {Viseur, Robert},
  title    = {Risks and benefits of conversational agents for naive access to scientific knowledge},
  journal  = {Open Journal in Information Systems Engineering},
  volume   = {6},
  number   = {Special Issue},
  year     = {2026},
  doi      = {10.21494/ISTE.OP.2026.1405},
  issn     = {2634-1468},
  url      = {https://openscience.fr/Risks-and-benefits-of-conversational-agents-for-naive-access-to-scientific},
  abstract = {This article examines the risks associated with using generative conversational agents such as ChatGPT to access scientific knowledge (and, more broadly, technical and medical knowledge). The evolution of the Web has been accompanied by a shift in gatekeeping towards algorithmic forms, of which generative artificial intelligences are the latest manifestation. Their limitations, most notably hallucinations and various biases, are, however, well documented. Are these conversational agents therefore suitable for tasks of scientific mediation? Their performance depends not only on the properties of their algorithms but also on the availability of training data in sufficient quantity and quality. Access to content on news websites is, moreover, frequently hindered by publishers. What, then, of scientific content managed by commercial academic publishers? Must developers of generative chatbots rely on lower-quality material, with harmful consequences for the reliability of responses? We therefore analyse the risks of scientific misinformation stemming from constraints on data access. We then discuss these risks more broadly, when such agents are used as scientific mediators, across different usage scenarios.},
}