@inproceedings{96a3224a7a14426eb749c0cb7640dec8,
  title     = {{LLMs} in Open and Closed Book Examinations in a Final Year Applied Machine Learning Course (Early Findings)},
  abstract  = {This research has three prongs, with each comparing open- and closed-book exam questions across six years (2017-2023) in a final year undergraduate applied machine learning course. First, the authors evaluated the performance of numerous LLMs, compared to student performance, and comparing open and closed book exams. Second, at a micro level, the examination questions and categories for which LLMs were most and least effective were compared. This level of analysis is rarely if ever, discussed in the literature. The research finally investigates LLM detection techniques, specifically their efficacy in identifying replies created wholly by an LLM. It considers both raw LLM outputs and LLM outputs that have been tampered with by students, with an emphasis on academic integrity. This study is a staff-student research collaboration, featuring contributions from eight academic professionals and six students.},
  keywords  = {AI, Assessment, Detection, Large Language Models, LLMs, ML},
  author    = {Quille, Keith and Gordon, Damien and Hofmann, Markus and Becker, Brett A. and Harte, Miriam and Nolan, Keith and Faherty, Roisin and Hensman, Svetlana and O{\textquoteright}Leary, Ciaran},
  note      = {Publisher Copyright: {\textcopyright} 2024 Copyright held by the owner/author(s); 29th Conference Innovation and Technology in Computer Science Education, ITiCSE 2024 ; Conference date: 08-07-2024 Through 10-07-2024},
  year      = {2024},
  month     = jul,
  day       = {8},
  doi       = {10.1145/3649405.3659514},
  language  = {English},
  series    = {Annual Conference on Innovation and Technology in Computer Science Education, {ITiCSE}},
  publisher = {Association for Computing Machinery},
  pages     = {822},
  booktitle = {{ITiCSE} 2024 - Proceedings of the 2024 Conference Innovation and Technology in Computer Science Education},
  address   = {New York, NY, United States},
}