diff --git a/bibliography.bib b/bibliography.bib index 23c9dd0..c0730a8 100644 --- a/bibliography.bib +++ b/bibliography.bib @@ -3325,6 +3325,22 @@ file = {/home/charlotte/sync/Zotero/storage/PBSLJNQM/Pham et al. - 2019 - Mining Patterns in Source Code Using Tree Mining A.pdf} } +@article{pierresCouldUseAI2024, + title = {Could the Use of {AI} in Higher Education Hinder Students with Disabilities? {A} Scoping Review}, + shorttitle = {Could the Use of {AI} in Higher Education Hinder Students with Disabilities?}, + author = {Pierr{\`e}s, Oriane and Christen, Markus and {Schmitt-Koopmann}, Felix and Darvishy, Alireza}, + year = {2024}, + journal = {IEEE Access}, + pages = {1--1}, + issn = {2169-3536}, + doi = {10.1109/ACCESS.2024.3365368}, + url = {https://ieeexplore.ieee.org/document/10433192}, + urldate = {2024-02-15}, + abstract = {Literature reviews on artificial intelligence (AI) have focused on the different applications of AI in higher education, the AI techniques used, and the benefits/risks of the use of AI. One of the greatest potentials of AI is to personalize higher education to the needs of students and offer timely feedback. This could benefit students with disabilities tremendously if their needs are also considered in the development of new AI educational technologies (EdTech). However, current reviews have failed to address the perspective of students with disabilities, which prompts ethical concerns. For instance, AI could treat people with disabilities as outliers in the data and end up discriminating against them. For that reason, this systematic literature review raises the following two questions: To what extent are ethical concerns considered in articles presenting AI applications assessing students (with disabilities) in higher education? What are the potential risks of using AI that assess students with disabilities in higher education? 
This scoping review highlights the lack of ethical reflection on AI technologies and an absence of discussion and inclusion of people with disabilities. Moreover, it identifies eight risks associated with the use of AI EdTech for students with disabilities. The review concludes with suggestions on how to mitigate these potential risks. Specifically, it advocates for increased attention to ethics within the field, the involvement of people with disabilities in research and development, as well as careful adoption of AI EdTech in higher education.}, + keywords = {Artificial intelligence,Assistive technologies,Bibliographies,Disabilities,Education,Educational technologies (EdTech),Educational technology,Ethics,Higher Education,Privacy,Protocols,Risk Assessment,Risk management}, + file = {/home/charlotte/sync/Zotero/storage/MTCRAXRI/Pierrès et al. - 2024 - Could The Use Of AI In Higher Education Hinder Stu.pdf} +} + @article{pintrichUnderstandingSelfregulatedLearning1995, title = {Understanding Self-Regulated Learning}, author = {Pintrich, Paul R.}, diff --git a/book.org b/book.org index 09c75e7..b8326f6 100644 --- a/book.org +++ b/book.org @@ -2,6 +2,7 @@ #+SUBTITLE: Improving automated assessment in programming education through educational data mining #+AUTHOR: Charlotte Van Petegem #+LANGUAGE: en-gb +#+DATE: 2024-03-20 #+LATEX_CLASS: book #+LATEX_CLASS_OPTIONS: [paper=240mm:170mm,parskip=half-,numbers=noendperiod,BCOR=10mm,DIV=10] #+LATEX_COMPILER: lualatex @@ -2616,6 +2617,7 @@ In the first dataset, we run PyLint on those student submissions, and use PyLint Note that in this dataset, we don't make the distinction between the different assignments students had to solve, since the way Pylint annotates them does not differ between assignments. In the second dataset, we use actual annotations left by graders on student code in Dodona. 
Here we train and test per assignment, since the set of messages that were used is also different for each assignment. + We differentiate between these two datasets, because we expect PyLint to be more consistent in when it places an annotation and also where it places that annotation. Most linting messages are detected through explicit pattern matching in the AST, so we expect our implicit pattern matching to perform rather well. Real-world data is more difficult, since graders are humans, and might miss an issue in one student's code that they annotated in another student's code, or they might not place the annotation for a certain message in a consistent location. @@ -2735,10 +2737,6 @@ Another important aspect that was explicitly left out of scope in this manuscrip :CUSTOM_ID: chap:discussion :END: -Dodona is a pretty good piece of software. -People use it, and like to use it, for some reason. -We should probably try make sure that this is still the case in the future. - - Successful platform - Lots of users - Interesting data for scientific research