Update bibliography
parent c179b8f294
commit a0450c5bb7
1 changed file with 79 additions and 0 deletions
@@ -387,6 +387,23 @@
  file = {/home/charlotte/sync/Zotero/storage/IMAEDND9/Bennedsen and Caspersen - 2004 - Programming in context a model-first approach to .pdf}
}

@article{berniusMachineLearningBased2022,
  title = {Machine Learning Based Feedback on Textual Student Answers in Large Courses},
  author = {Bernius, Jan Philip and Krusche, Stephan and Bruegge, Bernd},
  year = {2022},
  month = jan,
  journal = {Computers and Education: Artificial Intelligence},
  volume = {3},
  pages = {100081},
  issn = {2666-920X},
  doi = {10.1016/j.caeai.2022.100081},
  url = {https://www.sciencedirect.com/science/article/pii/S2666920X22000364},
  urldate = {2024-01-10},
  abstract = {Many engineering disciplines require problem-solving skills, which cannot be learned by memorization alone. Open-ended textual exercises allow students to acquire these skills. Students can learn from their mistakes when instructors provide individual feedback. However, grading these exercises is often a manual, repetitive, and time-consuming activity. The number of computer science students graduating per year has steadily increased over the last decade. This rise has led to large courses that cause a heavy workload for instructors, especially if they provide individual feedback to students. This article presents CoFee, a framework to generate and suggest computer-aided feedback for textual exercises based on machine learning. CoFee utilizes a segment-based grading concept, which links feedback to text segments. CoFee automates grading based on topic modeling and an assessment knowledge repository acquired during previous assessments. A language model builds an intermediate representation of the text segments. Hierarchical clustering identifies groups of similar text segments to reduce the grading overhead. We first demonstrated the CoFee framework in a small laboratory experiment in 2019, which showed that the grading overhead could be reduced by 85\%. This experiment confirmed the feasibility of automating the grading process for problem-solving exercises. We then evaluated CoFee in a large course at the Technical University of Munich from 2019 to 2021, with up to 2,200 enrolled students per course. We collected data from 34 exercises offered in each of these courses. On average, CoFee suggested feedback for 45\% of the submissions. 92\% (Positive Predictive Value) of these suggestions were precise and, therefore, accepted by the instructors.},
  keywords = {Assessment support system,Automatic assessment,Education,Feedback,Grading,Interactive learning,Learning,Software engineering},
  file = {/home/charlotte/sync/Zotero/storage/UWSG2P4L/Bernius et al. - 2022 - Machine learning based feedback on textual student.pdf;/home/charlotte/sync/Zotero/storage/TLLKP87F/S2666920X22000364.html}
}

@inproceedings{bethkeryExploringExploratoryProgramming2017,
  title = {Exploring Exploratory Programming},
  booktitle = {2017 {{IEEE Symposium}} on {{Visual Languages}} and {{Human-Centric Computing}} ({{VL}}/{{HCC}})},
@@ -1915,6 +1932,23 @@
  abstract = {The main goal of this study was to examine the effects of authors' name and gender on judges' assessment of product creativity in 4 different domains (art, science, music, and poetry). A total of 119 participants divided into 5 groups assessed products signed with a fictional author's name (unique vs. typical, male vs. female) or in an anonymous condition. It was observed that depending on the domain, the uniqueness of the author's name and her or his gender was associated with the assessment of creativity of the product. A poem and painting signed with an unusual name and a piece of music whose authorship was attributed to a man with a unique name were assessed as especially creative. In case of scientific theory, works attributed to men were assessed as significantly more creative than those of women. The results are discussed in light of the attributional approach to creativity.}
}

@article{leeSupportingStudentsGeneration2023,
  title = {Supporting Students' Generation of Feedback in Large-Scale Online Course with Artificial Intelligence-Enabled Evaluation},
  author = {Lee, Alwyn Vwen Yen},
  year = {2023},
  month = jun,
  journal = {Studies in Educational Evaluation},
  volume = {77},
  pages = {101250},
  issn = {0191-491X},
  doi = {10.1016/j.stueduc.2023.101250},
  url = {https://www.sciencedirect.com/science/article/pii/S0191491X23000160},
  urldate = {2024-01-10},
  abstract = {Educators in large-scale online courses tend to lack the necessary resources to generate and provide adequate feedback for all students, especially when students' learning outcomes are evaluated through student writing. As a result, students welcome peer feedback and sometimes generate self-feedback to widen their perspectives and obtain feedback, but often lack the support to do so. This study, as part of a larger project, sought to address this prevalent problem in large-scale courses by allowing students to write essays as an expression of their opinions and response to others, conduct peer and self-evaluation, using provided rubric and Artificial Intelligence (AI)-enabled evaluation to aid the giving and receiving of feedback. A total of 605 undergraduate students were part of a large-scale online course and contributed over 2500 short essays during a semester. The research design uses a mixed-methods approach, consisting qualitative measures used during essay coding, and quantitative methods from the application of machine learning algorithms. With limited instructors and resources, students first use instructor-developed rubric to conduct peer and self-assessment, while instructors qualitatively code a subset of essays that are used as inputs for training a machine learning model, which is subsequently used to provide automated scores and an accuracy rate for the remaining essays. With AI-enabled evaluation, the provision of feedback can become a sustainable process with students receiving and using meaningful feedback for their work, entailing shared responsibility from teachers and students, and becoming more effective.},
  keywords = {Artificial intelligence,Formative assessment,Machine learning,Online course,Peer and self-feedback},
  file = {/home/charlotte/sync/Zotero/storage/4V95XI77/Lee - 2023 - Supporting students’ generation of feedback in lar.pdf;/home/charlotte/sync/Zotero/storage/WGZCH6HT/S0191491X23000160.html}
}

@article{leibaOAuthWebAuthorization2012,
  title = {{{OAuth Web Authorization Protocol}}},
  author = {Leiba, Barry},
@@ -1948,6 +1982,14 @@
  file = {/home/charlotte/sync/Zotero/storage/V7D5GFFW/Liao et al. - 2019 - A Robust Machine Learning Technique to Predict Low.pdf}
}

@inproceedings{lienard2023extracting,
  title = {Extracting Unit Tests from Patterns Mined in Student Code to Provide Improved Feedback in Autograders},
  booktitle = {Seminar Series on Advanced Techniques \& Tools for Software Evolution ({{SATToSE}})},
  author = {Lienard, Julien and Mens, Kim and Nijssen, Siegfried},
  year = {2023},
  file = {/home/charlotte/sync/Zotero/storage/V2ECFWXV/Lienard et al. - 2023 - Extracting unit tests from patterns mined in stude.pdf}
}

@article{livierisPredictingSecondarySchool2019,
  title = {Predicting {{Secondary School Students}}' {{Performance Utilizing}} a {{Semi-supervised Learning Approach}}},
  author = {Livieris, Ioannis E. and Drakopoulou, Konstantina and Tampakas, Vassilis T. and Mikropoulos, Tassos A. and Pintelas, Panagiotis},
@@ -2611,6 +2653,24 @@
  file = {/home/charlotte/sync/Zotero/storage/HN484VJ9/Osmanbegovic and Suljic - 2012 - Data mining approach for predicting student perfor.pdf;/home/charlotte/sync/Zotero/storage/AGG6SJ86/193806.html}
}

@article{owenImpactFeedbackFormative2016,
  title = {The {{Impact}} of {{Feedback}} as {{Formative Assessment}} on {{Student Performance}}},
  author = {Owen, Leanne},
  year = {2016},
  journal = {International Journal of Teaching and Learning in Higher Education},
  volume = {28},
  number = {2},
  pages = {168--175},
  publisher = {{International Society for Exploring Teaching and Learning}},
  url = {https://eric.ed.gov/?id=EJ1111131},
  urldate = {2024-01-10},
  abstract = {This article provides an evaluation of the redesign of a research methods course intended to enhance students' learning for understanding and transfer. Drawing on principles of formative assessment from the existing academic literature, the instructor introduced a number of increasingly complex low-stakes assignments for students to complete prior to submitting their final project. Concrete, constructive feedback from either the instructor or peers or both was offered at each stage of the project so that students could have the opportunity to review their work and improve particular aspects prior to moving on to the next assignment. Student performance on each subsequent submission was assessed through the use of a scoring rubric. Although there was significant improvement from one draft of a given assignment (T1) to the next (T2), the instructor's decision not to require a preliminary draft of the final project ultimately yielded mixed results at the end of the course (T3); this serves to highlight the importance of providing multiple active learning opportunities for students by using a progressive scaffolding approach.},
  langid = {english},
  keywords = {Academic Achievement,Active Learning,Assignments,College Students,Feedback (Response),Formative Evaluation,Instructional Design,Methods Courses,Research Methodology,Scaffolding (Teaching Technique),Scoring Rubrics,Student Evaluation,Student Projects},
  annotation = {ERIC Number: EJ1111131},
  file = {/home/charlotte/sync/Zotero/storage/KKLMLPAK/Owen - 2016 - The Impact of Feedback as Formative Assessment on .pdf}
}

@article{paivaAutomatedAssessmentComputer2022,
  title = {Automated {{Assessment}} in {{Computer Science Education}}: {{A State-of-the-Art Review}}},
  shorttitle = {Automated {{Assessment}} in {{Computer Science Education}}},
@@ -3870,6 +3930,25 @@
  file = {/home/charlotte/sync/Zotero/storage/KZU22XRH/Xing and Du - 2019 - Dropout Prediction in MOOCs Using Deep Learning f.pdf}
}

@article{xuArtificialIntelligenceConstructing2023,
  title = {Artificial Intelligence in Constructing Personalized and Accurate Feedback Systems for Students},
  author = {Xu, Wenzhong and Meng, Jun and Raja, S. Kanaga Suba and Priya, M. Padma and Kiruthiga Devi, M.},
  year = {2023},
  month = feb,
  journal = {International Journal of Modeling, Simulation, and Scientific Computing},
  volume = {14},
  number = {01},
  pages = {2341001},
  publisher = {{World Scientific Publishing Co.}},
  issn = {1793-9623},
  doi = {10.1142/S1793962323410015},
  url = {https://www.worldscientific.com/doi/abs/10.1142/S1793962323410015},
  urldate = {2024-01-10},
  abstract = {Artificial Intelligence (AI) systems have evolved with digital learning developments to provide thriving soft groups with digital opportunities in response to feedback. When it comes to learning environments, educators' training feedback is often used as a response recourse. Through the use of final evaluations, students receive feedback that improves their education abilities. To improve academic achievement and explore knowledge in the learning process, this section provides an AI-assisted personalized feedback system (AI-PFS). An individualized feedback system is implemented to learn more about the student's lack of academic experience interactivity and different collaboration behaviors. According to their benchmark, PFS aims to establish a personalized and reliable feedback process for each class based on their collaborative process and learn analytics modules. It has been proposed to use multi-objective implementations to evaluate students regarding the learning results and teaching methods. With different series of questions sessions for students, AI-PFS has been designed, and the findings showed that it greatly enhances the performance rate of 95.32\% with personalized and reasonable predictive.},
  keywords = {artificial intelligence,feedback system,Students feedback},
  file = {/home/charlotte/sync/Zotero/storage/45YZ53JJ/Xu et al. - 2023 - Artificial intelligence in constructing personaliz.pdf}
}

@article{yangFacilitatingInteractionsStructured2008,
  title = {Facilitating Interactions through Structured Web-Based Bulletin Boards: {{A}} Quasi-Experimental Study on Promoting Learners' Critical Thinking Skills},
  shorttitle = {Facilitating Interactions through Structured Web-Based Bulletin Boards},