@article{GriegerSchwabedalWendeletal.2021,
  author    = {Grieger, Niklas and Schwabedal, Justus T. C. and Wendel, Stefanie and Ritze, Yvonne and Bialonski, Stephan},
  title     = {Automated scoring of {pre-REM} sleep in mice with deep learning},
  journal   = {Scientific Reports},
  volume    = {11},
  pages     = {12245},
  publisher = {Springer Nature},
  address   = {London},
  issn      = {2045-2322},
  doi       = {10.1038/s41598-021-91286-0},
  year      = {2021},
  abstract  = {Reliable automation of the labor-intensive manual task of scoring animal sleep can facilitate the analysis of long-term sleep studies. In recent years, deep-learning-based systems, which learn optimal features from the data, increased scoring accuracies for the classical sleep stages of Wake, REM, and Non-REM. Meanwhile, it has been recognized that the statistics of transitional stages such as pre-REM, found between Non-REM and REM, may hold additional insight into the physiology of sleep and are now under vivid investigation. We propose a classification system based on a simple neural network architecture that scores the classical stages as well as pre-REM sleep in mice. When restricted to the classical stages, the optimized network showed state-of-the-art classification performance with an out-of-sample F1 score of 0.95 in male C57BL/6J mice. When unrestricted, the network showed lower F1 scores on pre-REM (0.5) compared to the classical stages. The result is comparable to previous attempts to score transitional stages in other species such as transition sleep in rats or N1 sleep in humans. Nevertheless, we observed that the sequence of predictions including pre-REM typically transitioned from Non-REM to REM reflecting sleep dynamics observed by human scorers. Our findings provide further evidence for the difficulty of scoring transitional sleep stages, likely because such stages of sleep are under-represented in typical data sets or show large inter-scorer variability. We further provide our source code and an online platform to run predictions with our trained network.},
  language  = {en},
}

@article{BornheimGriegerBlanecketal.2024,
  author    = {Bornheim, Tobias and Grieger, Niklas and Blaneck, Patrick Gustav and Bialonski, Stephan},
  title     = {Speaker Attribution in {German} Parliamentary Debates with {QLoRA-adapted} Large Language Models},
  journal   = {Journal for language technology and computational linguistics : JLCL},
  volume    = {37},
  number    = {1},
  publisher = {Gesellschaft f{\"u}r Sprachtechnologie und Computerlinguistik},
  address   = {Regensburg},
  issn      = {2190-6858},
  doi       = {10.21248/jlcl.37.2024.244},
  pagetotal = {13},
  year      = {2024},
  abstract  = {The growing body of political texts opens up new opportunities for rich insights into political dynamics and ideologies but also increases the workload for manual analysis. Automated speaker attribution, which detects who said what to whom in a speech event and is closely related to semantic role labeling, is an important processing step for computational text analysis. We study the potential of the large language model family Llama 2 to automate speaker attribution in German parliamentary debates from 2017-2021. We fine-tune Llama 2 with QLoRA, an efficient training strategy, and observe our approach to achieve competitive performance in the GermEval 2023 Shared Task On Speaker Attribution in German News Articles and Parliamentary Debates. Our results shed light on the capabilities of large language models in automating speaker attribution, revealing a promising avenue for computational analysis of political discourse and the development of semantic role labeling systems.},
  language  = {en},
}

@article{BialonskiGrieger2023,
  author    = {Bialonski, Stephan and Grieger, Niklas},
  title     = {Der {KI-Chatbot} {ChatGPT}: Eine {Herausforderung} f{\"u}r die {Hochschulen}},
  journal   = {Die neue Hochschule},
  volume    = {2023},
  number    = {1},
  publisher = {HLB},
  address   = {Bonn},
  issn      = {0340-448X},
  doi       = {10.5281/zenodo.7533758},
  pages     = {24--27},
  year      = {2023},
  abstract  = {Essays, Gedichte, Programmcode: ChatGPT generiert automatisch Texte auf bisher unerreicht hohem Niveau. Dieses und nachfolgende Systeme werden nicht nur die akademische Welt nachhaltig ver{\"a}ndern.},
  language  = {de},
}