@unpublished{GriegerMehrkanoonBialonski2024,
  author   = {Grieger, Niklas and Mehrkanoon, Siamak and Bialonski, Stephan},
  title    = {Data-efficient sleep staging with synthetic time series pretraining},
  note     = {arXiv preprint},
  pages    = {10 pages},
  year     = {2024},
  abstract = {Analyzing electroencephalographic (EEG) time series can be challenging, especially with deep neural networks, due to the large variability among human subjects and often small datasets. To address these challenges, various strategies, such as self-supervised learning, have been suggested, but they typically rely on extensive empirical datasets. Inspired by recent advances in computer vision, we propose a pretraining task termed "frequency pretraining" to pretrain a neural network for sleep staging by predicting the frequency content of randomly generated synthetic time series. Our experiments demonstrate that our method surpasses fully supervised learning in scenarios with limited data and few subjects, and matches its performance in regimes with many subjects. Furthermore, our results underline the relevance of frequency information for sleep stage scoring, while also demonstrating that deep neural networks utilize information beyond frequencies to enhance sleep staging performance, which is consistent with previous research. We anticipate that our approach will be advantageous across a broad spectrum of applications where EEG data is limited or derived from a small number of subjects, including the domain of brain-computer interfaces.},
  language = {en}
}

@article{BornheimGriegerBlanecketal.2024,
  author    = {Bornheim, Tobias and Grieger, Niklas and Blaneck, Patrick Gustav and Bialonski, Stephan},
  title     = {Speaker Attribution in German Parliamentary Debates with QLoRA-adapted Large Language Models},
  journal   = {Journal for Language Technology and Computational Linguistics (JLCL)},
  volume    = {37},
  number    = {1},
  publisher = {Gesellschaft f{\"u}r Sprachtechnologie und Computerlinguistik},
  address   = {Regensburg},
  issn      = {2190-6858},
  doi       = {10.21248/jlcl.37.2024.244},
  pages     = {13 pages},
  year      = {2024},
  abstract  = {The growing body of political texts opens up new opportunities for rich insights into political dynamics and ideologies but also increases the workload for manual analysis. Automated speaker attribution, which detects who said what to whom in a speech event and is closely related to semantic role labeling, is an important processing step for computational text analysis. We study the potential of the large language model family Llama 2 to automate speaker attribution in German parliamentary debates from 2017 to 2021. We fine-tune Llama 2 with QLoRA, an efficient training strategy, and find that our approach achieves competitive performance in the GermEval 2023 Shared Task on Speaker Attribution in German News Articles and Parliamentary Debates. Our results shed light on the capabilities of large language models in automating speaker attribution, revealing a promising avenue for computational analysis of political discourse and the development of semantic role labeling systems.},
  language  = {en}
}
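
% The first entry above describes "frequency pretraining": a network is
% pretrained to predict the frequency content of randomly generated synthetic
% signals before being fine-tuned on real EEG for sleep staging. The Python
% sketch below illustrates that idea only; it is not the authors'
% implementation, and all names (make_batch, FreqNet), the band layout, and
% the hyperparameters are illustrative assumptions.

import numpy as np
import torch
import torch.nn as nn

FS = 100          # sampling rate in Hz (assumption)
SECONDS = 30      # one EEG-like epoch of 30 s
BANDS = np.array([0.5, 4.0, 8.0, 13.0, 30.0])  # band edges, illustrative

def make_batch(batch_size=32):
    """Generate random sums of sinusoids plus noise, with multi-hot labels
    marking which frequency bands are present (the pretext targets)."""
    t = np.arange(FS * SECONDS) / FS
    x = np.zeros((batch_size, len(t)), dtype=np.float32)
    y = np.zeros((batch_size, len(BANDS) - 1), dtype=np.float32)
    for i in range(batch_size):
        for _ in range(np.random.randint(1, 4)):   # 1 to 3 components
            f = np.random.uniform(0.5, 30.0)       # random frequency
            x[i] += np.sin(2 * np.pi * f * t + np.random.uniform(0, 2 * np.pi))
            y[i, np.searchsorted(BANDS, f, side="right") - 1] = 1.0
        x[i] += 0.1 * np.random.randn(len(t))      # additive noise
    return torch.from_numpy(x).unsqueeze(1), torch.from_numpy(y)

class FreqNet(nn.Module):
    """Small 1D conv net; after pretraining, the conv stack would be reused
    as a feature extractor for the downstream sleep staging task."""
    def __init__(self, n_out):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv1d(1, 16, 7, stride=2), nn.ReLU(),
            nn.Conv1d(16, 32, 7, stride=2), nn.ReLU(),
            nn.AdaptiveAvgPool1d(1), nn.Flatten(),
        )
        self.head = nn.Linear(32, n_out)
    def forward(self, x):
        return self.head(self.features(x))

model = FreqNet(n_out=len(BANDS) - 1)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_fn = nn.BCEWithLogitsLoss()   # multi-label: several bands may be present
for step in range(100):            # shortened pretraining loop
    x, y = make_batch()
    loss = loss_fn(model(x), y)
    opt.zero_grad()
    loss.backward()
    opt.step()

% Because the synthetic data are generated on the fly, this pretext task
% needs no recorded EEG at all, which is what makes the approach attractive
% in the small-data regimes the abstract mentions.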
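
% The second entry fine-tunes Llama 2 with QLoRA, i.e. a 4-bit quantized
% frozen base model with small trainable LoRA adapters. A minimal sketch of
% that setup using the Hugging Face transformers/peft/bitsandbytes stack is
% given below; the model name, target modules, and hyperparameters are
% assumptions, not the authors' exact configuration.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

MODEL = "meta-llama/Llama-2-7b-hf"  # assumed variant; the paper uses the Llama 2 family

# Load the base model with 4-bit NF4 quantization (the "Q" in QLoRA).
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForCausalLM.from_pretrained(MODEL, quantization_config=bnb_config)
model = prepare_model_for_kbit_training(model)

# Attach LoRA adapters; only these small matrices are updated during training,
# while the quantized base weights stay frozen.
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],  # illustrative choice of layers
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # adapter weights are a tiny fraction of the model

# Supervised fine-tuning on speaker-attribution examples (e.g., with
# transformers.Trainer) would proceed from here; omitted for brevity.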