@article{BornheimGriegerBlanecketal.2024,
  author    = {Bornheim, Tobias and Grieger, Niklas and Blaneck, Patrick Gustav and Bialonski, Stephan},
  title     = {Speaker Attribution in {German} Parliamentary Debates with {QLoRA}-adapted Large Language Models},
  journal   = {Journal for Language Technology and Computational Linguistics : JLCL},
  volume    = {37},
  number    = {1},
  publisher = {Gesellschaft f{\"u}r Sprachtechnologie und Computerlinguistik},
  address   = {Regensburg},
  issn      = {2190-6858},
  doi       = {10.21248/jlcl.37.2024.244},
  pagetotal = {13},
  year      = {2024},
  abstract  = {The growing body of political texts opens up new opportunities for rich insights into political dynamics and ideologies but also increases the workload for manual analysis. Automated speaker attribution, which detects who said what to whom in a speech event and is closely related to semantic role labeling, is an important processing step for computational text analysis. We study the potential of the large language model family Llama 2 to automate speaker attribution in German parliamentary debates from 2017-2021. We fine-tune Llama 2 with QLoRA, an efficient training strategy, and observe our approach to achieve competitive performance in the GermEval 2023 Shared Task On Speaker Attribution in German News Articles and Parliamentary Debates. Our results shed light on the capabilities of large language models in automating speaker attribution, revealing a promising avenue for computational analysis of political discourse and the development of semantic role labeling systems.},
  language  = {en},
}

@unpublished{BornheimNiklasBlanecketal.2023,
  author     = {Bornheim, Tobias and Grieger, Niklas and Blaneck, Patrick Gustav and Bialonski, Stephan},
  title      = {Speaker Attribution in {German} Parliamentary Debates with {QLoRA}-adapted Large Language Models},
  eprint     = {2309.09902},
  eprinttype = {arXiv},
  doi        = {10.48550/arXiv.2309.09902},
  pagetotal  = {8},
  year       = {2023},
  note       = {Preprint},
  abstract   = {The growing body of political texts opens up new opportunities for rich insights into political dynamics and ideologies but also increases the workload for manual analysis. Automated speaker attribution, which detects who said what to whom in a speech event and is closely related to semantic role labeling, is an important processing step for computational text analysis. We study the potential of the large language model family Llama 2 to automate speaker attribution in German parliamentary debates from 2017-2021. We fine-tune Llama 2 with QLoRA, an efficient training strategy, and observe our approach to achieve competitive performance in the GermEval 2023 Shared Task On Speaker Attribution in German News Articles and Parliamentary Debates. Our results shed light on the capabilities of large language models in automating speaker attribution, revealing a promising avenue for computational analysis of political discourse and the development of semantic role labeling systems.},
  language   = {en},
}

@inproceedings{BornheimGriegerBialonski2021,
  author    = {Bornheim, Tobias and Grieger, Niklas and Bialonski, Stephan},
  title     = {{FHAC} at {GermEval} 2021: Identifying {German} Toxic, Engaging, and Fact-Claiming Comments with Ensemble Learning},
  booktitle = {Proceedings of the GermEval 2021 Workshop on the Identification of Toxic, Engaging, and Fact-Claiming Comments : 17th Conference on Natural Language Processing KONVENS 2021},
  publisher = {Heinrich Heine University},
  address   = {D{\"u}sseldorf},
  doi       = {10.48415/2021/fhw5-x128},
  pages     = {105--111},
  year      = {2021},
  language  = {en},
}

@inproceedings{BlaneckBornheimGriegeretal.2022,
  author    = {Blaneck, Patrick Gustav and Bornheim, Tobias and Grieger, Niklas and Bialonski, Stephan},
  title     = {Automatic Readability Assessment of {German} Sentences with Transformer Ensembles},
  booktitle = {Proceedings of the GermEval 2022 Workshop on Text Complexity Assessment of German Text},
  publisher = {Association for Computational Linguistics},
  address   = {Potsdam},
  doi       = {10.48550/arXiv.2209.04299},
  pages     = {57--62},
  year      = {2022},
  abstract  = {Reliable methods for automatic readability assessment have the potential to impact a variety of fields, ranging from machine translation to self-informed learning. Recently, large language models for the German language (such as GBERT and GPT-2-Wechsel) have become available, allowing to develop Deep Learning based approaches that promise to further improve automatic readability assessment. In this contribution, we studied the ability of ensembles of fine-tuned GBERT and GPT-2-Wechsel models to reliably predict the readability of German sentences. We combined these models with linguistic features and investigated the dependence of prediction performance on ensemble size and composition. Mixed ensembles of GBERT and GPT-2-Wechsel performed better than ensembles of the same size consisting of only GBERT or GPT-2-Wechsel models. Our models were evaluated in the GermEval 2022 Shared Task on Text Complexity Assessment on data of German sentences. On out-of-sample data, our best ensemble achieved a root mean squared error of 0.435.},
  language  = {en},
}