% Bibliography normalised during review:
%  - one field per line, aligned; page ranges use "--" without surrounding spaces
%  - redundant series fields that duplicated booktitle removed (repository-export artifact)
%  - editors moved out of booktitle ("/ hrsg. von ...") into proper editor fields
%  - "(Print)" / "(Online)" annotations stripped from isbn fields
%  - acronyms and proper nouns brace-protected in titles against style recasing
%  - literal "ß" replaced with {\ss} for classic-BibTeX escape consistency

@inproceedings{BertschMeineckeWolfetal.2019,
  author    = {Bertsch, Timo and Meinecke, Matthias and Wolf, Martin and Schmunk, Karina},
  title     = {{Smart-Living-Services} nur gegen {Daten}? {Process-Mining} als {M{\"o}glichkeit} zur {Steigerung} der {Akzeptanz}!},
  booktitle = {Angewandte Forschung in der Wirtschaftsinformatik 2019 : Tagungsband zur 32. {AKWI-Jahrestagung}},
  editor    = {Wolf, Martin R. and Barton, Thomas and Herrmann, Frank and Meister, Vera G. and M{\"u}ller, Christian and Seel, Christian},
  publisher = {mana-Buch},
  address   = {Heide},
  isbn      = {978-3-944330-62-4},
  pages     = {216--226},
  year      = {2019},
  abstract  = {Seit Jahren etablieren sich Technologien in unserem Alltag, die mit Hilfe von smarten Komponenten neue Services und Vernetzungsm{\"o}glichkeiten schaffen. Dieses Paper beschreibt die Ergebnisse einer Studie, die die Akzeptanz von IoT-gest{\"u}tzten, smarten Services im privaten Umfeld untersucht. Dabei wird eine zentrale Datenverarbeitung mit automatisierter Erstellung smarter Services der dezentralen Datenverarbeitung mit manueller Serviceerstellung in sieben Kategorien gegen{\"u}bergestellt. Die Auswertung der Studie legt die Forschungsfrage nahe, ob das Nutzerverhalten im Kontext Smart Living nicht auch mit einem dezentralen L{\"o}sungsansatz, und somit unabh{\"a}ngig von gro{\ss}en Unternehmen, analysiert werden kann. Hierf{\"u}r wird im zweiten Teil des Papers die Anwendbarkeit von Process-Mining im Bereich Smart Living untersucht und prototypisch getestet.},
  language  = {de}
}

@incollection{KraftKohlMeinecke2024,
  author    = {Kraft, Bodo and Kohl, Philipp and Meinecke, Matthias},
  title     = {{Analyse} und {Nachverfolgung} von {Projektzielen} durch {Einsatz} von {Natural Language Processing}},
  booktitle = {KI in der Projektwirtschaft : was ver{\"a}ndert sich durch KI im Projektmanagement?},
  editor    = {Bernert, Christian and Scheurer, Steffen and Wehnes, Harald},
  publisher = {UVK Verlag},
  isbn      = {978-3-3811-1132-9},
  doi       = {10.24053/9783381111329},
  pages     = {157--167},
  year      = {2024},
  language  = {de}
}

% NOTE(review): the original doi field of the following entry held the online
% ISBN (978-3-031-39059-3), which is not a DOI (DOIs carry a "10." prefix).
% Replaced with the Springer book-level DOI derived from that ISBN --
% confirm the chapter-level DOI (10.1007/978-3-031-39059-3_NN) if available.
@inproceedings{KohlFreyerKraemeretal.2023,
  author    = {Kohl, Philipp and Freyer, Nils and Kr{\"a}mer, Yoka and Werth, Henri and Wolf, Steffen and Kraft, Bodo and Meinecke, Matthias and Z{\"u}ndorf, Albert},
  title     = {{ALE}: a simulation-based active learning evaluation framework for the parameter-driven comparison of query strategies for {NLP}},
  booktitle = {Deep Learning Theory and Applications ({DeLTA} 2023)},
  series    = {Communications in Computer and Information Science},
  editor    = {Conte, Donatello and Fred, Ana and Gusikhin, Oleg and Sansone, Carlo},
  publisher = {Springer},
  address   = {Cham},
  isbn      = {978-3-031-39058-6},
  doi       = {10.1007/978-3-031-39059-3},
  pages     = {235--253},
  year      = {2023},
  abstract  = {Supervised machine learning and deep learning require a large amount of labeled data, which data scientists obtain in a manual, and time-consuming annotation process. To mitigate this challenge, Active Learning (AL) proposes promising data points to annotators they annotate next instead of a subsequent or random sample. This method is supposed to save annotation effort while maintaining model performance. However, practitioners face many AL strategies for different tasks and need an empirical basis to choose between them. Surveys categorize AL strategies into taxonomies without performance indications. Presentations of novel AL strategies compare the performance to a small subset of strategies. Our contribution addresses the empirical basis by introducing a reproducible active learning evaluation (ALE) framework for the comparative evaluation of AL strategies in NLP. The framework allows the implementation of AL strategies with low effort and a fair data-driven comparison through defining and tracking experiment parameters (e.g., initial dataset size, number of data points per query step, and the budget). ALE helps practitioners to make more informed decisions, and researchers can focus on developing new, effective AL strategies and deriving best practices for specific use cases. With best practices, practitioners can lower their annotation costs. We present a case study to illustrate how to use the framework.},
  language  = {en}
}

@inproceedings{FreyerThewesMeinecke2023,
  author    = {Freyer, Nils and Thewes, Dustin and Meinecke, Matthias},
  title     = {{GUIDO}: a hybrid approach to guideline discovery \& ordering from natural language texts},
  booktitle = {Proceedings of the 12th International Conference on Data Science, Technology and Applications {DATA} - Volume 1},
  editor    = {Gusikhin, Oleg and Hammoudi, Slimane and Cuzzocrea, Alfredo},
  isbn      = {978-989-758-664-4},
  issn      = {2184-285X},
  doi       = {10.5220/0012084400003541},
  pages     = {335--342},
  year      = {2023},
  abstract  = {Extracting workflow nets from textual descriptions can be used to simplify guidelines or formalize textual descriptions of formal processes like business processes and algorithms. The task of manually extracting processes, however, requires domain expertise and effort. While automatic process model extraction is desirable, annotating texts with formalized process models is expensive. Therefore, there are only a few machine-learning-based extraction approaches. Rule-based approaches, in turn, require domain specificity to work well and can rarely distinguish relevant and irrelevant information in textual descriptions. In this paper, we present GUIDO, a hybrid approach to the process model extraction task that first, classifies sentences regarding their relevance to the process model, using a BERT-based sentence classifier, and second, extracts a process model from the sentences classified as relevant, using dependency parsing. The presented approach achieves significantly better results than a pure rule-based approach. GUIDO achieves an average behavioral similarity score of 0.93. Still, in comparison to purely machine-learning-based approaches, the annotation costs stay low.},
  language  = {en}
}