@comment{Normalized export: one field per line, pages use "--" without spaces,
ISBN qualifiers removed. See internal-note fields for hedged corrections.}

@inproceedings{SiebigterothKraftSchmidtsetal.2019,
  author    = {Siebigteroth, Ines and Kraft, Bodo and Schmidts, Oliver and Z{\"u}ndorf, Albert},
  title     = {A Study on Improving Corpus Creation by Pair Annotation},
  series    = {Proceedings of the Poster Session of the 2nd Conference on Language, Data and Knowledge (LDK-PS 2019)},
  booktitle = {Proceedings of the Poster Session of the 2nd Conference on Language, Data and Knowledge (LDK-PS 2019)},
  issn      = {1613-0073},
  pages     = {40--44},
  year      = {2019},
  language  = {en}
}

@inproceedings{KohlFreyerKraemeretal.2023,
  author        = {Kohl, Philipp and Freyer, Nils and Kr{\"a}mer, Yoka and Werth, Henri and Wolf, Steffen and Kraft, Bodo and Meinecke, Matthias and Z{\"u}ndorf, Albert},
  title         = {{ALE}: a simulation-based active learning evaluation framework for the parameter-driven comparison of query strategies for {NLP}},
  series        = {Deep Learning Theory and Applications. DeLTA 2023. Communications in Computer and Information Science},
  booktitle     = {Deep Learning Theory and Applications. DeLTA 2023. Communications in Computer and Information Science},
  editor        = {Conte, Donatello and Fred, Ana and Gusikhin, Oleg and Sansone, Carlo},
  publisher     = {Springer},
  address       = {Cham},
  isbn          = {978-3-031-39058-6},
  doi           = {10.1007/978-3-031-39059-3},
  internal-note = {doi field previously held the e-book ISBN; replaced with the volume DOI. Chapter-level DOI for pages 235--253 to be confirmed.},
  pages         = {235--253},
  year          = {2023},
  abstract      = {Supervised machine learning and deep learning require a large amount of labeled data, which data scientists obtain in a manual, and time-consuming annotation process. To mitigate this challenge, Active Learning (AL) proposes promising data points to annotators they annotate next instead of a subsequent or random sample. This method is supposed to save annotation effort while maintaining model performance. However, practitioners face many AL strategies for different tasks and need an empirical basis to choose between them. Surveys categorize AL strategies into taxonomies without performance indications. Presentations of novel AL strategies compare the performance to a small subset of strategies. Our contribution addresses the empirical basis by introducing a reproducible active learning evaluation (ALE) framework for the comparative evaluation of AL strategies in NLP. The framework allows the implementation of AL strategies with low effort and a fair data-driven comparison through defining and tracking experiment parameters (e.g., initial dataset size, number of data points per query step, and the budget). ALE helps practitioners to make more informed decisions, and researchers can focus on developing new, effective AL strategies and deriving best practices for specific use cases. With best practices, practitioners can lower their annotation costs. We present a case study to illustrate how to use the framework.},
  language      = {en}
}

@inproceedings{SildatkeKarwanniKraftetal.2020,
  author    = {Sildatke, Michael and Karwanni, Hendrik and Kraft, Bodo and Schmidts, Oliver and Z{\"u}ndorf, Albert},
  title     = {Automated Software Quality Monitoring in Research Collaboration Projects},
  series    = {ICSEW'20: Proceedings of the IEEE/ACM 42nd International Conference on Software Engineering Workshops},
  booktitle = {ICSEW'20: Proceedings of the IEEE/ACM 42nd International Conference on Software Engineering Workshops},
  doi       = {10.1145/3387940.3391478},
  pages     = {603--610},
  year      = {2020},
  language  = {en}
}

@inproceedings{SchmidtsKraftWinkensetal.2021,
  author    = {Schmidts, Oliver and Kraft, Bodo and Winkens, Marvin and Z{\"u}ndorf, Albert},
  title     = {Catalog integration of heterogeneous and volatile product data},
  series    = {DATA 2020: Data Management Technologies and Applications},
  booktitle = {DATA 2020: Data Management Technologies and Applications},
  publisher = {Springer},
  address   = {Cham},
  isbn      = {978-3-030-83013-7},
  doi       = {10.1007/978-3-030-83014-4_7},
  pages     = {134--153},
  year      = {2021},
  abstract  = {The integration of frequently changing, volatile product data from different manufacturers into a single catalog is a significant challenge for small and medium-sized e-commerce companies. They rely on timely integrating product data to present them aggregated in an online shop without knowing format specifications, concept understanding of manufacturers, and data quality. Furthermore, format, concepts, and data quality may change at any time. Consequently, integrating product catalogs into a single standardized catalog is often a laborious manual task. Current strategies to streamline or automate catalog integration use techniques based on machine learning, word vectorization, or semantic similarity. However, most approaches struggle with low-quality or real-world data. We propose Attribute Label Ranking (ALR) as a recommendation engine to simplify the integration process of previously unknown, proprietary tabular format into a standardized catalog for practitioners. We evaluate ALR by focusing on the impact of different neural network architectures, language features, and semantic similarity. Additionally, we consider metrics for industrial application and present the impact of ALR in production and its limitations.},
  language  = {en}
}

@inproceedings{SchmidtsKraftWinkensetal.2020,
  author    = {Schmidts, Oliver and Kraft, Bodo and Winkens, Marvin and Z{\"u}ndorf, Albert},
  title     = {Catalog integration of low-quality product data by attribute label ranking},
  series    = {Proceedings of the 9th International Conference on Data Science, Technology and Applications - Volume 1: DATA},
  booktitle = {Proceedings of the 9th International Conference on Data Science, Technology and Applications - Volume 1: DATA},
  isbn      = {978-989-758-440-4},
  doi       = {10.5220/0009831000900101},
  pages     = {90--101},
  year      = {2020},
  language  = {en}
}

@inproceedings{Kraft2004,
  author        = {Kraft, Bodo},
  title         = {Conceptual design tools for civil engineering},
  booktitle     = {Applications of Graph Transformations with Industrial Relevance},
  series        = {Lecture Notes in Computer Science},
  volume        = {3062},
  publisher     = {Springer},
  doi           = {10.1007/978-3-540-25959-6_33},
  pages         = {434--439},
  year          = {2004},
  internal-note = {Venue metadata (LNCS 3062, pages, DOI) was previously embedded in the abstract text; moved into proper fields.},
  abstract      = {This paper gives a brief overview of the tools we have developed to support conceptual design in civil engineering. Based on the UPGRADE framework, two applications, one for the knowledge engineer and another for architects allow to store domain specific knowledge and to use this knowledge during conceptual design. Consistency analyses check the design against the defined knowledge and inform the architect if rules are violated.},
  subject       = {CAD},
  language      = {en}
}

@inproceedings{SchmidtsKraftSchreiberetal.2018,
  author    = {Schmidts, Oliver and Kraft, Bodo and Schreiber, Marc and Z{\"u}ndorf, Albert},
  title     = {Continuously evaluated research projects in collaborative decoupled environments},
  series    = {2018 ACM/IEEE 5th International Workshop on Software Engineering Research and Industrial Practice, May 29, 2018, Gothenburg, Sweden : SER\&IP '18},
  booktitle = {2018 ACM/IEEE 5th International Workshop on Software Engineering Research and Industrial Practice, May 29, 2018, Gothenburg, Sweden : SER\&IP '18},
  publisher = {ACM},
  address   = {New York, NY},
  pages     = {1--9},
  year      = {2018},
  abstract  = {Often, research results from collaboration projects are not transferred into productive environments even though approaches are proven to work in demonstration prototypes. These demonstration prototypes are usually too fragile and error-prone to be transferred easily into productive environments. A lot of additional work is required. Inspired by the idea of an incremental delivery process, we introduce an architecture pattern, which combines the approach of Metrics Driven Research Collaboration with microservices for the ease of integration. It enables keeping track of project goals over the course of the collaboration while every party may focus on their expert skills: researchers may focus on complex algorithms, practitioners may focus on their business goals. Through the simplified integration (intermediate) research results can be introduced into a productive environment which enables getting an early user feedback and allows for the early evaluation of different approaches. The practitioners' business model benefits throughout the full project duration.},
  language  = {en}
}

@inproceedings{SchreiberKraftZuendorf2016,
  author    = {Schreiber, Marc and Kraft, Bodo and Z{\"u}ndorf, Albert},
  title     = {Cost-efficient quality assurance of natural language processing tools through continuous monitoring with continuous integration},
  series    = {3rd International Workshop on Software Engineering Research and Industrial Practice},
  booktitle = {3rd International Workshop on Software Engineering Research and Industrial Practice},
  doi       = {10.1145/2897022.2897029},
  pages     = {46--52},
  year      = {2016},
  language  = {en}
}

@inproceedings{KirchhofKraft2011,
  author    = {Kirchhof, Michael and Kraft, Bodo},
  title     = {Dogmatisches „Entweder agil oder klassisch" im Projektmanagement hat ausgedient - die richtige Mischung macht's},
  series    = {Projekt-Sternstunden : strahlende Erfolge durch Kompetenz},
  booktitle = {Projekt-Sternstunden : strahlende Erfolge durch Kompetenz},
  publisher = {GPM},
  address   = {N{\"u}rnberg},
  isbn      = {978-3-924841-60-7},
  pages     = {414--425},
  year      = {2011},
  language  = {de}
}

@inproceedings{KloeserBuesgenKohletal.2023,
  author    = {Kl{\"o}ser, Lars and B{\"u}sgen, Andr{\'e} and Kohl, Philipp and Kraft, Bodo and Z{\"u}ndorf, Albert},
  title     = {Explaining relation classification models with semantic extents},
  series    = {DeLTA 2023: Deep Learning Theory and Applications},
  booktitle = {DeLTA 2023: Deep Learning Theory and Applications},
  editor    = {Conte, Donatello and Fred, Ana and Gusikhin, Oleg and Sansone, Carlo},
  publisher = {Springer},
  address   = {Cham},
  isbn      = {978-3-031-39058-6},
  doi       = {10.1007/978-3-031-39059-3_13},
  pages     = {189--208},
  year      = {2023},
  abstract  = {In recent years, the development of large pretrained language models, such as BERT and GPT, significantly improved information extraction systems on various tasks, including relation classification. State-of-the-art systems are highly accurate on scientific benchmarks. A lack of explainability is currently a complicating factor in many real-world applications. Comprehensible systems are necessary to prevent biased, counterintuitive, or harmful decisions. We introduce semantic extents, a concept to analyze decision patterns for the relation classification task. Semantic extents are the most influential parts of texts concerning classification decisions. Our definition allows similar procedures to determine semantic extents for humans and models. We provide an annotation tool and a software framework to determine semantic extents for humans and models conveniently and reproducibly. Comparing both reveals that models tend to learn shortcut patterns from data. These patterns are hard to detect with current interpretability methods, such as input reductions. Our approach can help detect and eliminate spurious decision patterns during model development. Semantic extents can increase the reliability and security of natural language processing systems. Semantic extents are an essential step in enabling applications in critical areas like healthcare or finance. Moreover, our work opens new research directions for developing methods to explain deep learning models.},
  language  = {en}
}