@comment{KraftHeerRetkowitz2008: retyped @article -> @inproceedings (AGTIVE 2007 revised
  papers volume is conference proceedings, not a journal). The catalog statement of
  responsibility that was fused into the title ("/ Heer ; Retkowitz ; Kraft") indicates
  the published author order Heer, Retkowitz, Kraft; author field reordered accordingly
  -- NOTE(review): confirm against the published chapter. Citation key kept unchanged.}
@inproceedings{KraftHeerRetkowitz2008,
  author    = {Heer, Thomas and Retkowitz, Daniel and Kraft, Bodo},
  title     = {Algorithm and Tool for Ontology Integration Based on Graph Rewriting},
  booktitle = {Applications of Graph Transformations with Industrial Relevance: Third International Symposium, {AGTIVE} 2007, Kassel, Germany, October 10-12, 2007, Revised Selected and Invited Papers},
  publisher = {Springer},
  isbn      = {978-3-540-89019-5},
  pages     = {577--582},
  year      = {2008},
  language  = {en}
}

@comment{KohlFreyerKraemeretal.2023: the original doi field held the e-ISBN
  (978-3-031-39059-3), not a DOI. Repaired to the Springer volume DOI
  10.1007/978-3-031-39059-3 -- NOTE(review): the chapter DOI carries a _NN suffix
  (e.g. 10.1007/978-3-031-39059-3_13); confirm and append it. "(Print)" annotation
  removed from the isbn value; redundant series field (duplicate of booktitle) dropped.}
@inproceedings{KohlFreyerKraemeretal.2023,
  author    = {Kohl, Philipp and Freyer, Nils and Kr{\"a}mer, Yoka and Werth, Henri and Wolf, Steffen and Kraft, Bodo and Meinecke, Matthias and Z{\"u}ndorf, Albert},
  title     = {{ALE}: a simulation-based active learning evaluation framework for the parameter-driven comparison of query strategies for {NLP}},
  booktitle = {Deep Learning Theory and Applications. DeLTA 2023. Communications in Computer and Information Science},
  editor    = {Conte, Donatello and Fred, Ana and Gusikhin, Oleg and Sansone, Carlo},
  publisher = {Springer},
  address   = {Cham},
  isbn      = {978-3-031-39058-6},
  doi       = {10.1007/978-3-031-39059-3},
  pages     = {235--253},
  year      = {2023},
  abstract  = {Supervised machine learning and deep learning require a large amount of labeled data, which data scientists obtain in a manual, and time-consuming annotation process. To mitigate this challenge, Active Learning (AL) proposes promising data points to annotators they annotate next instead of a subsequent or random sample. This method is supposed to save annotation effort while maintaining model performance. However, practitioners face many AL strategies for different tasks and need an empirical basis to choose between them. Surveys categorize AL strategies into taxonomies without performance indications. Presentations of novel AL strategies compare the performance to a small subset of strategies. Our contribution addresses the empirical basis by introducing a reproducible active learning evaluation (ALE) framework for the comparative evaluation of AL strategies in NLP. The framework allows the implementation of AL strategies with low effort and a fair data-driven comparison through defining and tracking experiment parameters (e.g., initial dataset size, number of data points per query step, and the budget). ALE helps practitioners to make more informed decisions, and researchers can focus on developing new, effective AL strategies and deriving best practices for specific use cases. With best practices, practitioners can lower their annotation costs. We present a case study to illustrate how to use the framework.},
  language  = {en}
}

@inproceedings{SiebigterothKraftSchmidtsetal.2019,
  author    = {Siebigteroth, Ines and Kraft, Bodo and Schmidts, Oliver and Z{\"u}ndorf, Albert},
  title     = {A Study on Improving Corpus Creation by Pair Annotation},
  booktitle = {Proceedings of the Poster Session of the 2nd Conference on Language, Data and Knowledge ({LDK-PS} 2019)},
  issn      = {1613-0073},
  pages     = {40--44},
  year      = {2019},
  language  = {en}
}