@inproceedings{SchreiberKraftZuendorf2016,
  author = {Schreiber, Marc and Kraft, Bodo and Z{\"u}ndorf, Albert},
  title = {Cost-efficient quality assurance of natural language processing tools through continuous monitoring with continuous integration},
  series = {3rd International Workshop on Software Engineering Research and Industrial Practice},
  booktitle = {3rd International Workshop on Software Engineering Research and Industrial Practice},
  doi = {10.1145/2897022.2897029},
  pages = {46 -- 52},
  year = {2016},
  language = {en}
}

@inproceedings{SchreiberKraftZuendorf2017,
  author = {Schreiber, Marc and Kraft, Bodo and Z{\"u}ndorf, Albert},
  title = {Metrics Driven Research Collaboration: Focusing on Common Project Goals Continuously},
  series = {39th International Conference on Software Engineering, May 20-28, 2017 - Buenos Aires, Argentina},
  booktitle = {39th International Conference on Software Engineering, May 20-28, 2017 - Buenos Aires, Argentina},
  pages = {8 pages},
  year = {2017},
  abstract = {Research collaborations provide opportunities for both practitioners and researchers: practitioners need solutions for difficult business challenges and researchers are looking for hard problems to solve and publish. Nevertheless, research collaborations carry the risk that practitioners focus on quick solutions too much and that researchers tackle theoretical problems, resulting in products which do not fulfill the project requirements. In this paper we introduce an approach extending the ideas of agile and lean software development. It helps practitioners and researchers keep track of their common research collaboration goal: a scientifically enriched software product which fulfills the needs of the practitioner's business model. This approach gives first-class status to application-oriented metrics that measure progress and success of a research collaboration continuously. Those metrics are derived from the collaboration requirements and help to focus on a commonly defined goal. An appropriate tool set evaluates and visualizes those metrics with minimal effort, and all participants will be pushed to focus on their tasks with appropriate effort.
  Thus, project status, challenges, and progress are transparent to all research collaboration members at any time.},
  language = {en}
}

@inproceedings{SchreiberKraftZuendorf2017a,
  author = {Schreiber, Marc and Kraft, Bodo and Z{\"u}ndorf, Albert},
  title = {Metrics driven research collaboration: focusing on common project goals continuously},
  series = {Proceedings : 2017 IEEE/ACM 4th International Workshop on Software Engineering Research and Industrial Practice : SER\&IP 2017 : 21 May 2017 Buenos Aires, Argentina},
  booktitle = {Proceedings : 2017 IEEE/ACM 4th International Workshop on Software Engineering Research and Industrial Practice : SER\&IP 2017 : 21 May 2017 Buenos Aires, Argentina},
  editor = {Bilof, Randall},
  publisher = {IEEE Press},
  address = {Piscataway, NJ},
  isbn = {978-1-5386-2797-6},
  doi = {10.1109/SER-IP.2017.6},
  pages = {41 -- 47},
  year = {2017},
  language = {en}
}

@inproceedings{SchreiberKraftZuendorf2018,
  author = {Schreiber, Marc and Kraft, Bodo and Z{\"u}ndorf, Albert},
  title = {NLP Lean Programming Framework: Developing NLP Applications More Effectively},
  series = {Proceedings of NAACL-HLT 2018: Demonstrations, New Orleans, Louisiana, June 2 - 4, 2018},
  booktitle = {Proceedings of NAACL-HLT 2018: Demonstrations, New Orleans, Louisiana, June 2 - 4, 2018},
  doi = {10.18653/v1/N18-5001},
  pages = {5 pages},
  year = {2018},
  abstract = {This paper presents NLP Lean Programming framework (NLPf), a new framework for creating custom natural language processing (NLP) models and pipelines by utilizing common software development build systems. This approach allows developers to train and integrate domain-specific NLP pipelines into their applications seamlessly. Additionally, NLPf provides an annotation tool which improves the annotation process significantly by providing a well-designed GUI and sophisticated way of using input devices. Due to NLPf's properties developers and domain experts are able to build domain-specific NLP applications more efficiently.
  NLPf is open-source software and available at https://gitlab.com/schrieveslaach/NLPf.},
  language = {en}
}

@inproceedings{SchmidtsKraftSiebigterothetal.2019,
  author = {Schmidts, Oliver and Kraft, Bodo and Siebigteroth, Ines and Z{\"u}ndorf, Albert},
  title = {Schema Matching with Frequent Changes on Semi-Structured Input Files: A Machine Learning Approach on Biological Product Data},
  series = {Proceedings of the 21st International Conference on Enterprise Information Systems - Volume 1: ICEIS},
  booktitle = {Proceedings of the 21st International Conference on Enterprise Information Systems - Volume 1: ICEIS},
  isbn = {978-989-758-372-8},
  doi = {10.5220/0007723602080215},
  pages = {208 -- 215},
  year = {2019},
  language = {en}
}

@inproceedings{SiebigterothKraftSchmidtsetal.2019,
  author = {Siebigteroth, Ines and Kraft, Bodo and Schmidts, Oliver and Z{\"u}ndorf, Albert},
  title = {A Study on Improving Corpus Creation by Pair Annotation},
  series = {Proceedings of the Poster Session of the 2nd Conference on Language, Data and Knowledge (LDK-PS 2019)},
  booktitle = {Proceedings of the Poster Session of the 2nd Conference on Language, Data and Knowledge (LDK-PS 2019)},
  issn = {1613-0073},
  pages = {40 -- 44},
  year = {2019},
  language = {en}
}

@inproceedings{SildatkeKarwanniKraftetal.2020,
  author = {Sildatke, Michael and Karwanni, Hendrik and Kraft, Bodo and Schmidts, Oliver and Z{\"u}ndorf, Albert},
  title = {Automated Software Quality Monitoring in Research Collaboration Projects},
  series = {ICSEW'20: Proceedings of the IEEE/ACM 42nd International Conference on Software Engineering Workshops},
  booktitle = {ICSEW'20: Proceedings of the IEEE/ACM 42nd International Conference on Software Engineering Workshops},
  publisher = {IEEE},
  address = {New York, NY},
  doi = {10.1145/3387940.3391478},
  pages = {603 -- 610},
  year = {2020},
  abstract = {In collaborative research projects, both researchers and practitioners work together solving business-critical challenges. These projects often deal with ETL processes, in which humans extract information from non-machine-readable documents by hand. AI-based machine learning models can help to solve this problem. Since machine learning approaches are not deterministic, their quality of output may decrease over time. This fact leads to an overall quality loss of the application which embeds machine learning models. Hence, the software qualities in development and production may differ. Machine learning models are black boxes. That makes practitioners skeptical and increases the inhibition threshold for early productive use of research prototypes. Continuous monitoring of software quality in production offers an early response capability on quality loss and encourages the use of machine learning approaches. Furthermore, experts have to ensure that they integrate possible new inputs into the model training as quickly as possible. In this paper, we introduce an architecture pattern with a reference implementation that extends the concept of Metrics Driven Research Collaboration with an automated software quality monitoring in productive use and a possibility to auto-generate new test data coming from processed documents in production.
  Through automated monitoring of the software quality and auto-generated test data, this approach ensures that the software quality meets and keeps requested thresholds in productive use, even during further continuous deployment and changing input data.},
  language = {en}
}

@inproceedings{SchmidtsKraftWinkensetal.2020,
  author = {Schmidts, Oliver and Kraft, Bodo and Winkens, Marvin and Z{\"u}ndorf, Albert},
  title = {Catalog integration of low-quality product data by attribute label ranking},
  series = {Proceedings of the 9th International Conference on Data Science, Technology and Applications DATA - Volume 1},
  booktitle = {Proceedings of the 9th International Conference on Data Science, Technology and Applications DATA - Volume 1},
  publisher = {SciTePress},
  address = {Set{\'u}bal, Portugal},
  isbn = {978-989-758-440-4},
  doi = {10.5220/0009831000900101},
  pages = {90 -- 101},
  year = {2020},
  abstract = {The integration of product data from heterogeneous sources and manufacturers into a single catalog is often still a laborious, manual task. Especially small- and medium-sized enterprises face the challenge of timely integrating the data their business relies on to have an up-to-date product catalog, due to format specifications, low quality of data and the requirement of expert knowledge. Additionally, modern approaches to simplify catalog integration demand experience in machine learning, word vectorization, or semantic similarity that such enterprises do not have. Furthermore, most approaches struggle with low-quality data. We propose Attribute Label Ranking (ALR), an easy to understand and simple to adapt learning approach. ALR leverages a model trained on real-world integration data to identify the best possible schema mapping of previously unknown, proprietary, tabular format into a standardized catalog schema. Our approach predicts multiple labels for every attribute of an input column. The whole column is taken into consideration to rank among these labels. We evaluate ALR regarding the correctness of predictions and compare the results on real-world data to state-of-the-art approaches. Additionally, we report findings during experiments and limitations of our approach.},
  language = {en}
}

@inproceedings{KloeserKohlKraftetal.2021,
  author = {Kl{\"o}ser, Lars and Kohl, Philipp and Kraft, Bodo and Z{\"u}ndorf, Albert},
  title = {Multi-attribute relation extraction (MARE): simplifying the application of relation extraction},
  series = {Proceedings of the 2nd International Conference on Deep Learning Theory and Applications DeLTA - Volume 1},
  booktitle = {Proceedings of the 2nd International Conference on Deep Learning Theory and Applications DeLTA - Volume 1},
  publisher = {SciTePress},
  address = {Set{\'u}bal},
  isbn = {978-989-758-526-5},
  doi = {10.5220/0010559201480156},
  pages = {148 -- 156},
  year = {2021},
  abstract = {Natural language understanding's relation extraction makes innovative and encouraging novel business concepts possible and facilitates new digitalized decision-making processes. Current approaches allow the extraction of relations with a fixed number of entities as attributes. Extracting relations with an arbitrary amount of attributes requires complex systems and costly relation-trigger annotations to assist these systems. We introduce multi-attribute relation extraction (MARE) as an assumption-less problem formulation with two approaches, facilitating an explicit mapping from business use cases to the data annotations.
  Avoiding elaborated annotation constraints simplifies the application of relation extraction approaches. The evaluation compares our models to current state-of-the-art event extraction and binary relation extraction methods. Our approaches show improvement compared to these on the extraction of general multi-attribute relations.},
  language = {en}
}

@inproceedings{KohlSchmidtsKloeseretal.2021,
  author = {Kohl, Philipp and Schmidts, Oliver and Kl{\"o}ser, Lars and Werth, Henri and Kraft, Bodo and Z{\"u}ndorf, Albert},
  title = {STAMP 4 NLP - an agile framework for rapid quality-driven NLP applications development},
  series = {Quality of Information and Communications Technology. QUATIC 2021},
  booktitle = {Quality of Information and Communications Technology. QUATIC 2021},
  publisher = {Springer},
  address = {Cham},
  isbn = {978-3-030-85346-4},
  doi = {10.1007/978-3-030-85347-1_12},
  pages = {156 -- 166},
  year = {2021},
  abstract = {The progress in natural language processing (NLP) research over the last years, offers novel business opportunities for companies, as automated user interaction or improved data analysis. Building sophisticated NLP applications requires dealing with modern machine learning (ML) technologies, which impedes enterprises from establishing successful NLP projects. Our experience in applied NLP research projects shows that the continuous integration of research prototypes in production-like environments with quality assurance builds trust in the software and shows convenience and usefulness regarding the business goal. We introduce STAMP 4 NLP as an iterative and incremental process model for developing NLP applications. With STAMP 4 NLP, we merge software engineering principles with best practices from data science. Instantiating our process model allows efficiently creating prototypes by utilizing templates, conventions, and implementations, enabling developers and data scientists to focus on the business goals. Due to our iterative-incremental approach, businesses can deploy an enhanced version of the prototype to their software environment after every iteration, maximizing potential business value and trust early and avoiding the cost of successful yet never deployed experiments.},
  language = {en}
}