@inproceedings{KloeserKohlKraftetal.2021, author = {Kl{\"o}ser, Lars and Kohl, Philipp and Kraft, Bodo and Z{\"u}ndorf, Albert}, title = {Multi-attribute relation extraction (MARE): simplifying the application of relation extraction}, series = {Proceedings of the 2nd International Conference on Deep Learning Theory and Applications DeLTA - Volume 1}, booktitle = {Proceedings of the 2nd International Conference on Deep Learning Theory and Applications DeLTA - Volume 1}, publisher = {SciTePress}, address = {Set{\'u}bal}, isbn = {978-989-758-526-5}, doi = {10.5220/0010559201480156}, pages = {148 -- 156}, year = {2021}, abstract = {Relation extraction in natural language understanding enables innovative and encouraging novel business concepts and facilitates new digitalized decision-making processes. Current approaches allow the extraction of relations with a fixed number of entities as attributes. Extracting relations with an arbitrary number of attributes requires complex systems and costly relation-trigger annotations to assist these systems. We introduce multi-attribute relation extraction (MARE) as an assumption-less problem formulation with two approaches, facilitating an explicit mapping from business use cases to the data annotations. Avoiding elaborate annotation constraints simplifies the application of relation extraction approaches. The evaluation compares our models to current state-of-the-art event extraction and binary relation extraction methods. Our approaches show improvements over these methods on the extraction of general multi-attribute relations.}, language = {en} }

@inproceedings{SchmidtsKraftWinkensetal.2021, author = {Schmidts, Oliver and Kraft, Bodo and Winkens, Marvin and Z{\"u}ndorf, Albert}, title = {Catalog integration of heterogeneous and volatile product data}, series = {DATA 2020: Data Management Technologies and Applications}, booktitle = {DATA 2020: Data Management Technologies and Applications}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-83013-7}, doi = {10.1007/978-3-030-83014-4_7}, pages = {134 -- 153}, year = {2021}, abstract = {The integration of frequently changing, volatile product data from different manufacturers into a single catalog is a significant challenge for small and medium-sized e-commerce companies. They rely on timely integration of product data to present it aggregated in an online shop, without knowing the manufacturers' format specifications, concept understanding, and data quality. Furthermore, format, concepts, and data quality may change at any time. Consequently, integrating product catalogs into a single standardized catalog is often a laborious manual task. Current strategies to streamline or automate catalog integration use techniques based on machine learning, word vectorization, or semantic similarity. However, most approaches struggle with low-quality or real-world data. We propose Attribute Label Ranking (ALR) as a recommendation engine that simplifies for practitioners the integration of a previously unknown, proprietary tabular format into a standardized catalog. We evaluate ALR by focusing on the impact of different neural network architectures, language features, and semantic similarity.
Additionally, we consider metrics for industrial application and present the impact of ALR in production and its limitations.}, language = {en} }

@inproceedings{KohlSchmidtsKloeseretal.2021, author = {Kohl, Philipp and Schmidts, Oliver and Kl{\"o}ser, Lars and Werth, Henri and Kraft, Bodo and Z{\"u}ndorf, Albert}, title = {STAMP 4 NLP - an agile framework for rapid quality-driven NLP applications development}, series = {Quality of Information and Communications Technology. QUATIC 2021}, booktitle = {Quality of Information and Communications Technology. QUATIC 2021}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-85346-4}, doi = {10.1007/978-3-030-85347-1_12}, pages = {156 -- 166}, year = {2021}, abstract = {The progress in natural language processing (NLP) research over the last years offers novel business opportunities for companies, such as automated user interaction or improved data analysis. Building sophisticated NLP applications requires dealing with modern machine learning (ML) technologies, which makes it difficult for enterprises to establish successful NLP projects. Our experience in applied NLP research projects shows that the continuous integration of research prototypes in production-like environments with quality assurance builds trust in the software and demonstrates its convenience and usefulness with respect to the business goal. We introduce STAMP 4 NLP as an iterative and incremental process model for developing NLP applications. With STAMP 4 NLP, we merge software engineering principles with best practices from data science. Instantiating our process model allows prototypes to be created efficiently by utilizing templates, conventions, and implementations, enabling developers and data scientists to focus on the business goals. Due to our iterative-incremental approach, businesses can deploy an enhanced version of the prototype to their software environment after every iteration, maximizing potential business value and trust early and avoiding the cost of successful yet never-deployed experiments.}, language = {en} }

@inproceedings{BornheimGriegerBialonski2021, author = {Bornheim, Tobias and Grieger, Niklas and Bialonski, Stephan}, title = {FHAC at GermEval 2021: Identifying German toxic, engaging, and fact-claiming comments with ensemble learning}, series = {Proceedings of the GermEval 2021 Workshop on the Identification of Toxic, Engaging, and Fact-Claiming Comments : 17th Conference on Natural Language Processing KONVENS 2021}, booktitle = {Proceedings of the GermEval 2021 Workshop on the Identification of Toxic, Engaging, and Fact-Claiming Comments : 17th Conference on Natural Language Processing KONVENS 2021}, publisher = {Heinrich Heine University}, address = {D{\"u}sseldorf}, doi = {10.48415/2021/fhw5-x128}, pages = {105 -- 111}, year = {2021}, language = {en} }

@inproceedings{OlderogMohrBegingetal.2021, author = {Olderog, M. and Mohr, P. and Beging, Stefan and Tsoumpas, C.
and Ziemons, Karl}, title = {Simulation study on the role of tissue-scattered events in improving sensitivity for a compact time-of-flight Compton positron emission tomograph}, series = {2020 IEEE Nuclear Science Symposium and Medical Imaging Conference (NSS/MIC)}, booktitle = {2020 IEEE Nuclear Science Symposium and Medical Imaging Conference (NSS/MIC)}, publisher = {IEEE}, address = {New York, NY}, isbn = {978-1-7281-7693-2}, doi = {10.1109/NSS/MIC42677.2020.9507901}, pages = {4 pages}, year = {2021}, abstract = {In positron emission tomography, improving time, energy, and spatial detector resolutions and using Compton kinematics introduces the possibility of reconstructing a radioactivity distribution image from scatter coincidences, thereby enhancing image quality. The number of single scattered coincidences alone is of the same order of magnitude as that of true coincidences. In this work, a compact Compton camera module based on monolithic scintillation material is investigated as a detector ring module. The detector interactions are simulated with the Monte Carlo package GATE. The scattering angle inside the tissue is derived from the energy of the scattered photon, which results in a set of possible scattering trajectories, or a broken line of response. The Compton kinematics collimation reduces the number of solutions. Additionally, the time-of-flight information helps localize the position of the annihilation. One question of this investigation is how the energy, spatial, and temporal resolutions help confine the possible annihilation volume. A comparison of currently technically feasible detector resolutions (under laboratory conditions) demonstrates the influence on this annihilation volume and shows that energy and coincidence time resolution have a significant impact: an enhancement of the latter from 400 ps to 100 ps leads to an annihilation volume around 50\% smaller, while a change of the energy resolution in the absorber layer from 12\% to 4.5\% results in a reduction of 60\%. The inclusion of single tissue-scattered data has the potential to increase the sensitivity of a scanner by a factor of 2 to 3. The concept can be further optimized and extended for multiple scatter coincidences and subsequently validated by a reconstruction algorithm.}, language = {en} }

@inproceedings{TranStaat2021, author = {Tran, Ngoc Trinh and Staat, Manfred}, title = {FEM shakedown analysis of Kirchhoff-Love plates under uncertainty of strength}, series = {Proceedings of UNCECOMP 2021}, booktitle = {Proceedings of UNCECOMP 2021}, isbn = {978-618-85072-6-5}, doi = {10.7712/120221.8041.19047}, pages = {323 -- 338}, year = {2021}, abstract = {A new formulation to calculate the shakedown limit load of Kirchhoff plates under stochastic conditions of strength is developed. Direct structural reliability design by chance-constrained programming is based on prescribed failure probabilities; it is an effective approach to stochastic programming if it can be formulated as an equivalent deterministic optimization problem. We restrict uncertainty to strength; the loading remains deterministic. A new formulation is derived for the case of random strength with lognormal distribution. Upper-bound and lower-bound shakedown load factors are calculated simultaneously by a dual algorithm.}, language = {en} }

@inproceedings{IomdinaKiselevaKotliaretal.2020, author = {Iomdina, Elena N. and Kiseleva, Anna A.
and Kotliar, Konstantin and Luzhnov, Petr V.}, title = {Quantification of choroidal blood flow using the OCT-A system based on voxel scan processing}, series = {Proceedings of the International Conference on Biomedical Innovations and Applications - BIA 2020}, booktitle = {Proceedings of the International Conference on Biomedical Innovations and Applications - BIA 2020}, publisher = {IEEE}, address = {New York, NY}, isbn = {978-1-7281-7073-2}, doi = {10.1109/BIA50171.2020.9244511}, pages = {41 -- 44}, year = {2020}, abstract = {The paper presents a method for the quantitative assessment of choroidal blood flow using an OCT-A system. The developed technique for processing OCT-A scans is divided into two stages. In the first stage, the boundaries in the selected portion were identified. In the second stage, each pixel mark on the selected layer was represented as a volume unit, a voxel, which characterizes a region of moving blood. Three geometric shapes were considered to represent the voxel. Using one OCT-A scan as an example, this work presents a quantitative assessment of the blood flow index. A possible modification of the two-stage algorithm based on voxel scan processing is presented.}, language = {en} }

@inproceedings{SildatkeKarwanniKraftetal.2020, author = {Sildatke, Michael and Karwanni, Hendrik and Kraft, Bodo and Schmidts, Oliver and Z{\"u}ndorf, Albert}, title = {Automated software quality monitoring in research collaboration projects}, series = {ICSEW'20: Proceedings of the IEEE/ACM 42nd International Conference on Software Engineering Workshops}, booktitle = {ICSEW'20: Proceedings of the IEEE/ACM 42nd International Conference on Software Engineering Workshops}, publisher = {ACM}, address = {New York, NY}, doi = {10.1145/3387940.3391478}, pages = {603 -- 610}, year = {2020}, abstract = {In collaborative research projects, both researchers and practitioners work together to solve business-critical challenges. These projects often deal with ETL processes in which humans extract information from non-machine-readable documents by hand. AI-based machine learning models can help to solve this problem. Since machine learning approaches are not deterministic, their output quality may decrease over time. This leads to an overall quality loss of the application that embeds the machine learning models, and hence the software quality in development and in production may differ. Machine learning models are black boxes, which makes practitioners skeptical and raises the inhibition threshold for the early productive use of research prototypes. Continuous monitoring of software quality in production offers an early response capability to quality loss and encourages the use of machine learning approaches. Furthermore, experts have to ensure that they integrate possible new inputs into the model training as quickly as possible. In this paper, we introduce an architecture pattern with a reference implementation that extends the concept of Metrics Driven Research Collaboration with automated software quality monitoring in productive use and the possibility to auto-generate new test data from documents processed in production.
Through automated monitoring of the software quality and auto-generated test data, this approach ensures that the software quality meets and maintains the requested thresholds in productive use, even during further continuous deployment and under changing input data.}, language = {en} }

@inproceedings{SchmidtsKraftWinkensetal.2020, author = {Schmidts, Oliver and Kraft, Bodo and Winkens, Marvin and Z{\"u}ndorf, Albert}, title = {Catalog integration of low-quality product data by attribute label ranking}, series = {Proceedings of the 9th International Conference on Data Science, Technology and Applications DATA - Volume 1}, booktitle = {Proceedings of the 9th International Conference on Data Science, Technology and Applications DATA - Volume 1}, publisher = {SciTePress}, address = {Set{\'u}bal, Portugal}, isbn = {978-989-758-440-4}, doi = {10.5220/0009831000900101}, pages = {90 -- 101}, year = {2020}, abstract = {The integration of product data from heterogeneous sources and manufacturers into a single catalog is often still a laborious, manual task. Small and medium-sized enterprises in particular face the challenge of timely integrating the data their business relies on to keep an up-to-date product catalog, due to format specifications, low data quality, and the required expert knowledge. Additionally, modern approaches to simplify catalog integration demand experience in machine learning, word vectorization, or semantic similarity that such enterprises do not have. Furthermore, most approaches struggle with low-quality data. We propose Attribute Label Ranking (ALR), an easy-to-understand and simple-to-adapt learning approach. ALR leverages a model trained on real-world integration data to identify the best possible schema mapping of a previously unknown, proprietary tabular format into a standardized catalog schema. Our approach predicts multiple labels for every attribute of an input column; the whole column is taken into consideration when ranking these labels. We evaluate ALR regarding the correctness of predictions and compare the results on real-world data to state-of-the-art approaches. Additionally, we report findings from our experiments and limitations of our approach.}, language = {en} }

@inproceedings{PohleFroehlichDalitzRichteretal.2020, author = {Pohle-Fr{\"o}hlich, Regina and Dalitz, Christoph and Richter, Charlotte and Hahnen, Tobias and St{\"a}udle, Benjamin and Albracht, Kirsten}, title = {Estimation of muscle fascicle orientation in ultrasonic images}, series = {Proceedings of the 15th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications - Volume 5}, booktitle = {Proceedings of the 15th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications - Volume 5}, publisher = {SciTePress}, address = {Set{\'u}bal, Portugal}, isbn = {978-989-758-402-2}, doi = {10.5220/0008933900790086}, pages = {79 -- 86}, year = {2020}, abstract = {We compare four different algorithms for automatically estimating the muscle fascicle angle from ultrasonic images: the vesselness filter, the Radon transform, the projection profile method, and the gray-level co-occurrence matrix (GLCM). The algorithm results are compared to ground truth data generated by three different experts on 425 image frames from two videos recorded during different types of motion.
The best agreement with the ground truth data was achieved by combining pre-processing with a vesselness filter and angle measurement with the projection profile method. The robustness of the estimation is increased by applying the algorithms to subregions with high gradients and performing a LOESS fit through these estimates.}, language = {en} }