@inproceedings{AlhaskirTschescheLinkeetal.2023, author = {Alhaskir, Mohamed and Tschesche, Matteo and Linke, Florian and Schriewer, Elisabeth and Weber, Yvonne and Wolking, Stefan and R{\"o}hrig, Rainer and Koch, Henner and Kutafina, Ekaterina}, title = {ECG matching: an approach to synchronize ECG datasets for data quality comparisons}, series = {Proceedings of the 68th Annual Meeting of the German Association of Medical Informatics, Biometry, and Epidemiology e.V. (gmds) 2023}, volume = {307}, booktitle = {Proceedings of the 68th Annual Meeting of the German Association of Medical Informatics, Biometry, and Epidemiology e.V. (gmds) 2023}, editor = {R{\"o}hrig, Rainer and Grabe, Niels and Haag, Martin and H{\"u}bner, Ursula and Sax, Ulrich and Schmidt, Carsten Oliver and Sedlmayr, Martin and Zapf, Antonia}, publisher = {IOS Press}, isbn = {978-1-64368-428-4 (Print)}, doi = {10.3233/SHTI230718}, pages = {225 -- 232}, year = {2023}, abstract = {Clinical assessment of newly developed sensors is important for ensuring their validity. Comparing recordings of emerging electrocardiography (ECG) systems to a reference ECG system requires accurate synchronization of data from both devices. Current methods can be inefficient and prone to errors. To address this issue, three algorithms are presented to synchronize two ECG time series from different recording systems: Binned R-peak Correlation, R-R Interval Correlation, and Average R-peak Distance. These algorithms reduce ECG data to their cyclic features, mitigating inefficiencies and minimizing discrepancies between different recording systems. We evaluate the performance of these algorithms using high-quality data and then assess their robustness after manipulating the R-peaks. Our results show that R-R Interval Correlation was the most efficient, whereas the Average R-peak Distance and Binned R-peak Correlation were more robust against noisy data.}, language = {en} } @inproceedings{TischbeinKeanVertgewalletal.2023, author = {Tischbein, Franziska and Kean, Kilian and Vertgewall, Chris Martin and Ulbig, Andreas and Altherr, Lena}, title = {Determination of the topology of low-voltage distribution grids using cluster methods}, series = {27th International Conference on Electricity Distribution (CIRED 2023)}, booktitle = {27th International Conference on Electricity Distribution (CIRED 2023)}, publisher = {IEEE}, isbn = {978-1-83953-855-1}, doi = {10.1049/icp.2023.0478}, pages = {1 -- 5}, year = {2023}, abstract = {Due to the decarbonization of the energy sector, electric distribution grids are undergoing a major transformation, which is expected to increase the load on grid equipment due to new electrical loads and distributed energy resources. Therefore, grid operators need to gradually move to active grid management in order to ensure safe and reliable grid operation. However, this requires knowledge of key grid variables, such as node voltages, which is why the mass integration of measurement technology (smart meters) is necessary. Another problem is that a large part of the distribution grid topology is not sufficiently digitized and models are partly faulty, which means that active grid management today largely has to be carried out blindly. It is therefore part of current research to develop methods for determining unknown grid topologies based on measurement data. In this paper, different clustering algorithms are presented and their performance in detecting the topology of low-voltage grids is compared.
Furthermore, the influence of measurement uncertainties is investigated in the form of a sensitivity analysis.}, language = {en} } @inproceedings{SchulteTiggesMatheisRekeetal.2023, author = {Schulte-Tigges, Joschua and Matheis, Dominik and Reke, Michael and Walter, Thomas and Kaszner, Daniel}, title = {Demonstrating a V2X enabled system for transition of control and minimum risk manoeuvre when leaving the operational design domain}, series = {HCII 2023: HCI in Mobility, Transport, and Automotive Systems}, booktitle = {HCII 2023: HCI in Mobility, Transport, and Automotive Systems}, editor = {Kr{\"o}mker, Heidi}, publisher = {Springer}, address = {Cham}, isbn = {978-3-031-35677-3 (Print)}, doi = {10.1007/978-3-031-35678-0_12}, pages = {200 -- 210}, year = {2023}, abstract = {Modern implementations of driver assistance systems are evolving from pure driver assistance to independently acting automation systems. Still, these systems do not cover the full vehicle usage range, also called the operational design domain, which requires the human driver as a fall-back mechanism. Transition of control and potential minimum risk manoeuvres are current research topics and will bridge the gap until fully autonomous vehicles are available. The authors showed in a demonstration that transition of control mechanisms can be further improved by the use of communication technology. Receiving incident type and position information via standardised vehicle-to-everything (V2X) messages can improve driver safety and comfort. The connected and automated vehicle's software framework can use this information to plan areas where the driver should take back control by initiating a transition of control, which can be followed by a minimum risk manoeuvre in case of an unresponsive driver. This transition of control has been implemented in a test vehicle and was presented to the public during the IEEE IV2022 (IEEE Intelligent Vehicles Symposium) in Aachen, Germany.}, language = {en} } @inproceedings{KueppersSchubaNeugebaueretal.2023, author = {K{\"u}ppers, Malte and Schuba, Marko and Neugebauer, Georg and H{\"o}ner, Tim and Hack, Sacha}, title = {Security analysis of the KNX smart building protocol}, series = {ARES '23: Proceedings of the 18th International Conference on Availability, Reliability and Security}, booktitle = {ARES '23: Proceedings of the 18th International Conference on Availability, Reliability and Security}, publisher = {ACM}, doi = {10.1145/3600160.3605167}, pages = {1 -- 7}, year = {2023}, abstract = {KNX is a protocol for smart building automation, e.g., for automated heating, air conditioning, or lighting. This paper analyses and evaluates state-of-the-art KNX devices from the manufacturers Merten, Gira and Siemens with respect to security. On the one hand, it is investigated whether publicly known vulnerabilities, like insecure storage of passwords in software, unencrypted communication, or denial-of-service attacks, can be reproduced in new devices.
On the other hand, the security is analyzed in general, leading to the discovery of a previously unknown, high-risk vulnerability related to so-called BCU (authentication) keys.}, language = {en} } @inproceedings{BuesgenKloeserKohletal.2023, author = {B{\"u}sgen, Andr{\'e} and Kl{\"o}ser, Lars and Kohl, Philipp and Schmidts, Oliver and Kraft, Bodo and Z{\"u}ndorf, Albert}, title = {From cracked accounts to fake IDs: user profiling on German Telegram black market channels}, series = {Data Management Technologies and Applications}, booktitle = {Data Management Technologies and Applications}, editor = {Cuzzocrea, Alfredo and Gusikhin, Oleg and Hammoudi, Slimane and Quix, Christoph}, publisher = {Springer}, address = {Cham}, isbn = {978-3-031-37889-8 (Print)}, doi = {10.1007/978-3-031-37890-4_9}, pages = {176 -- 202}, year = {2023}, abstract = {Messenger apps like WhatsApp and Telegram are frequently used for everyday communication, but they can also be utilized as a platform for illegal activity. Telegram allows public groups with up to 200,000 participants. Criminals use these public groups for trading illegal commodities and services, which becomes a concern for law enforcement agencies, who manually monitor suspicious activity in these chat rooms. This research demonstrates how natural language processing (NLP) can assist in analyzing these chat rooms, providing an explorative overview of the domain and facilitating purposeful analyses of user behavior. We provide a publicly available corpus of text messages annotated with entities and relations from four self-proclaimed black market chat rooms. Our pipeline approach aggregates the extracted product attributes from user messages into profiles and uses these, together with their sold products, as features for clustering. The extracted structured information is the foundation for further data exploration, such as identifying the top vendors or fine-granular price analyses. Our evaluation shows that pretrained word vectors perform better for unsupervised clustering than state-of-the-art transformer models, while the latter are still superior for sequence labeling.}, language = {en} } @inproceedings{KohlFreyerKraemeretal.2023, author = {Kohl, Philipp and Freyer, Nils and Kr{\"a}mer, Yoka and Werth, Henri and Wolf, Steffen and Kraft, Bodo and Meinecke, Matthias and Z{\"u}ndorf, Albert}, title = {ALE: a simulation-based active learning evaluation framework for the parameter-driven comparison of query strategies for NLP}, series = {Deep Learning Theory and Applications}, booktitle = {Deep Learning Theory and Applications}, editor = {Conte, Donatello and Fred, Ana and Gusikhin, Oleg and Sansone, Carlo}, publisher = {Springer}, address = {Cham}, isbn = {978-3-031-39058-6 (Print)}, doi = {10.1007/978-3-031-39059-3_16}, pages = {235 -- 253}, year = {2023}, abstract = {Supervised machine learning and deep learning require a large amount of labeled data, which data scientists obtain in a manual and time-consuming annotation process. To mitigate this challenge, Active Learning (AL) proposes promising data points for annotators to annotate next, instead of a subsequent or random sample. This method is supposed to save annotation effort while maintaining model performance. However, practitioners face many AL strategies for different tasks and need an empirical basis to choose between them. Surveys categorize AL strategies into taxonomies without performance indications. Presentations of novel AL strategies compare their performance to only a small subset of strategies.
Our contribution addresses the empirical basis by introducing a reproducible active learning evaluation (ALE) framework for the comparative evaluation of AL strategies in NLP. The framework allows implementing AL strategies with low effort and enables a fair, data-driven comparison by defining and tracking experiment parameters (e.g., initial dataset size, number of data points per query step, and the budget). ALE helps practitioners to make more informed decisions, and researchers can focus on developing new, effective AL strategies and deriving best practices for specific use cases. With best practices, practitioners can lower their annotation costs. We present a case study to illustrate how to use the framework.}, language = {en} } @inproceedings{FreyerThewesMeinecke2023, author = {Freyer, Nils and Thewes, Dustin and Meinecke, Matthias}, title = {GUIDO: a hybrid approach to guideline discovery \& ordering from natural language texts}, series = {Proceedings of the 12th International Conference on Data Science, Technology and Applications DATA - Volume 1}, booktitle = {Proceedings of the 12th International Conference on Data Science, Technology and Applications DATA - Volume 1}, editor = {Gusikhin, Oleg and Hammoudi, Slimane and Cuzzocrea, Alfredo}, isbn = {978-989-758-664-4}, issn = {2184-285X}, doi = {10.5220/0012084400003541}, pages = {335 -- 342}, year = {2023}, abstract = {Extracting workflow nets from textual descriptions can be used to simplify guidelines or formalize textual descriptions of formal processes like business processes and algorithms. The task of manually extracting processes, however, requires domain expertise and effort. While automatic process model extraction is desirable, annotating texts with formalized process models is expensive. Therefore, there are only a few machine-learning-based extraction approaches. Rule-based approaches, in turn, require domain specificity to work well and can rarely distinguish relevant and irrelevant information in textual descriptions. In this paper, we present GUIDO, a hybrid approach to the process model extraction task that first classifies sentences regarding their relevance to the process model using a BERT-based sentence classifier and then extracts a process model from the sentences classified as relevant using dependency parsing. The presented approach achieves significantly better results than a pure rule-based approach. GUIDO achieves an average behavioral similarity score of 0.93. Still, in comparison to purely machine-learning-based approaches, the annotation costs stay low.}, language = {en} } @inproceedings{KloeserBuesgenKohletal.2023, author = {Kl{\"o}ser, Lars and B{\"u}sgen, Andr{\'e} and Kohl, Philipp and Kraft, Bodo and Z{\"u}ndorf, Albert}, title = {Explaining relation classification models with semantic extents}, series = {Deep Learning Theory and Applications}, booktitle = {Deep Learning Theory and Applications}, editor = {Conte, Donatello and Fred, Ana and Gusikhin, Oleg and Sansone, Carlo}, publisher = {Springer}, address = {Cham}, isbn = {978-3-031-39058-6 (Print)}, doi = {10.1007/978-3-031-39059-3_13}, pages = {189 -- 208}, year = {2023}, abstract = {In recent years, the development of large pretrained language models, such as BERT and GPT, has significantly improved information extraction systems on various tasks, including relation classification. State-of-the-art systems are highly accurate on scientific benchmarks.
A lack of explainability is currently a complicating factor in many real-world applications. Comprehensible systems are necessary to prevent biased, counterintuitive, or harmful decisions. We introduce semantic extents, a concept to analyze decision patterns for the relation classification task. Semantic extents are the most influential parts of texts concerning classification decisions. Our definition allows similar procedures to determine semantic extents for humans and models. We provide an annotation tool and a software framework to determine semantic extents for humans and models conveniently and reproducibly. Comparing both reveals that models tend to learn shortcut patterns from data. These patterns are hard to detect with current interpretability methods, such as input reductions. Our approach can help detect and eliminate spurious decision patterns during model development. Semantic extents can increase the reliability and security of natural language processing systems and are an essential step towards enabling applications in critical areas like healthcare or finance. Moreover, our work opens new research directions for developing methods to explain deep learning models.}, language = {en} } @inproceedings{HueningMund2023, author = {H{\"u}ning, Felix and Mund, Cindy}, title = {Integration of agile development in standard labs}, series = {51st Annual Conference of the European Society for Engineering Education (SEFI)}, booktitle = {51st Annual Conference of the European Society for Engineering Education (SEFI)}, doi = {10.21427/NK4Z-WS73}, pages = {11 pages}, year = {2023}, abstract = {In addition to the technical content, modern courses at university should also teach professional skills to enhance the competencies of students for their future work. The competency-driven approach, covering technical as well as professional skills, makes it necessary to find a suitable way to integrate these skills into the corresponding module in a scalable and flexible manner. Agile development, for example, is essential for the development of modern systems and applications and makes use of dedicated professional skills of the team members, like structured group dynamics and communication, to enable fast and reliable development. This paper presents a flexible and easily adoptable approach to integrating Scrum, an agile development method, into the lab of an existing module. Due to the different roles in Scrum, the students achieve individual learning successes, gain valuable insight into modern system development and strengthen their communication and organization skills. The approach is implemented and evaluated in the module Vehicle Systems, but it can be transferred easily to other technical courses as well. The evaluation of the implementation considers feedback from all stakeholders (students, supervisors and lecturers) and monitors observations during the project lifetime.}, language = {en} } @inproceedings{MaurerMiskiwAcostaetal.2023, author = {Maurer, Florian and Miskiw, Kim K.
and Acosta, Rebeca Ramirez and Harder, Nick and Sander, Volker and Lehnhoff, Sebastian}, title = {Market abstraction of energy markets and policies - application in an agent-based modeling toolbox}, series = {EI.A 2023: Energy Informatics}, booktitle = {EI.A 2023: Energy Informatics}, editor = {J{\o}rgensen, Bo N{\o}rregaard and Pereira da Silva, Luiz Carlos and Ma, Zheng}, publisher = {Springer}, address = {Cham}, isbn = {978-3-031-48651-7 (Print)}, doi = {10.1007/978-3-031-48652-4_10}, pages = {139 -- 157}, year = {2023}, abstract = {In light of emerging challenges in energy systems, markets are subject to changing dynamics and market designs. Simulation models are commonly used to understand the changing dynamics of future electricity markets. However, existing market models were often created with specific use cases in mind, which limits their flexibility and usability. This can pose challenges when using a single model to compare different market designs. This paper introduces a new method of defining market designs for energy market simulations. The proposed concept makes it easy to incorporate different market designs into electricity market models by using relevant parameters derived from analyzing existing simulation tools, morphological categorization and ontologies. These parameters are then used to derive a market abstraction and integrate it into an agent-based simulation framework, allowing for a unified analysis of diverse market designs. Furthermore, we showcase the usability of the framework by integrating new types of long-term contracts and over-the-counter trading. To validate this approach, two case studies are presented: a pay-as-clear market and a pay-as-bid long-term market. These examples demonstrate the capabilities of the proposed framework.}, language = {en} }