@inproceedings{MaurerMiskiwAcostaetal.2023,
  author = {Maurer, Florian and Miskiw, Kim K. and Acosta, Rebeca Ramirez and Harder, Nick and Sander, Volker and Lehnhoff, Sebastian},
  title = {Market abstraction of energy markets and policies - application in an agent-based modeling toolbox},
  series = {EI.A 2023: Energy Informatics},
  booktitle = {EI.A 2023: Energy Informatics},
  editor = {J{\o}rgensen, Bo N{\o}rregaard and Pereira da Silva, Luiz Carlos and Ma, Zheng},
  publisher = {Springer},
  address = {Cham},
  isbn = {978-3-031-48651-7 (Print)},
  doi = {10.1007/978-3-031-48652-4_10},
  pages = {139--157},
  year = {2023},
  abstract = {In light of emerging challenges in energy systems, markets are subject to changes in their dynamics and market design. Simulation models are commonly used to understand the changing dynamics of future electricity markets. However, existing market models were often created with specific use cases in mind, which limits their flexibility and usability. This poses challenges when a single model is used to compare different market designs. This paper introduces a new method of defining market designs for energy market simulations. The proposed concept makes it easy to incorporate different market designs into electricity market models by using relevant parameters derived from an analysis of existing simulation tools, morphological categorization, and ontologies. These parameters are then used to derive a market abstraction and integrate it into an agent-based simulation framework, allowing for a unified analysis of diverse market designs. Furthermore, we showcase the usability of the approach by integrating new types of long-term contracts and over-the-counter trading. To validate the approach, two case studies are demonstrated: a pay-as-clear market and a pay-as-bid long-term market. These examples demonstrate the capabilities of the proposed framework.},
  language = {en}
}

@inproceedings{KohlFreyerKraemeretal.2023,
  author = {Kohl, Philipp and Freyer, Nils and Kr{\"a}mer, Yoka and Werth, Henri and Wolf, Steffen and Kraft, Bodo and Meinecke, Matthias and Z{\"u}ndorf, Albert},
  title = {ALE: a simulation-based active learning evaluation framework for the parameter-driven comparison of query strategies for NLP},
  series = {Deep Learning Theory and Applications. DeLTA 2023. Communications in Computer and Information Science},
  booktitle = {Deep Learning Theory and Applications. DeLTA 2023. Communications in Computer and Information Science},
  editor = {Conte, Donatello and Fred, Ana and Gusikhin, Oleg and Sansone, Carlo},
  publisher = {Springer},
  address = {Cham},
  isbn = {978-3-031-39058-6 (Print)},
  doi = {10.1007/978-3-031-39059-3},
  pages = {235--253},
  year = {2023},
  abstract = {Supervised machine learning and deep learning require a large amount of labeled data, which data scientists obtain in a manual and time-consuming annotation process. To mitigate this challenge, Active Learning (AL) proposes promising data points for annotators to label next instead of subsequent or random samples. This method is supposed to save annotation effort while maintaining model performance. However, practitioners face many AL strategies for different tasks and need an empirical basis to choose between them. Surveys categorize AL strategies into taxonomies without performance indications, and publications on novel AL strategies compare their performance only to a small subset of existing strategies.
Our contribution addresses the empirical basis by introducing a reproducible active learning evaluation (ALE) framework for the comparative evaluation of AL strategies in NLP. The framework allows the implementation of AL strategies with low effort and enables a fair, data-driven comparison by defining and tracking experiment parameters (e.g., initial dataset size, number of data points per query step, and budget). ALE helps practitioners make more informed decisions, while researchers can focus on developing new, effective AL strategies and deriving best practices for specific use cases. With such best practices, practitioners can lower their annotation costs. We present a case study to illustrate how to use the framework.},
  language = {en}
}

@inproceedings{KloeserBuesgenKohletal.2023,
  author = {Kl{\"o}ser, Lars and B{\"u}sgen, Andr{\'e} and Kohl, Philipp and Kraft, Bodo and Z{\"u}ndorf, Albert},
  title = {Explaining relation classification models with semantic extents},
  series = {Deep Learning Theory and Applications. DeLTA 2023. Communications in Computer and Information Science},
  booktitle = {Deep Learning Theory and Applications. DeLTA 2023. Communications in Computer and Information Science},
  editor = {Conte, Donatello and Fred, Ana and Gusikhin, Oleg and Sansone, Carlo},
  publisher = {Springer},
  address = {Cham},
  isbn = {978-3-031-39058-6 (Print)},
  doi = {10.1007/978-3-031-39059-3_13},
  pages = {189--208},
  year = {2023},
  abstract = {In recent years, the development of large pretrained language models such as BERT and GPT has significantly improved information extraction systems on various tasks, including relation classification. State-of-the-art systems are highly accurate on scientific benchmarks, but a lack of explainability is currently a complicating factor in many real-world applications. Comprehensible systems are necessary to prevent biased, counterintuitive, or harmful decisions. We introduce semantic extents, a concept for analyzing decision patterns in the relation classification task. Semantic extents are the most influential parts of a text with respect to a classification decision. Our definition allows similar procedures to determine semantic extents for humans and models. We provide an annotation tool and a software framework to determine semantic extents for humans and models conveniently and reproducibly. Comparing both reveals that models tend to learn shortcut patterns from the data. These patterns are hard to detect with current interpretability methods, such as input reductions. Our approach can help detect and eliminate spurious decision patterns during model development. Semantic extents can thus increase the reliability and security of natural language processing systems and are an essential step toward enabling applications in critical areas like healthcare or finance.
Moreover, our work opens new research directions for developing methods to explain deep learning models.},
  language = {en}
}

@inproceedings{BuesgenKloeserKohletal.2023,
  author = {B{\"u}sgen, Andr{\'e} and Kl{\"o}ser, Lars and Kohl, Philipp and Schmidts, Oliver and Kraft, Bodo and Z{\"u}ndorf, Albert},
  title = {From cracked accounts to fake IDs: user profiling on German Telegram black market channels},
  series = {Data Management Technologies and Applications},
  booktitle = {Data Management Technologies and Applications},
  editor = {Cuzzocrea, Alfredo and Gusikhin, Oleg and Hammoudi, Slimane and Quix, Christoph},
  publisher = {Springer},
  address = {Cham},
  isbn = {978-3-031-37889-8 (Print)},
  doi = {10.1007/978-3-031-37890-4_9},
  pages = {176--202},
  year = {2023},
  abstract = {Messenger apps like WhatsApp and Telegram are frequently used for everyday communication, but they can also be utilized as platforms for illegal activity. Telegram allows public groups with up to 200,000 participants. Criminals use these public groups to trade illegal commodities and services, which is a concern for law enforcement agencies that manually monitor suspicious activity in these chat rooms. This research demonstrates how natural language processing (NLP) can assist in analyzing these chat rooms, providing an explorative overview of the domain and facilitating purposeful analyses of user behavior. We provide a publicly available corpus of text messages from four self-proclaimed black market chat rooms, annotated with entities and relations. Our pipeline aggregates the product attributes extracted from user messages into user profiles and uses these profiles, together with the products sold, as features for clustering. The extracted structured information is the foundation for further data exploration, such as identifying top vendors or performing fine-grained price analyses. Our evaluation shows that pretrained word vectors perform better for unsupervised clustering than state-of-the-art transformer models, while the latter are still superior for sequence labeling.},
  language = {en}
}