@inproceedings{KloeserKohlKraftetal.2021, author = {Kl{\"o}ser, Lars and Kohl, Philipp and Kraft, Bodo and Z{\"u}ndorf, Albert}, title = {Multi-attribute relation extraction (MARE): simplifying the application of relation extraction}, series = {Proceedings of the 2nd International Conference on Deep Learning Theory and Applications DeLTA - Volume 1}, booktitle = {Proceedings of the 2nd International Conference on Deep Learning Theory and Applications DeLTA - Volume 1}, publisher = {SciTePress}, address = {Set{\'u}bal}, isbn = {978-989-758-526-5}, doi = {10.5220/0010559201480156}, pages = {148 -- 156}, year = {2021}, abstract = {Relation extraction, a task in natural language understanding, makes innovative and encouraging novel business concepts possible and facilitates new digitalized decision-making processes. Current approaches allow the extraction of relations with a fixed number of entities as attributes. Extracting relations with an arbitrary number of attributes requires complex systems and costly relation-trigger annotations to assist these systems. We introduce multi-attribute relation extraction (MARE) as an assumption-less problem formulation with two approaches, facilitating an explicit mapping from business use cases to the data annotations. Avoiding elaborate annotation constraints simplifies the application of relation extraction approaches. The evaluation compares our models to current state-of-the-art event extraction and binary relation extraction methods. Our approaches show improvements over these methods on the extraction of general multi-attribute relations.}, language = {en} }
@article{KlettkeHomburgGell2015, author = {Klettke, Tanja and Homburg, Carsten and Gell, Sebastian}, title = {How to measure analyst forecast effort}, series = {European Accounting Review}, volume = {24}, journal = {European Accounting Review}, number = {1}, publisher = {Taylor \& Francis}, address = {London}, issn = {0963-8180}, doi = {10.1080/09638180.2014.909291}, pages = {129 -- 146}, year = {2015}, abstract = {We introduce a new way to measure the forecast effort that analysts devote to their earnings forecasts by measuring the analyst's general effort for all covered firms. While the commonly applied effort measure is based on analyst behaviour for one firm, our measure considers analyst behaviour for all covered firms. Our general effort measure captures additional information about analyst effort and thus can identify accurate forecasts. We emphasise the importance of investigating analyst behaviour in a larger context and argue that analysts who generally devote substantial forecast effort are also likely to devote substantial effort to a specific firm, even if this effort might not be captured by a firm-specific measure. Empirical results reveal that analysts who devote higher general forecast effort issue more accurate forecasts. Additional investigations show that analysts' career prospects improve with higher general forecast effort. Our measure improves on existing methods as it has higher explanatory power regarding differences in forecast accuracy than the commonly applied effort measure. Additionally, it can address research questions that cannot be examined with a firm-specific measure.
It provides a simple but comprehensive way to identify accurate analysts.}, language = {en} }
@book{Gell2012, author = {Gell, Sebastian}, title = {Determinants of earnings forecast error, earnings forecast revision and earnings forecast accuracy}, publisher = {Springer Gabler}, address = {Wiesbaden}, isbn = {978-3-8349-3936-4}, doi = {10.1007/978-3-8349-3937-1}, pages = {XXIV, 125 pages}, year = {2012}, abstract = {Earnings forecasts are ubiquitous in today's financial markets. They are essential indicators of future firm performance and a starting point for firm valuation. Extremely inaccurate and overoptimistic forecasts during the most recent financial crisis have raised serious doubts regarding the reliability of such forecasts. This thesis therefore investigates new determinants of forecast errors and accuracy. In addition, new determinants of forecast revisions are examined. More specifically, the thesis answers the following questions: 1) How do analyst incentives lead to forecast errors? 2) How do changes in analyst incentives lead to forecast revisions? 3) What factors drive differences in forecast accuracy?}, language = {en} }
@inproceedings{HueningWacheMagiera2021, author = {H{\"u}ning, Felix and Wache, Franz-Josef and Magiera, David}, title = {Redundant bus systems using dual-mode radio}, series = {Proceedings of Sixth International Congress on Information and Communication Technology}, booktitle = {Proceedings of Sixth International Congress on Information and Communication Technology}, publisher = {Springer}, address = {Singapore}, isbn = {978-981-16-2379-0}, doi = {10.1007/978-981-16-2380-6_73}, pages = {835 -- 842}, year = {2021}, abstract = {Communication via serial bus systems, like CAN, plays an important role in all kinds of embedded electronic and mechatronic systems. To cope with the requirements for functional safety of safety-critical applications, the safety features of the communication systems need to be enhanced. One measure to achieve more robust communication is to add a redundant data transmission path to the application. In general, the communication of real-time embedded systems like automotive applications is tethered, and the redundant data transmission lines are also tethered, increasing the size of the wiring harness and the weight of the system. A radio link is preferred as a redundant transmission line because it uses a complementary transmission medium compared to the wired solution and, in addition, reduces wiring harness size and weight. Standard wireless links like Wi-Fi or Bluetooth cannot meet the requirements for real-time capability with regard to bus communication. Using the new dual-mode radio enables a redundant transmission line that meets all requirements with regard to real-time capability, robustness and transparency for the data bus. In addition, it provides a complementary transmission medium with regard to commonly used tethered links.
A CAN bus system is used to demonstrate the redundant data transfer via tethered and wireless CAN.}, language = {en} }
@article{JungStaat2020, author = {Jung, Alexander and Staat, Manfred}, title = {Erratum to "Modeling and simulation of human induced pluripotent stem cell-derived cardiac tissue" [GAMM-Mitteilungen, (2019), 42, 4, 10.1002/gamm.201900002]}, series = {GAMM-Mitteilungen}, volume = {43}, journal = {GAMM-Mitteilungen}, number = {4}, publisher = {Wiley-VCH GmbH}, address = {Weinheim}, issn = {1522-2608}, doi = {10.1002/gamm.202000011}, year = {2020}, language = {en} }
@misc{JungMuellerStaat2021, author = {Jung, Alexander and M{\"u}ller, Wolfram and Staat, Manfred}, title = {Corrigendum to "Wind and fairness in ski jumping: A computer modelling analysis" [J. Biomech. 75 (2018) 147-153]}, series = {Journal of Biomechanics}, volume = {128}, journal = {Journal of Biomechanics}, number = {Article number: 110690}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0021-9290}, doi = {10.1016/j.jbiomech.2021.110690}, pages = {1 page}, year = {2021}, language = {en} }
@book{DiktaScheer2021, author = {Dikta, Gerhard and Scheer, Marsel}, title = {Bootstrap Methods: With Applications in R}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-73480-0}, doi = {10.1007/978-3-030-73480-0}, pages = {XVI, 256 pages}, year = {2021}, abstract = {This book provides a compact introduction to the bootstrap method. In addition to classical results on point estimation and test theory, multivariate linear regression models and generalized linear models are covered in detail. Special attention is given to the use of bootstrap procedures to perform goodness-of-fit tests to validate model or distributional assumptions. In some cases, new methods are presented here for the first time. The text is motivated by practical examples, and the implementations of the corresponding algorithms are always given directly in R in a comprehensible form. Overall, R is given great importance throughout. Each chapter includes a section of exercises and, for the more mathematically inclined readers, concludes with rigorous proofs. The intended audience is graduate students who already have prior knowledge of probability theory and mathematical statistics.}, language = {en} }
@inproceedings{SchmidtsKraftWinkensetal.2021, author = {Schmidts, Oliver and Kraft, Bodo and Winkens, Marvin and Z{\"u}ndorf, Albert}, title = {Catalog integration of heterogeneous and volatile product data}, series = {DATA 2020: Data Management Technologies and Applications}, booktitle = {DATA 2020: Data Management Technologies and Applications}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-83013-7}, doi = {10.1007/978-3-030-83014-4_7}, pages = {134 -- 153}, year = {2021}, abstract = {The integration of frequently changing, volatile product data from different manufacturers into a single catalog is a significant challenge for small and medium-sized e-commerce companies. They rely on the timely integration of product data to present it in aggregated form in an online shop, often without knowing the manufacturers' format specifications, conceptual understanding, or data quality. Furthermore, format, concepts, and data quality may change at any time. Consequently, integrating product catalogs into a single standardized catalog is often a laborious manual task. Current strategies to streamline or automate catalog integration use techniques based on machine learning, word vectorization, or semantic similarity.
However, most approaches struggle with low-quality or real-world data. We propose Attribute Label Ranking (ALR) as a recommendation engine to simplify the integration of previously unknown, proprietary tabular formats into a standardized catalog for practitioners. We evaluate ALR by focusing on the impact of different neural network architectures, language features, and semantic similarity. Additionally, we consider metrics for industrial application and present the impact of ALR in production and its limitations.}, language = {en} }
@inproceedings{KohlSchmidtsKloeseretal.2021, author = {Kohl, Philipp and Schmidts, Oliver and Kl{\"o}ser, Lars and Werth, Henri and Kraft, Bodo and Z{\"u}ndorf, Albert}, title = {STAMP 4 NLP - an agile framework for rapid quality-driven NLP applications development}, series = {Quality of Information and Communications Technology. QUATIC 2021}, booktitle = {Quality of Information and Communications Technology. QUATIC 2021}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-85346-4}, doi = {10.1007/978-3-030-85347-1_12}, pages = {156 -- 166}, year = {2021}, abstract = {The progress in natural language processing (NLP) research over the last years offers novel business opportunities for companies, such as automated user interaction or improved data analysis. Building sophisticated NLP applications requires dealing with modern machine learning (ML) technologies, which hinders enterprises from establishing successful NLP projects. Our experience in applied NLP research projects shows that the continuous integration of research prototypes in production-like environments with quality assurance builds trust in the software and demonstrates its convenience and usefulness with regard to the business goal. We introduce STAMP 4 NLP as an iterative and incremental process model for developing NLP applications. With STAMP 4 NLP, we merge software engineering principles with best practices from data science. Instantiating our process model allows prototypes to be created efficiently by utilizing templates, conventions, and implementations, enabling developers and data scientists to focus on the business goals. Due to our iterative-incremental approach, businesses can deploy an enhanced version of the prototype to their software environment after every iteration, maximizing potential business value and trust early and avoiding the cost of successful yet never-deployed experiments.}, language = {en} }
@article{BraunChengDoweyetal.2021, author = {Braun, Sebastian and Cheng, Chi-Tsun and Dowey, Steve and Wollert, J{\"o}rg}, title = {Performance evaluation of skill-based order-assignment in production environments with multi-agent systems}, series = {IEEE Journal of Emerging and Selected Topics in Industrial Electronics}, journal = {IEEE Journal of Emerging and Selected Topics in Industrial Electronics}, number = {Early Access}, publisher = {IEEE}, address = {New York}, issn = {2687-9735}, doi = {10.1109/JESTIE.2021.3108524}, year = {2021}, abstract = {The fourth industrial revolution introduces disruptive technologies to production environments. One of these technologies is multi-agent systems (MASs), where agents virtualize machines. However, the agents' actual performance in production environments can hardly be estimated, as most research has focused on isolated projects and specific scenarios.
We address this gap by implementing a highly connected and configurable reference model with quantifiable key performance indicators (KPIs) for production scheduling and routing in single-piece workflows. Furthermore, we propose an algorithm to optimize the search for extrema in highly connected distributed systems. The benefits, limits, and drawbacks of MASs and their performance are evaluated extensively by event-based simulations against the introduced model, which acts as a benchmark. Even though the performance of the proposed MAS is, on average, slightly lower than that of the reference system, the increased flexibility allows it to find new solutions and deliver improved factory-planning outcomes. Our MAS shows emergent behavior by using flexible production techniques to correct errors and compensate for bottlenecks. This increased flexibility offers substantial improvement potential. The general model in this paper allows the results to be transferred to estimate real systems or other models.}, language = {en} }