@inproceedings{SildatkeKarwanniKraftetal.2020,
  author    = {Sildatke, Michael and Karwanni, Hendrik and Kraft, Bodo and Schmidts, Oliver and Z{\"u}ndorf, Albert},
  title     = {Automated Software Quality Monitoring in Research Collaboration Projects},
  booktitle = {ICSEW'20: Proceedings of the IEEE/ACM 42nd International Conference on Software Engineering Workshops},
  publisher = {ACM},
  address   = {New York, NY},
  doi       = {10.1145/3387940.3391478},
  pages     = {603--610},
  year      = {2020},
  abstract  = {In collaborative research projects, both researchers and practitioners work together to solve business-critical challenges. These projects often deal with ETL processes in which humans extract information from non-machine-readable documents by hand. AI-based machine learning models can help to solve this problem. Since machine learning approaches are not deterministic, their output quality may decrease over time. This leads to an overall quality loss of the application that embeds the machine learning models; hence, the software quality in development and in production may differ. Machine learning models are black boxes, which makes practitioners skeptical and raises the inhibition threshold for early productive use of research prototypes. Continuous monitoring of software quality in production offers an early response capability for quality loss and encourages the use of machine learning approaches. Furthermore, experts have to ensure that they integrate possible new inputs into the model training as quickly as possible. In this paper, we introduce an architecture pattern with a reference implementation that extends the concept of Metrics Driven Research Collaboration with automated software quality monitoring in productive use and the possibility to auto-generate new test data from documents processed in production. Through automated monitoring of the software quality and auto-generated test data, this approach ensures that the software quality meets and maintains the requested thresholds in productive use, even during further continuous deployment and under changing input data.},
  language  = {en}
}

@techreport{BarnatArntzBerneckeretal.2024,
  type      = {Working Paper},
  author    = {Barnat, Miriam and Arntz, Kristian and Bernecker, Andreas and Fissabre, Anke and Franken, Norbert and Goldbach, Daniel and H{\"u}ning, Felix and J{\"o}rissen, J{\"o}rg and Kirsch, Ansgar and Pettrak, J{\"u}rgen and Rexforth, Matthias and Rosenkranz, Josef and Terstegge, Andreas},
  title     = {Strategische Gestaltung von Studieng{\"a}ngen f{\"u}r die Zukunft: Ein kollaborativ entwickeltes Self-Assessment},
  series    = {Hochschulforum Digitalisierung - Diskussionspapier},
  publisher = {Stifterverband f{\"u}r die Deutsche Wissenschaft},
  address   = {Berlin},
  issn      = {2365-7081},
  pages     = {16},
  year      = {2024},
  abstract  = {The discussion paper describes a process at FH Aachen for developing and implementing a self-assessment tool for degree programmes. The process aimed to strengthen the relevance of digitalization, internationalization, and sustainability within degree programmes. Through workshops and collaborative development with the deans of studies, a questionnaire emerged that serves the reflection and strategic further development of the degree programmes.},
  language  = {de}
}