@article{EmontsBuyel2023, author = {Emonts, Jessica and Buyel, Johannes Felix}, title = {An overview of descriptors to capture protein properties - Tools and perspectives in the context of QSAR modeling}, series = {Computational and Structural Biotechnology Journal}, volume = {21}, journal = {Computational and Structural Biotechnology Journal}, publisher = {Research Network of Computational and Structural Biotechnology}, address = {Gothenburg}, issn = {2001-0370}, doi = {10.1016/j.csbj.2023.05.022}, pages = {3234 -- 3247}, year = {2023}, abstract = {Proteins are important ingredients in food and feed, they are the active components of many pharmaceutical products, and they are necessary, in the form of enzymes, for the success of many technical processes. However, production can be challenging, especially when using heterologous host cells such as bacteria to express and assemble recombinant mammalian proteins. The manufacturability of proteins can be hindered by low solubility, a tendency to aggregate, or inefficient purification. Tools such as in silico protein engineering and models that predict separation criteria can overcome these issues but usually require the complex shape and surface properties of proteins to be represented by a small number of quantitative numeric values known as descriptors, as similarly used to capture the features of small molecules. Here, we review the current status of protein descriptors, especially for application in quantitative structure-activity relationship (QSAR) models. First, we describe the complexity of proteins and the properties that descriptors must accommodate. Then we introduce descriptors of shape and surface properties that quantify the global and local features of proteins. Finally, we highlight the current limitations of protein descriptors and propose strategies for the derivation of novel protein descriptors that are more informative.}, language = {en} } @article{EmonsHuellenkremerSchoening2001, author = {Emons, H. and H{\"u}llenkremer, B. and Sch{\"o}ning, Michael Josef}, title = {Detection of metal ions in aqueous solutions by voltohmmetry}, series = {Fresenius' Journal of Analytical Chemistry}, volume = {369}, journal = {Fresenius' Journal of Analytical Chemistry}, number = {1}, issn = {0937-0633}, pages = {42 -- 46}, year = {2001}, language = {en} } @article{EmonsGlueckSchoening2001, author = {Emons, H. and Gl{\"u}ck, O. and Sch{\"o}ning, Michael Josef}, title = {Voltohmmetry - An alternative detection principle at ultrathin metal electrodes in solution}, series = {Chemical and biological sensors and analytical methods : proceedings of the international symposium / Sensor, Physical Electrochemistry, and Organic and Biological Electrochemistry Divisions. Ed.: M. Butler}, journal = {Chemical and biological sensors and analytical methods : proceedings of the international symposium / Sensor, Physical Electrochemistry, and Organic and Biological Electrochemistry Divisions. Ed.: M. Butler}, publisher = {Electrochemical Society}, address = {Pennington, NJ}, isbn = {1-56677-351-2}, pages = {1 -- 3}, year = {2001}, language = {en} } @article{EmonsGlueckHuellenkremeretal.2001, author = {Emons, H. and Gl{\"u}ck, O. and H{\"u}llenkremer, B. and Sch{\"o}ning, Michael Josef}, title = {Voltohmmetry as an alternative detection method at polycrystalline metal film electrodes}, series = {Electroanalysis}, volume = {13}, journal = {Electroanalysis}, number = {
8-9}, issn = {1040-0397}, pages = {677 -- 680}, year = {2001}, language = {en} } @article{EmonsBaadeSchoening2000, author = {Emons, H. and Baade, A. and Sch{\"o}ning, Michael Josef}, title = {Voltammetric determination of heavy metals in microvolumes of rain water}, series = {Electroanalysis}, volume = {12}, journal = {Electroanalysis}, number = {15}, issn = {1040-0397}, pages = {1171 -- 1176}, year = {2000}, language = {en} } @article{EmigHebelSchwark2022, author = {Emig, J. and Hebel, Christoph and Schwark, A.}, title = {Einsatzbereiche f{\"u}r Verkehrsnachfragemodelle}, series = {Straßenverkehrstechnik}, volume = {66}, journal = {Straßenverkehrstechnik}, number = {10}, publisher = {Kirschbaum Verlag GmbH}, address = {Bonn}, issn = {0039-2219}, doi = {10.53184/SVT10-2022-2}, pages = {727 -- 736}, year = {2022}, abstract = {In practice, travel demand models have a wide range of applications. They provide indicators of transport supply and travel demand for the current state as well as for future scenarios and thus deliver the basis for transport planning decisions. The new „Empfehlungen zum Einsatz von Verkehrsnachfragemodellen f{\"u}r den Personenverkehr" (EVNM-PV) (FGSV 2022) use typical planning tasks to illustrate the differentiated requirements these impose on model conception and development. Against the background of the specific task and its particular planning requirements, the model specification to be derived forms the agreed basis between the client and the model developer for the concrete substantive and technical design of the travel demand model.}, language = {de} } @article{EmhardtJarodzkaBrandGruweletal.2022, author = {Emhardt, Selina N. and Jarodzka, Halszka and Brand-Gruwel, Saskia and Drumm, Christian and Niehorster, Diederick C. and van Gog, Tamara}, title = {What is my teacher talking about? Effects of displaying the teacher's gaze and mouse cursor cues in video lectures on students' learning}, series = {Journal of Cognitive Psychology}, journal = {Journal of Cognitive Psychology}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {2044-5911}, doi = {10.1080/20445911.2022.2080831}, pages = {1 -- 19}, year = {2022}, abstract = {Eye movement modelling examples (EMME) are instructional videos that display a teacher's eye movements as a "gaze cursor" (e.g. a moving dot) superimposed on the learning task. This study investigated whether previous findings on the beneficial effects of EMME would extend to online lecture videos and compared the effects of displaying the teacher's gaze cursor with displaying the more traditional mouse cursor as a tool to guide learners' attention. Novices (N = 124) studied a pre-recorded video lecture on how to model business processes in a 2 (mouse cursor absent/present) × 2 (gaze cursor absent/present) between-subjects design. Unexpectedly, we did not find significant effects of the presence of gaze or mouse cursors on mental effort and learning. However, participants who watched videos with the gaze cursor found it easier to follow the teacher.
Overall, participants responded positively to the gaze cursor, especially when the mouse cursor was not displayed in the video.}, language = {en} } @article{EmhardtJarodzkaBrandGruweletal.2020, author = {Emhardt, Selina and Jarodzka, Halszka and Brand-Gruwel, Saskia and Drumm, Christian and Gog, Tamara van}, title = {Introducing eye movement modeling examples for programming education and the role of teacher's didactic guidance}, series = {ETRA '20 Short Papers: ACM Symposium on Eye Tracking Research and Applications}, journal = {ETRA '20 Short Papers: ACM Symposium on Eye Tracking Research and Applications}, number = {Art. 52}, publisher = {ACM}, address = {New York}, doi = {10.1145/3379156.3391978}, pages = {1 -- 4}, year = {2020}, abstract = {In this article, we describe how eye-tracking technology might become a promising tool for teaching programming skills, such as debugging, with 'Eye Movement Modeling Examples' (EMME). EMME are tutorial videos that visualize an expert's (e.g., a programming teacher's) eye movements during task performance to guide students' attention, e.g., as a moving dot or circle. We first introduce the general idea behind the EMME method and present studies that showed promising initial results regarding the benefits of EMME for supporting programming education. However, we argue that the instructional design of EMME varies notably across these studies, as evidence-based guidelines on how to create effective EMME are often lacking. As an example, we present our ongoing research on the effects of different ways of instructing the EMME model prior to video creation. Finally, we highlight open questions for future investigations that could help improve the design of EMME for (programming) education.}, language = {en} } @article{ElsenKraissKrumbiegeletal.1999, author = {Elsen, Ingo and Kraiss, Karl-Friedrich and Krumbiegel, Dirk and Walter, Peter and Wickel, Jochen}, title = {Visual information retrieval for 3D product identification: a midterm report}, series = {KI - K{\"u}nstliche Intelligenz}, volume = {13}, journal = {KI - K{\"u}nstliche Intelligenz}, number = {1}, publisher = {Springer}, address = {Berlin}, issn = {1610-1987}, pages = {64 -- 67}, year = {1999}, language = {en} } @article{ElsenKraiss1999, author = {Elsen, Ingo and Kraiss, Karl-Friedrich}, title = {System concept and realization of a scalable neurocomputing architecture}, series = {Systems Analysis Modelling Simulation}, volume = {35}, journal = {Systems Analysis Modelling Simulation}, number = {4}, publisher = {Gordon and Breach Science Publishers}, address = {Amsterdam}, issn = {0232-9298}, pages = {399 -- 419}, year = {1999}, abstract = {This paper describes the realization of a novel neurocomputer based on the concept of a coprocessor. In contrast to existing neurocomputers, the main interest was the realization of a scalable, flexible system capable of computing neural networks of arbitrary topology and scale, with the software remaining fully independent of the special hardware. At the same time, computational power can be added whenever needed and adapted flexibly to the requirements of the application. Hardware independence is achieved by a run-time system that can autonomously use all available computing power, including multiple host CPUs and an arbitrary number of neural coprocessors. The realization of arbitrary neural topologies is provided through the implementation of the elementary operations found in most neural topologies.}, language = {en} }