@article{KotliarOrtnerConradietal.2022, author = {Kotliar, Konstantin and Ortner, Marion and Conradi, Anna and Hacker, Patricia and Hauser, Christine and G{\"u}nthner, Roman and Moser, Michaela and Muggenthaler, Claudia and Diehl-Schmid, Janine and Priller, Josef and Schmaderer, Christoph and Grimmer, Timo}, title = {Altered retinal cerebral vessel oscillation frequencies in Alzheimer's disease compatible with impaired amyloid clearance}, series = {Neurobiology of Aging}, volume = {120}, journal = {Neurobiology of Aging}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0197-4580}, doi = {10.1016/j.neurobiolaging.2022.08.012}, pages = {117 -- 127}, year = {2022}, abstract = {Retinal vessels are similar to cerebral vessels in their structure and function. Moderately low oscillation frequencies of around 0.1 Hz have been reported as the driving force for paravascular drainage in gray matter in mice and are known as the frequencies of lymphatic vessels in humans. We aimed to elucidate whether retinal vessel oscillations are altered in Alzheimer's disease (AD) at the stage of dementia or mild cognitive impairment (MCI). Seventeen patients with mild-to-moderate dementia due to AD (ADD), 23 patients with MCI due to AD, and 18 cognitively healthy controls (HC) were examined using the Dynamic Retinal Vessel Analyzer. Oscillatory temporal changes of retinal vessel diameters were evaluated using mathematical signal analysis. Especially at moderately low frequencies around 0.1 Hz, arterial oscillations in ADD and MCI significantly prevailed over HC oscillations and correlated with disease severity. The pronounced retinal arterial vasomotion at moderately low frequencies in the ADD and MCI groups would be compatible with the view of a compensatory upregulation of paravascular drainage in AD and strengthen the amyloid clearance hypothesis.}, language = {en} } @article{Maurischat2022, author = {Maurischat, Andreas}, title = {Algebraic independence of the Carlitz period and its hyperderivatives}, series = {Journal of Number Theory}, volume = {240}, journal = {Journal of Number Theory}, publisher = {Elsevier}, address = {Orlando, Fla.}, issn = {0022-314X}, doi = {10.1016/j.jnt.2022.01.006}, pages = {145 -- 162}, year = {2022}, language = {en} } @inproceedings{AmirBauckhageChircuetal.2022, author = {Amir, Malik and Bauckhage, Christian and Chircu, Alina and Czarnecki, Christian and Knopf, Christian and Piatkowski, Nico and Sultanow, Eldar}, title = {What can we expect from quantum (digital) twins?}, publisher = {AIS Electronic Library (AISeL)}, pages = {1 -- 14}, year = {2022}, abstract = {Digital twins enable the modeling and simulation of real-world entities (objects, processes or systems), resulting in improvements in the associated value chains. The emerging field of quantum computing holds tremendous promise for evolving this virtualization towards Quantum (Digital) Twins (QDT) and ultimately Quantum Twins (QT). The quantum (digital) twin concept is not a contradiction in terms - but instead describes a hybrid approach that can be implemented using the technologies available today by combining classical computing and digital twin concepts with quantum processing. This paper presents the status quo of research and practice on quantum (digital) twins.
It also discusses their potential to create competitive advantage through real-time simulation of highly complex, interconnected entities that helps companies better address changes in their environment and differentiate their products and services.}, language = {en} } @article{LindnerBurgerRutledgeetal.2022, author = {Lindner, Simon and Burger, Ren{\'e} and Rutledge, Douglas N. and Do, Xuan Tung and Rumpf, Jessica and Diehl, Bernd W. K. and Schulze, Margit and Monakhova, Yulia}, title = {Is the calibration transfer of multivariate calibration models between high- and low-field NMR instruments possible? A case study of lignin molecular weight}, series = {Analytical chemistry}, volume = {94}, journal = {Analytical chemistry}, number = {9}, publisher = {ACS Publications}, address = {Washington, DC}, issn = {1520-6882}, doi = {10.1021/acs.analchem.1c05125}, pages = {3997 -- 4004}, year = {2022}, abstract = {Although several successful applications of benchtop nuclear magnetic resonance (NMR) spectroscopy in quantitative mixture analysis exist, the possibility of calibration transfer remains mostly unexplored, especially between high- and low-field NMR. This study investigates for the first time the calibration transfer of partial least squares regressions [weight average molecular weight (Mw) of lignin] between high-field (600 MHz) NMR and benchtop NMR devices (43 and 60 MHz). For the transfer, piecewise direct standardization, calibration transfer based on canonical correlation analysis, and transfer via the extreme learning machine auto-encoder method are employed. Despite the immense resolution difference between high-field and low-field NMR instruments, the results demonstrate that the calibration transfer from high- to low-field is feasible in the case of a physical property, namely, the molecular weight, achieving validation errors close to the original calibration (down to only 1.2 times higher root mean square errors). These results introduce new perspectives for applications of benchtop NMR, in which existing calibrations from expensive high-field instruments can be transferred to cheaper benchtop instruments to economize.}, language = {en} } @article{MonakhovaDiehl2022, author = {Monakhova, Yulia and Diehl, Bernd W.K.}, title = {Multinuclear NMR screening of pharmaceuticals using standardization by 2H integral of a deuterated solvent}, series = {Journal of Pharmaceutical and Biomedical Analysis}, volume = {209}, journal = {Journal of Pharmaceutical and Biomedical Analysis}, number = {Article number: 114530}, publisher = {Elsevier}, issn = {0731-7085}, doi = {10.1016/j.jpba.2021.114530}, year = {2022}, abstract = {An NMR standardization approach that uses the 2H integral of a deuterated solvent for quantitative multinuclear analysis of pharmaceuticals is described. As a proof of principle, the existing NMR procedure for the analysis of heparin products according to the US Pharmacopeia monograph is extended to the determination of Na+ and Cl- content in this matrix. Quantification is performed based on the ratio of a 23Na (35Cl) NMR integral and the 2H NMR signal of the deuterated solvent, D2O, acquired using the specific spectrometer hardware. As an alternative, the possibility of 133Cs standardization using the addition of Cs2CO3 stock solution is shown. Validation characteristics (linearity, repeatability, sensitivity) are evaluated.
A holistic NMR profiling of heparin products can now also be used for the quantitative determination of inorganic compounds in a single analytical run using a single sample. In general, the new standardization methodology provides an appealing alternative for the NMR screening of inorganic and organic components in pharmaceutical products.}, language = {en} } @article{BurgerLindnerRumpfetal.2022, author = {Burger, Ren{\'e} and Lindner, Simon and Rumpf, Jessica and Do, Xuan Tung and Diehl, Bernd W.K. and Rehahn, Matthias and Monakhova, Yulia and Schulze, Margit}, title = {Benchtop versus high field NMR: Comparable performance found for the molecular weight determination of lignin}, series = {Journal of Pharmaceutical and Biomedical Analysis}, volume = {212}, journal = {Journal of Pharmaceutical and Biomedical Analysis}, number = {Article number: 114649}, publisher = {Elsevier}, address = {New York, NY}, issn = {0731-7085}, doi = {10.1016/j.jpba.2022.114649}, year = {2022}, abstract = {Lignin is a promising renewable biopolymer being investigated worldwide as an environmentally benign substitute for fossil-based aromatic compounds, e.g. for use as an excipient with antioxidant and antimicrobial properties in drug delivery or even as an active compound. For its successful implementation into process streams, a quick, easy, and reliable method is needed for its molecular weight determination. Here we present a method using 1H spectra of benchtop as well as conventional NMR systems in combination with multivariate data analysis to determine lignin's molecular weight (Mw and Mn) and polydispersity index (PDI). A set of 36 organosolv lignin samples (from Miscanthus x giganteus, Paulownia tomentosa and Silphium perfoliatum) was used for the calibration and cross validation, and 17 samples were used as an external validation set. Validation errors between 5.6\% and 12.9\% were achieved for all parameters on all NMR devices (43, 60, 500 and 600 MHz). Surprisingly, no significant difference in the performance of the benchtop and high-field devices was found. This facilitates the application of this method for determining lignin's molecular weight in an industrial environment because of the low maintenance expenditure, small footprint, ruggedness, and low cost of permanent magnet benchtop NMR systems.}, language = {en} } @article{MonakhovaSobolevaFedotovaetal.2022, author = {Monakhova, Yulia and Soboleva, Polina M. and Fedotova, Elena S. and Musina, Kristina T. and Burmistrova, Natalia A.}, title = {Quantum chemical calculations of IR spectra of heparin disaccharide subunits}, series = {Computational and Theoretical Chemistry}, volume = {1217}, journal = {Computational and Theoretical Chemistry}, number = {Article number: 113891}, publisher = {Elsevier}, address = {New York, NY}, issn = {2210-271X}, doi = {10.1016/j.comptc.2022.113891}, year = {2022}, abstract = {Heparin is a natural polysaccharide, which plays an essential role in many biological processes. Alterations in building blocks can modify biological roles of commercial heparin products, due to significant changes in the conformation of the polymer chain. The structural variability of heparin leads to difficulty in quality control using different analytical methods, including infrared (IR) spectroscopy. In this paper, molecular modelling of heparin disaccharide subunits was performed using quantum chemistry. The structural and spectral parameters of these disaccharides have been calculated using RHF/6-311G.
In addition, over-sulphated chondroitin sulphate disaccharide was studied as one of the most widespread contaminants of heparin. Calculated IR spectra were analyzed with respect to specific structure parameters. The IR spectroscopic fingerprint was found to be sensitive to the substitution pattern of disaccharide subunits. Vibrational assignments of calculated spectra were correlated with experimental IR spectral bands of native heparin. Chemometrics was used to perform multivariate analysis of simulated spectral data.}, language = {en} } @incollection{AkimbekovDigelRazzaque2022, author = {Akimbekov, Nuraly S. and Digel, Ilya and Razzaque, Mohammed S.}, title = {Role of vitamins in maintaining structure and function of intestinal microbiome}, series = {Comprehensive Gut Microbiota}, booktitle = {Comprehensive Gut Microbiota}, publisher = {Elsevier}, address = {Amsterdam}, isbn = {978-0-12-822036-8}, doi = {10.1016/B978-0-12-819265-8.00043-7}, pages = {320 -- 334}, year = {2022}, abstract = {The recent advances in microbiology have shed light on understanding the role of vitamins beyond the nutritional range. Vitamins are critical in contributing to healthy biodiversity and maintaining the proper function of gut microbiota. The sharing of vitamins among bacterial populations promotes stability in community composition and diversity; however, this balance becomes disturbed in various pathologies. Here, we overview and analyze the ability of different vitamins to selectively and specifically induce changes in the intestinal microbial community. Some schemes and regularities become visible, which may provide new insights and avenues for therapeutic management and functional optimization of the gut microbiota.}, language = {en} } @incollection{AkimbekovDigelSherelkhanetal.2022, author = {Akimbekov, Nuraly S. and Digel, Ilya and Sherelkhan, Dinara K. and Razzaque, Mohammed S.}, title = {Vitamin D and Phosphate Interactions in Health and Disease}, series = {Phosphate Metabolism}, booktitle = {Phosphate Metabolism}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-91621-3}, doi = {10.1007/978-3-030-91623-7_5}, pages = {37 -- 46}, year = {2022}, abstract = {Vitamin D plays an essential role in calcium and inorganic phosphate (Pi) homeostasis, maintaining their optimal levels to assure adequate bone mineralization. Vitamin D, as calcitriol (1,25(OH)2D), not only increases intestinal calcium and phosphate absorption but also facilitates their renal reabsorption, leading to elevated serum calcium and phosphate levels. The interaction of 1,25(OH)2D with its receptor (VDR) increases the efficiency of intestinal absorption of calcium to 30-40\% and phosphate to nearly 80\%. Serum phosphate levels can also influence 1,25(OH)2D and fibroblast growth factor 23 (FGF23) levels, i.e., higher phosphate concentrations suppress vitamin D activation and stimulate parathyroid hormone (PTH) release, while a high FGF23 serum level leads to reduced vitamin D synthesis. In the vitamin D-deficient state, the intestinal calcium absorption decreases and the secretion of PTH increases, which in turn causes the stimulation of 1,25(OH)2D production, resulting in excessive urinary phosphate loss. Maintenance of phosphate homeostasis is essential as hyperphosphatemia is a risk factor for cardiovascular calcification, chronic kidney diseases (CKD), and premature aging, while hypophosphatemia is usually associated with rickets and osteomalacia.
This chapter elaborates on the possible interactions between vitamin D and phosphate in health and disease.}, language = {en} } @article{BhattaraiHorbachStaatetal.2022, author = {Bhattarai, Aroj and Horbach, Andreas and Staat, Manfred and Kowalczyk, Wojciech and Tran, Thanh Ngoc}, title = {Virgin passive colon biomechanics and a literature review of active contraction constitutive models}, series = {Biomechanics}, volume = {2}, journal = {Biomechanics}, number = {2}, publisher = {MDPI}, address = {Basel}, issn = {2673-7078}, doi = {10.3390/biomechanics2020013}, pages = {138 -- 157}, year = {2022}, abstract = {The objective of this paper is to present our findings on the biomechanical aspects of the virgin passive anisotropic hyperelasticity of the porcine colon based on equibiaxial tensile experiments. Firstly, the characterization of the intestine tissues is discussed for a nearly incompressible hyperelastic fiber-reinforced Holzapfel-Gasser-Ogden constitutive model in virgin passive loading conditions. The stability of the evaluated material parameters is checked for the polyconvexity of the adopted strain energy function using positive eigenvalue constraints of the Hessian matrix with MATLAB. The constitutive material description of the intestine with two collagen fibers in the submucosal and muscular layer each has been implemented in the FORTRAN platform of the commercial finite element software LS-DYNA, and two equibiaxial tensile simulations are presented to validate the results with the optical strain images obtained from the experiments. Furthermore, this paper also reviews the existing models of the active smooth muscle cells, but these models have not been computationally studied here. The review part shows that the constitutive models originally developed for the active contraction of skeletal muscle based on Hill's three-element model, Murphy's four-state cross-bridge chemical kinetic model and Huxley's sliding-filament hypothesis, which are mainly used for arteries, are appropriate for numerical contraction analysis of the large intestine.}, language = {en} } @article{ValeroBungErpicumetal.2022, author = {Valero, Daniel and Bung, Daniel B. and Erpicum, Sebastien and Peltier, Yann and Dewals, Benjamin}, title = {Unsteady shallow meandering flows in rectangular reservoirs: a modal analysis of URANS modelling}, series = {Journal of Hydro-environment Research}, journal = {Journal of Hydro-environment Research}, number = {In Press}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1570-6443}, doi = {10.1016/j.jher.2022.03.002}, year = {2022}, abstract = {Shallow flows are common in natural and human-made environments. Even for simple rectangular shallow reservoirs, recent laboratory experiments show that the developing flow fields are particularly complex, involving large-scale turbulent structures. For specific combinations of reservoir size and hydraulic conditions, a meandering jet can be observed. While some aspects of this pseudo-2D flow pattern can be reproduced using a 2D numerical model, new 3D simulations, based on the unsteady Reynolds-Averaged Navier-Stokes equations, show consistent advantages as presented herein. A Proper Orthogonal Decomposition was used to characterize the four most energetic modes of the meandering jet at the free surface level, allowing comparison against experimental data and 2D (depth-averaged) numerical results. Three different isotropic eddy viscosity models (RNG k-ε, k-ε, k-ω) were tested.
The 3D models accurately predicted the frequency of the modes, whereas the amplitudes of the modes and associated energy were damped for the friction-dominant cases and augmented for non-frictional ones. The performance of the three turbulence models remained essentially similar, with slightly better predictions by the RNG k-ε model in the case with the highest Reynolds number. Finally, the Q-criterion was used to identify vortices and study their dynamics, assisting in the identification of the differences between: i) the three-dimensional phenomenon (here reproduced), ii) its two-dimensional footprint in the free surface (experimental observations) and iii) the depth-averaged case (represented by 2D models).}, language = {en} } @inproceedings{WiegnerVolkerMainzetal.2022, author = {Wiegner, Jonas and Volker, Hanno and Mainz, Fabian and Backes, Andreas and L{\"o}ken, Michael and H{\"u}ning, Felix}, title = {Wiegand-effect-powered wireless IoT sensor node}, series = {Sensoren und Messsysteme 2022}, booktitle = {Sensoren und Messsysteme 2022}, publisher = {VDE Verlag GmbH}, address = {Berlin}, isbn = {978-3-8007-5835-7}, pages = {255 -- 260}, year = {2022}, abstract = {In this article we describe an Internet-of-Things sensing device with a wireless interface which is powered by the often-overlooked harvesting method of the Wiegand effect. The sensor can determine position, temperature or other resistively measurable quantities and can transmit the data via an ultra-low power ultra-wideband (UWB) data transmitter. With this approach, we can acquire, process, and wirelessly transmit data in a pulsed, energy-self-sufficient operation. A proof-of-concept system was built to prove the feasibility of the approach. The energy consumption of the system is analyzed and traced back in detail to the individual components, compared to the generated energy, and processed to identify further optimization options. Based on the proof-of-concept, an application demonstrator was developed. Finally, we point out possible use cases.}, language = {en} } @article{UlmerBraunChengetal.2022, author = {Ulmer, Jessica and Braun, Sebastian and Cheng, Chi-Tsun and Dowey, Steve and Wollert, J{\"o}rg}, title = {Gamification of virtual reality assembly training: Effects of a combined point and level system on motivation and training results}, series = {International Journal of Human-Computer Studies}, volume = {165}, journal = {International Journal of Human-Computer Studies}, number = {Art. No. 102854}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1071-5819}, doi = {10.1016/j.ijhcs.2022.102854}, year = {2022}, abstract = {Virtual Reality (VR) offers novel possibilities for remote training regardless of the availability of the actual equipment, the presence of specialists, and the training locations. Research shows that training environments that adapt to users' preferences and performance can promote more effective learning. However, the observed results can hardly be traced back to specific adaptive measures rather than to the new training approach as a whole. This study analyzes the effects of a combined point and leveling VR-based gamification system on assembly training targeting specific training outcomes and users' motivations. The Gamified-VR-Group with 26 subjects received the gamified training, and the Non-Gamified-VR-Group with 27 subjects received the alternative without gamified elements. Both groups conducted their VR training at least three times before assembling the actual structure.
The study found that a level system that gradually increases the difficulty and error probability in VR can significantly lower real-world error rates, self-corrections, and support usages. According to our study, a high error occurrence at the highest training level reduced the Gamified-VR-Group's feeling of competence compared to the Non-Gamified-VR-Group, but at the same time also led to lower error probabilities in real life. It is concluded that a level system with a variable task difficulty should be combined with carefully balanced positive and negative feedback messages. This way, better learning results and an improved self-evaluation can be achieved while not causing significant impacts on the participants' feeling of competence.}, language = {en} } @inproceedings{EggertKriska2022, author = {Eggert, Mathias and Kriska, Melina}, title = {Gamification for software development processes - relevant affordances and design principles}, series = {Proceedings of the 55th Hawaii International Conference on System Sciences}, booktitle = {Proceedings of the 55th Hawaii International Conference on System Sciences}, publisher = {HICSS Publishing}, address = {Honolulu}, isbn = {978-0-9981331-5-7}, doi = {10.24251/HICSS.2022.200}, pages = {1614 -- 1623}, year = {2022}, abstract = {A Gamified Information System (GIS) implements game concepts and elements, such as affordances and game design principles, to motivate people. Based on the idea of developing a GIS to increase the motivation of software developers to perform software quality tasks, the research work at hand aims at investigating relevant requirements from that target group. Therefore, 14 interviews with software development experts are conducted and analyzed. According to the results, software developers prefer the affordances points, narrative storytelling in a multiplayer and a round-based setting. Furthermore, six design principles for the development of a GIS are derived.}, language = {en} } @incollection{HoffschmidtAlexopoulosRauetal.2022, author = {Hoffschmidt, Bernhard and Alexopoulos, Spiros and Rau, Christoph and Sattler, Johannes Christoph and Anthrakidis, Anette and Teixeira Boura, Cristiano Jos{\'e} and O'Connor, B. and Chico Caminos, R.A. and Rend{\'o}n, C. and Hilger, P.}, title = {Concentrating solar power}, series = {Comprehensive Renewable Energy (Second Edition) / Volume 3: Solar Thermal Systems: Components and Applications}, booktitle = {Comprehensive Renewable Energy (Second Edition) / Volume 3: Solar Thermal Systems: Components and Applications}, publisher = {Elsevier}, address = {Amsterdam}, isbn = {978-0-12-819734-9}, pages = {670 -- 724}, year = {2022}, abstract = {The focus of this chapter is the production of power and the use of the heat produced from concentrated solar thermal power (CSP) systems. The chapter starts with the general theoretical principles of concentrating systems, including the description of the concentration ratio and the energy and mass balance. The power conversion system is the main part, covering solar-only operation and the increase in operational hours. Solar-only operation includes the use of steam turbines, gas turbines, organic Rankine cycles and solar dishes. The operational hours can be increased with hybridization and with storage. Another important topic is cogeneration, where solar cooling, desalination and heat usage are described.
Many examples of commercial CSP power plants and research facilities, both from the past and currently installed and in operation, are described in detail. The chapter closes with economic and environmental aspects and with the future potential of the development of CSP around the world.}, language = {en} } @incollection{HoffschmidtAlexopoulosGoettscheetal.2022, author = {Hoffschmidt, Bernhard and Alexopoulos, Spiros and G{\"o}ttsche, Joachim and Sauerborn, Markus and Kaufhold, O.}, title = {High Concentration Solar Collectors}, series = {Comprehensive Renewable Energy (Second Edition) / Volume 3: Solar Thermal Systems: Components and Applications}, booktitle = {Comprehensive Renewable Energy (Second Edition) / Volume 3: Solar Thermal Systems: Components and Applications}, publisher = {Elsevier}, address = {Amsterdam}, isbn = {978-0-12-819734-9}, doi = {10.1016/B978-0-12-819727-1.00058-3}, pages = {198 -- 245}, year = {2022}, abstract = {Solar thermal concentrated power is an emerging technology that provides clean electricity for the growing energy market. Solar thermal concentrated power plant systems include the parabolic trough, the Fresnel collector, the solar dish, and the central receiver system. For high-concentration solar collector systems, optical and thermal analysis is essential. There exist a number of measurement techniques and systems for the optical and thermal characterization of the efficiency of solar thermal concentrated systems. For each system, the structure, components, and specific characteristics are described. The chapter additionally presents an outline for the calculation of system performance and operation and maintenance topics. One main focus is on the models of components and their construction details as well as different types on the market. In the later part of this article, different criteria for the choice of technology are analyzed in detail.}, language = {en} } @inproceedings{StaatTran2022, author = {Staat, Manfred and Tran, Ngoc Trinh}, title = {Strain based brittle failure criteria for rocks}, series = {Proceedings of (NACOME2022) The 11th National Conference on Mechanics, Vol. 1. Solid Mechanics, Rock Mechanics, Artificial Intelligence, Teaching and Training, Hanoi, December 2-3, 2022}, booktitle = {Proceedings of (NACOME2022) The 11th National Conference on Mechanics, Vol. 1. Solid Mechanics, Rock Mechanics, Artificial Intelligence, Teaching and Training, Hanoi, December 2-3, 2022}, publisher = {Nha xuat ban Khoa hoc tu nhien va Cong nghe (Verlag Naturwissenschaft und Technik)}, address = {Hanoi}, isbn = {978-604-357-084-7}, pages = {500 -- 509}, year = {2022}, abstract = {When confining pressure is low or absent, extensional fractures are typical, with fractures occurring on unloaded planes in rock. These "paradox" fractures can be explained by a phenomenological extension strain failure criterion. In the past, a simple empirical criterion for fracture initiation in brittle rock has been developed. But this criterion makes unrealistic strength predictions in biaxial compression and tension. A new extension strain criterion overcomes this limitation by adding a weighted principal shear component. The weight is chosen such that the enriched extension strain criterion represents the same failure surface as the Mohr-Coulomb (MC) criterion. Thus, the MC criterion has been derived as an extension strain criterion predicting failure modes, which are unexpected in the understanding of the failure of cohesive-frictional materials.
In progressive damage of rock, the most likely fracture direction is orthogonal to the maximum extension strain. The enriched extension strain criterion is proposed as a threshold surface for crack initiation (CI) and crack damage (CD) and as a failure surface at peak (P). Examples show that the enriched extension strain criterion predicts much lower volumes of damaged rock mass compared to the simple extension strain criterion.}, language = {en} } @article{MuellerSeginWeigandetal.2022, author = {Mueller, Tobias and Segin, Alexander and Weigand, Christoph and Schmitt, Robert H.}, title = {Feature selection for measurement models}, series = {International journal of quality \& reliability management}, journal = {International journal of quality \& reliability management}, number = {Vol. ahead-of-print, No. ahead-of-print.}, publisher = {Emerald Group Publishing Limited}, address = {Bingley}, issn = {0265-671X}, doi = {10.1108/IJQRM-07-2021-0245}, year = {2022}, abstract = {Purpose: In the determination of the measurement uncertainty, the GUM procedure requires the building of a measurement model that establishes a functional relationship between the measurand and all influencing quantities. Since the effort of modelling as well as quantifying the measurement uncertainties depends on the number of influencing quantities considered, the aim of this study is to determine relevant influencing quantities and to remove irrelevant ones from the dataset. Design/methodology/approach: In this work, it was investigated whether the effort of modelling for the determination of measurement uncertainty can be reduced by the use of feature selection (FS) methods. For this purpose, 9 different FS methods were tested on 16 artificial test datasets, whose properties (number of data points, number of features, complexity, features with low influence and redundant features) were varied via a design of experiments. Findings: Based on a success metric, the stability, universality and complexity of the method, two FS methods could be identified that reliably identify relevant and irrelevant influencing quantities for a measurement model. Originality/value: For the first time, FS methods were applied to datasets with properties of classical measurement processes. The simulation-based results serve as a basis for further research in the field of FS for measurement models. The identified algorithms will be applied to real measurement processes in the future.}, language = {en} } @article{TranTrinhDaoetal.2022, author = {Tran, Ngoc Trinh and Trinh, Tu Luc and Dao, Ngoc Tien and Giap, Van Tan and Truong, Manh Khuyen and Dinh, Thuy Ha and Staat, Manfred}, title = {FEM shakedown analysis of structures under random strength with chance constrained programming}, series = {Vietnam Journal of Mechanics}, volume = {44}, journal = {Vietnam Journal of Mechanics}, number = {4}, publisher = {Vietnam Academy of Science and Technology (VAST)}, issn = {0866-7136}, doi = {10.15625/0866-7136/17943}, pages = {459 -- 473}, year = {2022}, abstract = {Direct methods, comprising limit and shakedown analysis, are a branch of computational mechanics. They play a significant role in mechanical and civil engineering design. The concept of direct methods aims to determine the ultimate load carrying capacity of structures beyond the elastic range. In practical problems, the direct methods lead to nonlinear convex optimization problems with a large number of variables and constraints.
If strength and loading are random quantities, the shakedown analysis can be formulated as a stochastic programming problem. In this paper, a method called chance constrained programming is presented, which is an effective method of stochastic programming to solve shakedown analysis problems under random conditions of strength. In this study, the loading is deterministic, and the strength is a normally or lognormally distributed variable.}, language = {en} } @article{BaringhausGaigall2022, author = {Baringhaus, Ludwig and Gaigall, Daniel}, title = {A goodness-of-fit test for the compound Poisson exponential model}, series = {Journal of Multivariate Analysis}, volume = {195}, journal = {Journal of Multivariate Analysis}, number = {Article 105154}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0047-259X}, doi = {10.1016/j.jmva.2022.105154}, year = {2022}, abstract = {On the basis of bivariate data, assumed to be observations of independent copies of a random vector (S,N), we consider testing the hypothesis that the distribution of (S,N) belongs to the parametric class of distributions that arise with the compound Poisson exponential model. Typically, this model is used in stochastic hydrology, with N as the number of raindays, and S as total rainfall amount during a certain time period, or in actuarial science, with N as the number of losses, and S as total loss expenditure during a certain time period. The compound Poisson exponential model is characterized in the way that a specific transform associated with the distribution of (S,N) satisfies a certain differential equation. Mimicking the function part of this equation by substituting the empirical counterparts of the transform, we obtain an expression, the weighted integral of the square of which is used as the test statistic. We deal with two variants of the latter, one of which is invariant under scale transformations of the S-part by fixed positive constants. Critical values are obtained by using a parametric bootstrap procedure. The asymptotic behavior of the tests is discussed. A simulation study demonstrates the performance of the tests in the finite sample case. The procedure is applied to rainfall data and to an actuarial dataset. A multivariate extension is also discussed.}, language = {en} }