@inproceedings{FerreinSchifferLakemeyer2009,
  author    = {Ferrein, Alexander and Schiffer, Stefan and Lakemeyer, Gerhard},
  title     = {Embedding fuzzy controllers in Golog},
  booktitle = {IEEE International Conference on Fuzzy Systems, 2009 (FUZZ-IEEE 2009)},
  publisher = {IEEE},
  address   = {New York},
  isbn      = {978-1-4244-3596-8},
  pages     = {894 -- 899},
  year      = {2009},
  language  = {en}
}

@inproceedings{NeumannDuelbergSchifferetal.2016,
  author    = {Neumann, Tobias and D{\"u}lberg, Enno and Schiffer, Stefan and Ferrein, Alexander},
  title     = {A rotating platform for swift acquisition of dense 3D point clouds},
  booktitle = {Intelligent Robotics and Applications: 9th International Conference, ICIRA 2016, Tokyo, Japan, August 22-24, 2016, Proceedings, Part I},
  volume    = {9834},
  publisher = {Springer},
  isbn      = {978-3-319-43505-3 (Print)},
  doi       = {10.1007/978-3-319-43506-0_22},
  pages     = {257 -- 268},
  year      = {2016},
  language  = {en}
}

@inproceedings{FerreinFritzLakemeyer2004,
  author    = {Ferrein, Alexander and Fritz, Christian and Lakemeyer, Gerhard},
  title     = {On-Line Decision-Theoretic Golog for Unpredictable Domains},
  booktitle = {KI 2004: Advances in Artificial Intelligence: 27th Annual German Conference on AI, KI 2004, Ulm, Germany, September 20-24, 2004, Proceedings},
  publisher = {Springer},
  address   = {Berlin},
  pages     = {322 -- 336},
  year      = {2004},
  language  = {en}
}

@incollection{AlhwarinFerreinScholl2014,
  author    = {Alhwarin, Faraj and Ferrein, Alexander and Scholl, Ingrid},
  title     = {IR stereo Kinect: improving depth images by combining structured light with IR stereo},
  booktitle = {PRICAI 2014: Trends in Artificial Intelligence: 13th Pacific Rim International Conference on Artificial Intelligence, Gold Coast, QLD, Australia, December 1-5, 2014, Proceedings},
  series    = {Lecture Notes in Computer Science},
  volume    = {8862},
  publisher = {Springer},
  address   = {M{\"u}nchen},
  isbn      = {978-3-319-13559-5 (Print); 978-3-319-13560-1 (E-Book)},
  doi       = {10.1007/978-3-319-13560-1_33},
  pages     = {409 -- 421},
  year      = {2014},
  abstract  = {RGB-D sensors such as the Microsoft Kinect or the Asus Xtion are inexpensive 3D sensors. A depth image is computed by calculating the distortion of a known infrared (IR) light pattern which is projected into the scene. While these sensors are great devices, they have some limitations. The distance they can measure is limited, and they suffer from reflection problems on transparent, shiny, or very matte and absorbing objects. If more than one RGB-D camera is used, the IR patterns interfere with each other, which results in a massive loss of depth information. In this paper, we present a simple and powerful method to overcome these problems. We propose a stereo RGB-D camera system which combines the advantages of RGB-D cameras with those of stereo camera systems.
    The idea is to utilize the IR images of two such sensors as a stereo pair to generate a depth map. The IR patterns emitted by the IR projectors are exploited here to enhance the dense stereo matching even if the observed objects or surfaces are texture-less or transparent. The resulting disparity map is then fused with the depth map provided by the RGB-D sensor to fill the regions and holes that appear because of interference or due to transparent or reflective objects. Our results show that the density of depth information is increased, especially for transparent, shiny, or matte objects.},
  language  = {en}
}

@inproceedings{FerreinJacobsLakemeyer2005,
  author    = {Ferrein, Alexander and Jacobs, Stefan and Lakemeyer, Gerhard},
  title     = {Unreal Golog Bots},
  booktitle = {IJCAI-05 Workshop on Reasoning, Representation, and Learning in Computer Games},
  pages     = {31 -- 36},
  year      = {2005},
  language  = {en}
}

@inproceedings{FerreinBoehnstedtLakemeyer2007,
  author    = {Ferrein, Alexander and B{\"o}hnstedt, Lutz and Lakemeyer, Gerhard},
  title     = {Options in Readylog reloaded -- generating decision-theoretic plan libraries in Golog},
  booktitle = {KI 2007: Advances in Artificial Intelligence: 30th Annual German Conference on AI, KI 2007, Osnabr{\"u}ck, Germany, September 10-13, 2007, Proceedings},
  publisher = {Springer},
  address   = {Berlin},
  isbn      = {978-3-540-74564-8},
  pages     = {352 -- 366},
  year      = {2007},
  language  = {en}
}

@inproceedings{NeumannFerreinKallweitetal.2014,
  author    = {Neumann, Tobias and Ferrein, Alexander and Kallweit, Stephan and Scholl, Ingrid},
  title     = {Towards a mobile mapping robot for underground mines},
  booktitle = {7th Conference of Robotics and Mechatronics (RobMech 2014), 27th and 28th Nov. 2014, Cape Town, South Africa},
  pages     = {1 -- 6},
  year      = {2014},
  language  = {en}
}

@article{FerreinMeyer2012,
  author    = {Ferrein, Alexander and Meyer, Thomas},
  title     = {A Brief Overview of Artificial Intelligence in South Africa},
  journal   = {AI Magazine},
  volume    = {33},
  number    = {1},
  publisher = {AAAI},
  address   = {Menlo Park},
  issn      = {0738-4602},
  doi       = {10.1609/aimag.v33i1.2357},
  pages     = {99 -- 101},
  year      = {2012},
  abstract  = {A notable development in South Africa in recent years is the establishment of a number of research hubs involved in AI activities ranging from mobile robotics and computational intelligence, to knowledge representation and reasoning, and human language technologies.
    In this survey we take the reader through a quick tour of the research being conducted at these hubs, and touch on an initiative to maintain and extend the current level of interest in AI research in the country.},
  language  = {en}
}

@article{SchulteTiggesFoersterNikolovskietal.2022,
  author    = {Schulte-Tigges, Joschua and F{\"o}rster, Marco and Nikolovski, Gjorgji and Reke, Michael and Ferrein, Alexander and Kaszner, Daniel and Matheis, Dominik and Walter, Thomas},
  title     = {Benchmarking of various LiDAR sensors for use in self-driving vehicles in real-world environments},
  journal   = {Sensors},
  volume    = {22},
  number    = {19},
  publisher = {MDPI},
  address   = {Basel},
  issn      = {1424-8220},
  doi       = {10.3390/s22197146},
  pages     = {20 pages},
  year      = {2022},
  abstract  = {In this paper, we report on our benchmark results for the LiDAR sensors Livox Horizon, Robosense M1, Blickfeld Cube, Blickfeld Cube Range, Velodyne Velarray H800, and Innoviz Pro. The idea was to test the sensors in different typical scenarios that were defined with real-world use cases in mind, in order to find a sensor that meets the requirements of self-driving vehicles. For this, we defined static and dynamic benchmark scenarios. In the static scenarios, neither the LiDAR sensor nor the detection target moves during the measurement. In the dynamic scenarios, the LiDAR sensor was mounted on a vehicle driving toward the detection target. We tested all of the mentioned LiDAR sensors in both scenarios, show the results regarding the detection accuracy of the targets, and discuss their usefulness for deployment in self-driving cars.},
  language  = {en}
}

@inproceedings{EltesterFerreinSchiffer2020,
  author    = {Eltester, Niklas Sebastian and Ferrein, Alexander and Schiffer, Stefan},
  title     = {A smart factory setup based on the RoboCup Logistics League},
  booktitle = {2020 IEEE Conference on Industrial Cyberphysical Systems (ICPS)},
  publisher = {IEEE},
  address   = {New York, NY},
  doi       = {10.1109/ICPS48405.2020.9274766},
  pages     = {297 -- 302},
  year      = {2020},
  abstract  = {In this paper we present SMART-FACTORY, a setup for a research and teaching facility in industrial robotics that is based on the RoboCup Logistics League. It is driven by the need for developing and applying solutions for digital production. Digitization receives constantly increasing attention in many areas, especially in industry. The common theme is to make things smart by using intelligent computer technology. Especially in the last decade there have been many attempts to improve existing processes in factories, for example in production logistics, also by deploying cyber-physical systems. An initiative that explores challenges and opportunities for robots in such a setting is the RoboCup Logistics League. Since its foundation in 2012, it has been an international effort for research and education in an intra-warehouse logistics scenario. During seven years of competition, a lot of knowledge and experience regarding autonomous robots has been gained. This knowledge and experience shall provide the basis for further research on the challenges of future production. The focus of our SMART-FACTORY is to create a stimulating environment for research on logistics robotics, for teaching activities in computer science and electrical engineering programmes, and for industrial users to study and explore the feasibility of future technologies.
    Building on a very successful history in the RoboCup Logistics League, we aim to provide stakeholders with a dedicated facility oriented toward their individual needs.},
  language  = {en}
}