@article{SchifferFerrein2018,
  author = {Schiffer, Stefan and Ferrein, Alexander},
  title = {ERIKA—Early Robotics Introduction at Kindergarten Age},
  journal = {Multimodal Technologies and Interaction},
  volume = {2},
  number = {4},
  publisher = {MDPI},
  address = {Basel},
  issn = {2414-4088},
  doi = {10.3390/mti2040064},
  pages = {15},
  year = {2018},
  abstract = {In this work, we report on our attempt to design and implement an early introduction to basic robotics principles for children at kindergarten age. One of the main challenges of this effort is to explain complex robotics content in a way that allows pre-school children to follow the basic principles and ideas, using examples from their world of experience. What sets our effort apart from other work is that part of the lecturing is actually done by a robot itself and that a quiz at the end of the lesson is done using robots as well. The humanoid robot Pepper from SoftBank, which is a great platform for human-robot interaction experiments, was used to present a lecture on robotics by reading out the content to the children, making use of its speech synthesis capability. A quiz in Runaround-game-show style after the lecture activated the children to recap the content they had acquired about how mobile robots work in principle. In this quiz, two LEGO Mindstorms EV3 robots were used to implement a strongly interactive scenario. Besides the thrill of being exposed to a mobile robot that would also react to them, the children were very excited and at the same time very concentrated. We received very positive feedback from the children as well as from their educators. To the best of our knowledge, this is one of only a few attempts to use a robot like Pepper not as a tele-teaching tool, but as the teacher itself, in order to engage pre-school children with complex robotics content.},
  language = {en}
}

@inproceedings{RekePeterSchulteTiggesetal.2020,
  author = {Reke, Michael and Peter, Daniel and Schulte-Tigges, Joschua and Schiffer, Stefan and Ferrein, Alexander and Walter, Thomas and Matheis, Dominik},
  title = {A Self-Driving Car Architecture in ROS2},
  booktitle = {2020 International SAUPEC/RobMech/PRASA Conference, Cape Town, South Africa},
  isbn = {978-1-7281-4162-6},
  doi = {10.1109/SAUPEC/RobMech/PRASA48453.2020.9041020},
  pages = {1--6},
  year = {2020},
  language = {en}
}

@inproceedings{NikolovskiRekeElsenetal.2021,
  author = {Nikolovski, Gjorgji and Reke, Michael and Elsen, Ingo and Schiffer, Stefan},
  title = {Machine learning based 3D object detection for navigation in unstructured environments},
  booktitle = {2021 IEEE Intelligent Vehicles Symposium Workshops (IV Workshops)},
  publisher = {IEEE},
  isbn = {978-1-6654-7921-9},
  doi = {10.1109/IVWorkshops54471.2021.9669218},
  pages = {236--242},
  year = {2021},
  abstract = {In this paper, we investigate the use of deep neural networks for 3D object detection in uncommon, unstructured environments such as an open-pit mine. While neural nets are frequently used for object detection in regular autonomous driving applications, driving scenarios outside ordinary street traffic pose additional challenges. For one, collecting appropriate data sets to train the networks is an issue. For another, testing the performance of trained networks often requires tailored integration with the particular domain as well.
While there exist different solutions for these problems in regular autonomous driving, only very few approaches work equally well for special domains. We address both of these challenges in this work. First, we discuss two possible ways of acquiring data for training and evaluation: we evaluate a semi-automated annotation of recorded LIDAR data, and we examine synthetic data generation. Using these data sets, we train and test different deep neural networks for the task of object detection. Second, we propose a possible integration of a ROS2 detector module for an autonomous driving platform. Finally, we present the performance of three state-of-the-art deep neural networks for 3D object detection on a synthetic data set and on a smaller one containing a characteristic object from an open-pit mine.},
  language = {en}
}

@inproceedings{NeumannDuelbergSchifferetal.2016,
  author = {Neumann, Tobias and D{\"u}lberg, Enno and Schiffer, Stefan and Ferrein, Alexander},
  title = {A rotating platform for swift acquisition of dense 3D point clouds},
  booktitle = {Intelligent Robotics and Applications: 9th International Conference, ICIRA 2016, Tokyo, Japan, August 22-24, 2016, Proceedings, Part I},
  volume = {9834},
  publisher = {Springer},
  isbn = {978-3-319-43505-3},
  doi = {10.1007/978-3-319-43506-0_22},
  pages = {257--268},
  year = {2016},
  language = {en}
}

@inproceedings{MatareSchifferFerrein2019,
  author = {Matar{\'e}, Victor and Schiffer, Stefan and Ferrein, Alexander},
  title = {golog++: An integrative system design},
  booktitle = {CogRob 2018.
Cognitive Robotics Workshop: Proceedings of the 11th Cognitive Robotics Workshop 2018, co-located with the 16th International Conference on Principles of Knowledge Representation and Reasoning (KR 2018), Tempe, AZ, USA, October 27th, 2018},
  editor = {Steinbauer, Gerald and Ferrein, Alexander},
  issn = {1613-0073},
  pages = {29--35},
  year = {2019},
  language = {en}
}

@article{LimpertWiesenFerreinetal.2019,
  author = {Limpert, Nicolas and Wiesen, Patrick and Ferrein, Alexander and Kallweit, Stephan and Schiffer, Stefan},
  title = {The ROSIN Project and its Outreach to South Africa},
  journal = {R\&D Journal},
  volume = {35},
  pages = {1--6},
  year = {2019},
  language = {en}
}

@inproceedings{LimpertSchifferFerrein2015,
  author = {Limpert, Nicolas and Schiffer, Stefan and Ferrein, Alexander},
  title = {A Local Planner for Ackermann-Driven Vehicles in ROS SBPL},
  booktitle = {Proceedings of the International Conference on Pattern Recognition Association of South Africa and Robotics and Mechatronics (PRASA-RobMech), 2015},
  doi = {10.1109/RoboMech.2015.7359518},
  pages = {172--177},
  year = {2015},
  language = {en}
}

@inproceedings{KirschMatareFerreinetal.2020,
  author = {Kirsch, Maximilian and Matar{\'e}, Victor and Ferrein, Alexander and Schiffer, Stefan},
  title = {Integrating golog++ and ROS for Practical and Portable High-level Control},
  booktitle = {12th International Conference on Agents and Artificial Intelligence},
  doi = {10.5220/0008984406920699},
  year = {2020},
  language = {en}
}

@inproceedings{HofmannMatareSchifferetal.2018,
  author = {Hofmann, Till and Matar{\'e}, Victor and Schiffer, Stefan and Ferrein, Alexander and Lakemeyer, Gerhard},
  title = {Constraint-based online transformation of abstract plans into executable robot actions},
  booktitle = {Proceedings of the 2018 AAAI Spring Symposium on Integrating Representation, Reasoning, Learning, and Execution for Goal Directed Autonomy},
  pages = {549--553},
  year = {2018},
  language = {en}
}

@incollection{GoeckelSchifferWagneretal.2015,
  author = {Goeckel, Tom and Schiffer, Stefan and Wagner, Hermann and Lakemeyer, Gerhard},
  title = {The Video Conference Tool Robot ViCToR},
  booktitle = {Intelligent Robotics and Applications: 8th International Conference, ICIRA 2015, Portsmouth, UK, August 24-27, 2015, Proceedings, Part II},
  publisher = {Springer},
  isbn = {978-3-319-22876-1},
  doi = {10.1007/978-3-319-22876-1_6},
  pages = {61--73},
  year = {2015},
  abstract = {We present a robotic tool that autonomously follows a conversation to enable remote presence in video conferencing. When humans participate in a meeting with the help of video conferencing tools, it is crucial that they are able to follow the conversation with both acoustic and visual input. To this end, we design and implement a video conferencing tool robot that uses binaural sound source localization as its main cue to autonomously orient towards the currently talking speaker.
To increase the robustness of the acoustic cue against noise, we supplement the sound localization with a source detection stage. We also include a simple onset detector to retain fast response times. Since we use only two microphones, we are confronted with ambiguities as to whether a source is in front of or behind the device. We resolve these ambiguities with the help of face detection and additional movements. We tailor the system to our target scenarios in experiments with a four-minute scripted conversation. In these experiments, we evaluate the influence of different system settings on the responsiveness and accuracy of the device.},
  language = {en}
}