Naturally inspired designs of training environments for reinforcement learning (RL) often suffer from highly skewed encounter probabilities: a small subset of experiences is encountered frequently, while extreme experiences remain rare. Despite recent algorithmic advancements, research has demonstrated that such environments present significant challenges for RL algorithms. In this study, we first demonstrate that traditional training-environment designs for RL-based dynamic obstacle avoidance yield extremely unbalanced obstacle-encounter probabilities, such that high-risk scenarios with multiple threatening obstacles are rare. To address this limitation, we propose a traffic-type-independent training environment that gives us control over the difficulty of obstacle encounters. This allows us to deliberately shift obstacle-encounter probabilities towards high-risk experiences, which are assessed via two metrics: the number of obstacles involved and an existing collision risk metric. Our findings reveal that shifting the training focus towards higher-risk experiences, from which the agent learns, significantly improves the agent's final performance. To validate the generalizability of our approach, we designed and evaluated two realistic use cases: a mobile robot and a maritime ship, each facing the threat of approaching obstacles. Both applications yielded consistent results, underscoring the broad applicability of our approach across application contexts, independent of the agent's dynamics. Furthermore, adding Gaussian noise to the sensor signals and incorporating different non-linear obstacle behaviors resulted in only marginal performance degradation, demonstrating the robustness of the trained agent in handling environmental uncertainties.
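To make the core idea concrete, below is a minimal sketch of risk-weighted scenario sampling. It is not the authors' implementation: the DCPA/TCPA-style risk score, its length scales, and the helper names (collision_risk, sample_high_risk_scenario) are assumptions chosen purely for illustration; the paper relies on an existing collision risk metric cited therein.

import numpy as np

def collision_risk(rel_pos, rel_vel, d_scale=50.0, t_scale=60.0, eps=1e-9):
    # Illustrative DCPA/TCPA-style risk score in [0, 1] (an assumption, not
    # the paper's metric): encounters whose closest point of approach is
    # nearer and sooner score as riskier.
    speed_sq = float(np.dot(rel_vel, rel_vel))
    tcpa = max(0.0, -float(np.dot(rel_pos, rel_vel)) / (speed_sq + eps))
    dcpa = float(np.linalg.norm(rel_pos + tcpa * rel_vel))
    return float(np.exp(-dcpa / d_scale) * np.exp(-tcpa / t_scale))

def sample_high_risk_scenario(rng, n_candidates=32, n_obstacles=4):
    # Draw random candidate encounters and keep one with probability
    # proportional to its summed risk, shifting the training distribution
    # towards high-risk experiences.
    candidates = []
    for _ in range(n_candidates):
        rel_pos = rng.uniform(-500.0, 500.0, size=(n_obstacles, 2))
        rel_vel = rng.uniform(-10.0, 10.0, size=(n_obstacles, 2))
        risk = sum(collision_risk(p, v) for p, v in zip(rel_pos, rel_vel))
        candidates.append((risk, rel_pos, rel_vel))
    risks = np.array([c[0] for c in candidates])
    return candidates[rng.choice(len(candidates), p=risks / risks.sum())]

rng = np.random.default_rng(seed=0)
risk, rel_pos, rel_vel = sample_high_risk_scenario(rng)

The same weighting could equally be driven by the number of threatening obstacles instead of, or in addition to, a continuous risk score.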
%0 Journal Article
%1 HART2024127097
%A Hart, Fabian
%A Okhrin, Ostap
%D 2024
%J Neurocomputing
%K Dynamic obstacle avoidance; Reinforcement learning; Training environment; Collision risk metric
%P 127097
%R 10.1016/j.neucom.2023.127097
%T Enhanced method for reinforcement learning based dynamic obstacle avoidance by assessment of collision risk
%U https://www.sciencedirect.com/science/article/pii/S0925231223012201
%V 568
@article{HART2024127097,
author = {Hart, Fabian and Okhrin, Ostap},
doi = {10.1016/j.neucom.2023.127097},
issn = {0925-2312},
journal = {Neurocomputing},
keywords = {Dynamic obstacle avoidance, Reinforcement learning, Training environment, Collision risk metric},
pages = {127097},
title = {Enhanced method for reinforcement learning based dynamic obstacle avoidance by assessment of collision risk},
url = {https://www.sciencedirect.com/science/article/pii/S0925231223012201},
volume = {568},
year = {2024}
}