In the autonomous driving field, the fusion of human knowledge into Deep Reinforcement Learning (DRL) is often based on human demonstrations recorded in a simulated environment, which limits the generalization of the learned policy and its feasibility in real-world traffic. We propose a two-stage DRL method for training a car-following agent that modifies its policy by leveraging real-world human driving experience and achieves performance superior to that of a pure DRL agent. The DRL agent is trained within the CARLA framework in combination with the Robot Operating System (ROS). For evaluation, we designed different driving scenarios to compare the proposed two-stage DRL car-following agent with other agents. After extracting the “good” behavior from the human driver, the agent becomes more efficient and reasonable, which makes it more suitable for Human–Robot Interaction (HRI) traffic.
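
As a rough illustration of the two-stage idea in the abstract (not the authors' implementation), the PyTorch sketch below pretrains a small car-following actor on human driving samples and then fine-tunes it with a DDPG-style actor-critic update. Everything here is an assumption for illustration: the state layout, action bounds, reward, toy dynamics, and the synthetic "human" batch are stand-ins, and behavior cloning stands in for the paper's human-experience stage, whose exact form the abstract does not specify. In the paper, stage one draws on a real-world driving dataset and stage two runs in CARLA via ROS.

    import copy
    import torch
    import torch.nn as nn

    def mlp(d_in, d_out):
        return nn.Sequential(nn.Linear(d_in, 64), nn.ReLU(),
                             nn.Linear(64, 64), nn.ReLU(),
                             nn.Linear(64, d_out))

    # Assumed state: (gap [m], ego speed [m/s], relative speed [m/s]);
    # action: normalized longitudinal acceleration in [-1, 1].
    actor = nn.Sequential(mlp(3, 1), nn.Tanh())
    critic = mlp(4, 1)                                    # Q(s, a)
    actor_t, critic_t = copy.deepcopy(actor), copy.deepcopy(critic)
    opt_a = torch.optim.Adam(actor.parameters(), lr=1e-3)
    opt_c = torch.optim.Adam(critic.parameters(), lr=1e-3)

    # Stage 1: behavior cloning on (here: synthetic) human driving samples.
    s_h = torch.rand(256, 3) * torch.tensor([50.0, 30.0, 10.0])
    a_h = torch.tanh(-s_h[:, 2:3] / 5.0)                  # stand-in "human" actions
    for _ in range(200):
        loss_bc = nn.functional.mse_loss(actor(s_h), a_h)
        opt_a.zero_grad(); loss_bc.backward(); opt_a.step()

    # Stage 2: DDPG-style fine-tuning on toy transitions standing in for
    # CARLA/ROS rollouts (replay buffer omitted for brevity).
    gamma, tau = 0.99, 0.005
    def soft_update(net, tgt):
        for p, pt in zip(net.parameters(), tgt.parameters()):
            pt.data.mul_(1 - tau).add_(tau * p.data)

    for _ in range(200):
        s = torch.rand(256, 3) * torch.tensor([50.0, 30.0, 10.0])
        a = (actor(s) + 0.1 * torch.randn(256, 1)).clamp(-1, 1).detach()
        r = -(s[:, 0:1] - 20.0).abs() / 20.0              # reward: hold a ~20 m gap
        s2 = s + 0.1 * torch.cat([s[:, 2:3], 3 * a, -3 * a], dim=1)
        with torch.no_grad():                             # TD target from target nets
            y = r + gamma * critic_t(torch.cat([s2, actor_t(s2)], dim=1))
        loss_c = nn.functional.mse_loss(critic(torch.cat([s, a], dim=1)), y)
        opt_c.zero_grad(); loss_c.backward(); opt_c.step()
        loss_a = -critic(torch.cat([s, actor(s)], dim=1)).mean()
        opt_a.zero_grad(); loss_a.backward(); opt_a.step()
        soft_update(actor, actor_t); soft_update(critic, critic_t)

The point of the pretraining stage is that the DRL fine-tuning then explores around human-like behavior instead of starting from a random policy, which is the mechanism the abstract credits for the improvement over a pure DRL agent.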
@article{LI2023103987,
author = {Li, Dianzhao and Okhrin, Ostap},
doi = {10.1016/j.trc.2022.103987},
issn = {0968-090X},
journal = {Transportation Research Part C: Emerging Technologies},
keywords = {CARLA, car-following, DRL, ROS, real dataset, driving model},
pages = 103987,
title = {Modified DDPG car-following model with a real-world human driving experience with CARLA simulator},
url = {https://www.sciencedirect.com/science/article/pii/S0968090X22004004},
volume = 147,
year = 2023
}