@inproceedings{f11c67e49261439691f6442b9f90c0b1,
title = "Probabilistic Guarantees for Safe Deep Reinforcement Learning",
abstract = "Deep reinforcement learning has been successfully applied to many control tasks, but the application of such controllers in safety-critical scenarios has been limited due to safety concerns. Rigorous testing of these controllers is challenging, particularly when they operate in probabilistic environments due to, for example, hardware faults or noisy sensors. We propose MOSAIC, an algorithm for measuring the safety of deep reinforcement learning controllers in stochastic settings. Our approach is based on the iterative construction of a formal abstraction of a controller{\textquoteright}s execution in an environment, and leverages probabilistic model checking of Markov decision processes to produce probabilistic guarantees on safe behaviour over a finite time horizon. It produces bounds on the probability of safe operation of the controller for different initial configurations and identifies regions where correct behaviour can be guaranteed. We implement and evaluate our approach on controllers trained for several benchmark control problems.",
author = "Edoardo Bacci and David Parker",
year = "2020",
month = jun,
day = "29",
language = "English",
series = "Lecture Notes in Computer Science",
publisher = "Springer",
editor = "Nathalie Bertrand and Nils Jansen",
booktitle = "Proceedings of 18th International Conference on Formal Modelling and Analysis of Timed Systems (FORMATS 2020)",
note = "18th International Conference on Formal Modelling and Analysis of Timed Systems (FORMATS 2020) ; Conference date: 01-09-2020 Through 03-09-2020",
}
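
A note on the guarantee described in the abstract, as a hedged sketch in standard probabilistic model checking notation (the textbook finite-horizon recursion, not necessarily the paper's exact formulation): for an MDP with action sets $A(s)$ and probabilistic transition function $\delta$, the maximum probability of reaching an unsafe set $F$ within $k$ steps from state $s$ satisfies

\[
  p_0(s) = \mathbf{1}[s \in F], \qquad
  p_k(s) =
  \begin{cases}
    1 & \text{if } s \in F,\\[2pt]
    \max_{a \in A(s)} \sum_{s'} \delta(s,a)(s')\, p_{k-1}(s') & \text{otherwise,}
  \end{cases}
\]

so the probability of safe operation over the horizon $k$ is bounded below by $1 - p_k(s)$ for each initial state $s$ covered by the abstraction.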