In this paper, we provide asymptotic results concerning (generalized) Bayesian inference for certain dynamical systems based on a large deviation approach. Given a sequence of observations $y$, a class of model processes parameterized by $\vartheta \in \Theta$ which can be characterized as a stochastic process $X^\vartheta$ or a measure $\mu_\vartheta$, and a loss function $L$ which measures the error between $y$ and a realization of $X^\vartheta$, we specify the generalized posterior distribution $\pi_t(\vartheta \mid y)$. The goal of this paper is to study the asymptotic behavior of $\pi_t(\vartheta \mid y)$ as $t \to \infty$. In particular, we state conditions on the model family $\{\mu_\vartheta\}_{\vartheta \in \Theta}$ and the loss function $L$ such that the posterior distribution converges. The two conditions we require are: (1) a conditional large deviation behavior for a single $X^\vartheta$, and (2) an exponential continuity condition over the model family for the map from the parameter $\vartheta$ to the loss incurred between $X^\vartheta$ and the observation sequence $y$. The proposed framework is quite general; we apply it to two very different classes of dynamical systems: continuous time hypermixing processes and Gibbs processes on shifts of finite type. We also show that the generalized posterior distribution concentrates asymptotically on those parameters that minimize the expected loss and a divergence term, hence proving posterior consistency.
%0 Journal Article
%1 Su2021-pj
%A Su, Langxuan
%A Mukherjee, Sayan
%D 2021
%I arXiv
%K
%T A large deviation approach to posterior consistency in dynamical systems
%X In this paper, we provide asymptotic results concerning (generalized) Bayesian inference for certain dynamical systems based on a large deviation approach. Given a sequence of observations $y$, a class of model processes parameterized by $\vartheta \in \Theta$ which can be characterized as a stochastic process $X^\vartheta$ or a measure $\mu_\vartheta$, and a loss function $L$ which measures the error between $y$ and a realization of $X^\vartheta$, we specify the generalized posterior distribution $\pi_t(\vartheta \mid y)$. The goal of this paper is to study the asymptotic behavior of $\pi_t(\vartheta \mid y)$ as $t \to \infty$. In particular, we state conditions on the model family $\{\mu_\vartheta\}_{\vartheta \in \Theta}$ and the loss function $L$ such that the posterior distribution converges. The two conditions we require are: (1) a conditional large deviation behavior for a single $X^\vartheta$, and (2) an exponential continuity condition over the model family for the map from the parameter $\vartheta$ to the loss incurred between $X^\vartheta$ and the observation sequence $y$. The proposed framework is quite general; we apply it to two very different classes of dynamical systems: continuous time hypermixing processes and Gibbs processes on shifts of finite type. We also show that the generalized posterior distribution concentrates asymptotically on those parameters that minimize the expected loss and a divergence term, hence proving posterior consistency.
@article{Su2021-pj,
  abstract  = {In this paper, we provide asymptotic results concerning (generalized) Bayesian inference for certain dynamical systems based on a large deviation approach. Given a sequence of observations $y$, a class of model processes parameterized by $\vartheta \in \Theta$ which can be characterized as a stochastic process $X^\vartheta$ or a measure $\mu_\vartheta$, and a loss function $L$ which measures the error between $y$ and a realization of $X^\vartheta$, we specify the generalized posterior distribution $\pi_t(\vartheta \mid y)$. The goal of this paper is to study the asymptotic behavior of $\pi_t(\vartheta \mid y)$ as $t \to \infty$. In particular, we state conditions on the model family $\{\mu_\vartheta\}_{\vartheta \in \Theta}$ and the loss function $L$ such that the posterior distribution converges. The two conditions we require are: (1) a conditional large deviation behavior for a single $X^\vartheta$, and (2) an exponential continuity condition over the model family for the map from the parameter $\vartheta$ to the loss incurred between $X^\vartheta$ and the observation sequence $y$. The proposed framework is quite general; we apply it to two very different classes of dynamical systems: continuous time hypermixing processes and Gibbs processes on shifts of finite type. We also show that the generalized posterior distribution concentrates asymptotically on those parameters that minimize the expected loss and a divergence term, hence proving posterior consistency.},
  added-at  = {2024-09-10T11:56:37.000+0200},
  author    = {Su, Langxuan and Mukherjee, Sayan},
  biburl    = {https://puma.scadsai.uni-leipzig.de/bibtex/243e007e60aece486a95d75da6ad91c98/scadsfct},
  interhash = {b8e034a0a689f24069d9424e8efb15d1},
  intrahash = {43e007e60aece486a95d75da6ad91c98},
  keywords  = {},
  note      = {arXiv preprint},
  publisher = {arXiv},
  timestamp = {2024-09-10T15:15:57.000+0200},
  title     = {A Large Deviation Approach to Posterior Consistency in Dynamical Systems},
  year      = {2021}
}