Suppose we observe a dynamical system and we wish to estimate its parameters. One way of doing this is to use a Monte Carlo method. But if the system is chaotic then this approach is highly unlikely to work, as small changes in the proposals for the parameters will result in large changes in the path.
Here's a well-known chaotic dynamical system:
\begin{align} \frac{\mathrm{d}x}{\mathrm{d}t} &= \alpha (y - x), \\ \frac{\mathrm{d}y}{\mathrm{d}t} &= x (\rho - z) - y, \\ \frac{\mathrm{d}z}{\mathrm{d}t} &= x y - \beta z. \end{align}
And here is its representation in LibBi together with some
instructions to generate noisy observations of the $x$ component of the state.
library(readr)

# Path to the LibBi specification of the data-generating Lorenz model.
model_file_name <- "LorenzGenerate.bi"

# Read the model source and echo it so it appears in the rendered notebook.
generator_source <- read_file(model_file_name)
writeLines(generator_source)
model Lorenz {
const rho = 45.92
const beta = 4.0
const alpha = 16.0
state X, Y, Z
obs X_obs
sub initial {
X ~ log_normal(log(1.0), 0.00002)
Y ~ log_normal(log(1.0), 0.00002)
Z ~ log_normal(log(1.0), 0.00002)
}
sub transition(delta = 0.0001) {
ode {
dX/dt = alpha * (Y - X)
dY/dt = X * (rho - Z) - Y
dZ/dt = X * Y - beta * Z
}
}
sub observation {
X_obs ~ normal(X, 0.2)
}
}
library('rbi')
library(ggplot2)
# Compile the LibBi model read above.
Lorenz <- bi_model(model_file_name)
# Simulation horizon and number of observation times.
# NOTE(review): `T` shadows the built-in alias for TRUE; later code
# (e.g. end_time = T) relies on this binding, so it is kept as-is.
T <- 10.0
nObs <- 100
# Initial state for the deterministic part of the simulation.
init_parameters <- list(X = 1, Y = 1, Z = 1)
# Simulate the model forward and record noisy observations of X.
synthetic_dataset <- bi_generate_dataset(end_time=T, model=Lorenz,
init=init_parameters,
noutputs = nObs)
# Pull the generated NetCDF output back into R as a data frame.
synthetic_data <- bi_read(synthetic_dataset)
synthetic_df <- as.data.frame(synthetic_data)
tail(synthetic_df)
X.time | X.value | Y.time | Y.value | Z.time | Z.value | X_obs.time | X_obs.value | clock | |
---|---|---|---|---|---|---|---|---|---|
96 | 9.5 | -14.764658 | 9.5 | -19.536008 | 9.5 | 39.76409 | 9.5 | -15.044416 | 96309 |
97 | 9.6 | -17.611354 | 9.6 | -14.413804 | 9.6 | 53.75645 | 9.6 | -17.397395 | 96309 |
98 | 9.7 | -9.657817 | 9.7 | -5.782971 | 9.7 | 45.82000 | 9.7 | -10.099852 | 96309 |
99 | 9.8 | -8.019435 | 9.8 | -9.629767 | 9.8 | 35.46636 | 9.8 | -8.105424 | 96309 |
100 | 9.9 | -14.220221 | 9.9 | -19.872234 | 9.9 | 37.34805 | 9.9 | -14.511350 | 96309 |
101 | 10.0 | -18.793750 | 10.0 | -15.760650 | 10.0 | 55.08816 | 10.0 | -18.686090 | 96309 |
# Plot the simulated X trajectory of the generated Lorenz path.
p <- ggplot(synthetic_df, aes(x = X.time)) +
  geom_path(aes(y = X.value, colour = "alpha 16.0")) +
  labs(title = "Lorenz", x = "Time", y = "X Value") +
  theme(legend.position = "bottom",
        plot.title = element_text(hjust = 0.5))
ggsave(filename = "diagrams/xpath.svg", plot = p)
Saving 10 x 5 in image
We can check that the system becomes chaotic, in the sense that small changes in initial conditions lead to qualitatively different behaviour.
# Base plot; one trajectory per perturbed initial condition is layered on.
path0 <- ggplot() +
  labs(title = "Lorenz", x = "Time", y = "Value") +
  theme(legend.position = "bottom",
        plot.title = element_text(hjust = 0.5))

set.seed(42)
T <- 20.0

# Tiny random perturbations of the initial state produce qualitatively
# different trajectories — the hallmark of chaotic dynamics.
for (path_colour in c("red", "blue", "green")) {
  init_parameters <- list(X = 1 + rnorm(1, 0.0, 0.01),
                          Y = 1 + rnorm(1, 0.0, 0.01),
                          Z = 1 + rnorm(1, 0.0, 0.01))
  synthetic_dataset <- bi_generate_dataset(end_time = T, model = Lorenz,
                                           init = init_parameters,
                                           noutputs = nObs)
  synthetic_data <- bi_read(synthetic_dataset)
  synthetic_df <- as.data.frame(synthetic_data)
  path0 <- path0 +
    geom_line(data = synthetic_df, aes(x = X.time, y = X.value),
              color = path_colour)
}
ggsave(filename = "diagrams/xpath4.svg", plot = path0)
Saving 10 x 5 in image
Alternatively we can model the parameter as part of the state and assume that it undergoes Brownian Motion. This seems reasonable: the further we go into the future, the less certain we are about its value. An improvement might be to model it as an Ornstein-Uhlenbeck process which is mean-reverting - after all we don't expect the parameter to take on arbitrarily large or small values but let's learn to walk before we learn to run.
Since we expect our parameter to be positive, let's model it as geometric Brownian motion.
\begin{equation} \mathrm{d}\alpha = \alpha\sigma\mathrm{d}W_t \end{equation}
By Itô's lemma, we have
\begin{equation} \mathrm{d}(\log \alpha) = -\frac{\sigma^2}{2}\mathrm{d}t + \sigma\mathrm{d}W_t \end{equation}
We can model this in libbi as:
# Show the LibBi model in which log(alpha) is itself a stochastic state.
model_file_name <- "LorenzState.bi"
state_model_source <- read_file(model_file_name)
writeLines(state_model_source)
model LorenzState {
const rho = 45.92
const beta = 4
const h = 0.1; // time step
const delta_abs = 1.0e-3; // absolute error tolerance
const delta_rel = 1.0e-6; // relative error tolerance
state X, Y, Z
state ln_alpha
param mu, sigma
noise w
obs X_obs
sub parameter {
mu ~ uniform(12.0, 20.0)
sigma ~ uniform(0.0, 0.5)
}
sub proposal_parameter {
mu ~ truncated_gaussian(mu, 0.02, 12.0, 20.0);
sigma ~ truncated_gaussian(sigma, 0.01, 0.0, 0.5);
}
sub initial {
X ~ log_normal(log(1.0), 0.2)
Y ~ log_normal(log(1.0), 0.2)
Z ~ log_normal(log(1.0), 0.2)
ln_alpha ~ gaussian(log(mu), sigma)
}
sub transition(delta = h) {
w ~ normal (0.0, sqrt(h))
ode(h = h, atoler = delta_abs, rtoler = delta_rel, alg = 'RK4(3)') {
dX/dt = exp(ln_alpha) * (Y - X)
dY/dt = X * (rho - Z) - Y
dZ/dt = X * Y - beta * Z
dln_alpha/dt = -sigma * sigma / 2 - sigma * w / h
}
}
sub observation {
X_obs ~ normal(X, 0.2)
}
}
We can then run the model.
# Compile the state-augmented model and wrap it in a libbi object.
LorenzState <- bi_model(model_file_name)
bi_state_model <- libbi(model=LorenzState)
# Run a bootstrap particle filter against the synthetic observations,
# sampling the observation path as well (sample_obs = TRUE).
# ess_rel = 1 forces resampling at every step.
bi_state <- filter(bi_state_model,
nparticles = 8192,
nthreads = 1,
end_time = T,
obs = synthetic_dataset,
init = init_parameters,
ess_rel = 1,
sample_obs = TRUE)
# Inspect the NetCDF output and summarise the filter run.
bi_file_summary(bi_state$output_file_name)
bi_state
summary(bi_state)
File /private/var/folders/h1/bkwn2ct12hvb6__dzrk5gwx80000gn/T/RtmpQ7zb3k/LorenzState1176a1bafc6c5/LorenzState_output1176a1d4a68c2.nc (NC_FORMAT_NETCDF4):
13 variables (excluding dimension variables):
double time[nr] (Contiguous storage)
double w[np,nr] (Contiguous storage)
double X[np,nr] (Contiguous storage)
double Y[np,nr] (Contiguous storage)
double Z[np,nr] (Contiguous storage)
double ln_alpha[np,nr] (Contiguous storage)
double mu[] (Contiguous storage)
double sigma[] (Contiguous storage)
8 byte int clock[] (Contiguous storage)
int ancestor[np,nr] (Contiguous storage)
double logweight[np,nr] (Contiguous storage)
double loglikelihood[] (Contiguous storage)
double X_obs[np,nr] (Contiguous storage)
2 dimensions:
np Size:8192
nr Size:101
3 global attributes:
libbi_schema: ParticleFilter
libbi_schema_version: 1
libbi_version: 1.4.1
Min. | 1st Qu. | Median | Mean | 3rd Qu. | Max. | |
---|---|---|---|---|---|---|
mu | 14.1060017 | 14.1060017 | 14.1060017 | 14.1060017 | 14.1060017 | 14.1060017 |
sigma | 0.2652258 | 0.2652258 | 0.2652258 | 0.2652258 | 0.2652258 | 0.2652258 |
And then after some manipulation, draw the path of the "state" parameter.
# Extract the particle-filter output and compute posterior-mean
# trajectories of the states and of ln(alpha).
output <- bi_read(bi_state)

# Log-weights arranged as a (time x particle) matrix.
logw <- xtabs(value ~ time + np, data = output$logweight, addNA = TRUE)
X <- output$X$value
Y <- output$Y$value
Z <- output$Z$value
A <- output$ln_alpha$value

# Turn a vector of log-weights into normalised weights, subtracting
# the maximum first for numerical stability.
log2normw <- function(lw) {
  w <- exp(lw - max(lw))
  w / sum(w)
}

w <- t(apply(X = logw, MARGIN = 1, FUN = log2normw))

# Weighted posterior means at each observation time.
Xmeans <- apply(X = X * w, MARGIN = 1, FUN = sum)
# Fixed: the original computed Ymeans from X * w (copy-paste slip).
Ymeans <- apply(X = Y * w, MARGIN = 1, FUN = sum)
Zmeans <- apply(X = Z * w, MARGIN = 1, FUN = sum)
Ameans <- apply(X = A * w, MARGIN = 1, FUN = sum)

# Line the posterior means up against the data that generated them.
synthetic_data <- bi_read(synthetic_dataset)
X_original <- synthetic_data$X$value
Y_original <- synthetic_data$Y$value
Z_original <- synthetic_data$Z$value
synthetic_df <- as.data.frame(synthetic_data)
synthetic_df$Xmeans <- Xmeans
synthetic_df$Ymeans <- Ymeans
synthetic_df$Zmeans <- Zmeans
synthetic_df$Ameans <- Ameans
# Plot the posterior-mean path of alpha (exponentiating ln_alpha back).
pAmeans <- ggplot(synthetic_df, aes(x = X.time)) +
  geom_path(aes(y = exp(Ameans), colour = "Ameans")) +
  labs(title = "Lorenz", x = "Time", y = "Value") +
  ylim(0.0, max(exp(Ameans))) +
  theme(legend.position = "bottom",
        plot.title = element_text(hjust = 0.5))
simpleWarning in if (class(res$value) == "help_files_with_topic") {: the condition has length > 1 and only the first element will be used
ggsave(filename = "diagrams/xpath5.svg", plot = pAmeans)
Saving 10 x 5 in image
We can try inferring the parameter for the different chaotic solutions.
# Generate three synthetic datasets from perturbed initial conditions,
# keeping each dataset alongside the initial values that produced it.
dataset_list <- list()
parameters_list <- list()
for (k in seq_len(3)) {
  init_parameters <- list(X = 1 + rnorm(1, 0.0, 0.01),
                          Y = 1 + rnorm(1, 0.0, 0.01),
                          Z = 1 + rnorm(1, 0.0, 0.01))
  parameters_list[[k]] <- init_parameters
  synthetic_dataset <- bi_generate_dataset(end_time = T, model = Lorenz,
                                           init = init_parameters,
                                           noutputs = nObs)
  dataset_list[[k]] <- synthetic_dataset
}
# Run the particle filter against each of the three datasets and keep
# the posterior-mean trajectories of the states and of ln(alpha).
X_list <- list()
Y_list <- list()
Z_list <- list()
A_list <- list()
for (i in c(1,2,3)) {
  bi_state <- filter(bi_state_model, nparticles = 8192, nthreads = 1,
                     end_time = T, obs = dataset_list[[i]],
                     init = parameters_list[[i]], ess_rel = 1,
                     sample_obs = TRUE)
  output <- bi_read(bi_state)
  # Normalised particle weights per time step.
  logw <- xtabs(value ~ time + np, data = output$logweight, addNA = TRUE)
  w <- t(apply(X = logw, MARGIN = 1, FUN = log2normw))
  X <- output$X$value
  Y <- output$Y$value
  Z <- output$Z$value
  A <- output$ln_alpha$value
  X_list[[i]] <- apply(X = X * w, MARGIN = 1, FUN = sum)
  # Fixed: the original computed Y_list from X * w (copy-paste slip).
  Y_list[[i]] <- apply(X = Y * w, MARGIN = 1, FUN = sum)
  Z_list[[i]] <- apply(X = Z * w, MARGIN = 1, FUN = sum)
  A_list[[i]] <- apply(X = A * w, MARGIN = 1, FUN = sum)
}
# Empty base plot; the inferred alpha paths are layered on below.
path2 <- ggplot() +
  labs(title = "Lorenz", x = "Time", y = "Value") +
  theme(legend.position = "bottom",
        plot.title = element_text(hjust = 0.5))
simpleWarning in if (class(res$value) == "help_files_with_topic") {: the condition has length > 1 and only the first element will be used
# Overlay the inferred alpha trajectory for each of the three runs.
for (run in c(1, 2, 3)) {
  synthetic_data <- bi_read(dataset_list[[run]])
  synthetic_df <- as.data.frame(synthetic_data)
  synthetic_df$Ameans <- exp(A_list[[run]])
  path2 <- path2 +
    geom_line(data = synthetic_df, aes(x = X.time, y = Ameans),
              color = "blue")
}
ggsave(filename = "diagrams/xpath7.svg", plot = path2)
Saving 10 x 5 in image
And if we take the mean of the tails then we get pretty decent estimates of the parameter.
# Average alpha over the last 50 time points of each run; by then the
# filter has largely forgotten the vague initial condition.
x <- lapply(seq_len(3), function(run) tail(exp(A_list[[run]]), n = 50))
for (i in 1:3) print(mean(x[[i]]))
[1] 15.92264
[1] 16.00402
[1] 16.19887
# Spread of the inferred parameter over the same window.
for (i in seq_len(3)) print(sd(x[[i]]))
[1] 1.170018
[1] 0.6070526
[1] 0.763862
This notebook can be downloaded from