Commit

updated all likelihoods
kylajones committed Jul 22, 2024
1 parent fe2cb96 commit b5c3917
Showing 6 changed files with 48 additions and 27 deletions.
22 changes: 17 additions & 5 deletions linfa/run_experiment.py
@@ -328,16 +328,28 @@ def train(self, nf, optimizer, iteration, log, sampling=True, t=1):

# Free energy bound
if(self.model_logprior is None):
    ## - E[sum log(|det(J)|)]
    sum_log_jac = (- torch.sum(sum_log_abs_det_jacobians, 1)).mean()
    ## - E[log p(x|theta)]
    likelihood = - t * (self.model_logdensity(xk)).mean()
    ## No explicit prior: keep a zero placeholder so the logged terms stay aligned
    prior = (self.model_logdensity(xk)).mean() * 0
    ## Loss = - E[log-likelihood] - E[sum log det Jacobian]
    loss = (- torch.sum(sum_log_abs_det_jacobians, 1) - t * self.model_logdensity(xk)).mean()

else:
    ## - E[sum log(|det(J)|)]
    # This number is positive
    sum_log_jac = (- torch.sum(sum_log_abs_det_jacobians, 1)).mean()

    ## - E[log p(x|theta)]
    # This number is positive, which means the log density is negative (good)
    likelihood = - t * (self.model_logdensity(xk)).mean()

    ## - E[log p(theta)]
    # This number is positive, which means the log prior is negative (good)
    prior = - t * (self.model_logprior(xk)).mean()

    ## Loss = - E[log-likelihood] - E[log prior] - E[sum log det Jacobian]
    loss = (- torch.sum(sum_log_abs_det_jacobians, 1) - t * (self.model_logdensity(xk) + self.model_logprior(xk))).mean()

optimizer.zero_grad()
loss.backward()
optimizer.step()
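For readers skimming the diff, the following is a minimal, self-contained sketch of the bound computed above; the function name, tensor shapes, and dummy densities are illustrative assumptions, not LINFA's API.

import torch

# Sketch of the free-energy bound above (illustrative names, not LINFA's API).
# xk: flow samples (batch x dim); log_jac: per-layer log|det J| (batch x n_layers);
# log_like / log_prior: callables returning per-sample log densities (batch,).
def free_energy_loss(xk, log_jac, log_like, log_prior=None, t=1.0):
    sum_log_jac = -torch.sum(log_jac, 1)   # - sum_k log|det(J_k)|, per sample
    if log_prior is None:
        # No explicit prior: loss = - E[sum log|det J| + t * log p(D|theta)]
        return (sum_log_jac - t * log_like(xk)).mean()
    # Full bound: also subtract the tempered log prior
    return (sum_log_jac - t * (log_like(xk) + log_prior(xk))).mean()

# Toy usage with dummy densities
xk = torch.randn(8, 2)
log_jac = torch.randn(8, 4)
print(free_energy_loss(xk, log_jac,
                       log_like=lambda x: -0.5 * (x**2).sum(1),
                       log_prior=lambda x: -0.1 * x.abs().sum(1)))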
9 changes: 4 additions & 5 deletions linfa/tests/test_no_disc_TP15_gaussian.py
@@ -122,7 +122,7 @@ def log_density(calib_inputs, model, surrogate, transform):
discrepancy = surrogate.forward(model.var_in)

# Get the absolute values of the standard deviation (num_var)
stds = torch.mean(langmuir_model.defOut) * langmuir_model.stdRatio

# Get data - (num_var x num_obs)
Data = torch.tensor(langmuir_model.data[:,2:]).to(exp.device)
@@ -135,11 +135,11 @@ def log_density(calib_inputs, model, surrogate, transform):
l1 = -0.5 * np.prod(langmuir_model.data.shape[1]) * np.log(2.0 * np.pi)

# - 1 / 2 * sum_{i=1} ^ N log (sigma_i)
l2 = (-0.5 * langmuir_model.data.shape[1] * torch.log(stds)).item()

# - 1 / 2 * sum_{i = 1} ^ N {(eta_i + disc_i - y_i)^2 / sigma_i^2)}
l3 = -0.5 * torch.sum(((modelOut + discrepancy.t() - Data[:,loopA].unsqueeze(0)) / stds.t())**2, dim = 1)

if(False):
print('Compare')
print('%15s %15s %15s %15s' % ('lf out','discrep','lf+discr','obs'))
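For reference, here is a self-contained sketch of the three per-observation terms assembled above, on toy tensors; the helper name and shapes are assumptions, not code from this repository.

import math
import torch

# Sketch of the per-observation Gaussian terms (toy data; names are assumptions).
# std is the scalar noise standard deviation, matching the torch.mean(...) change above.
def gaussian_loglik_terms(model_out, data_col, std):
    n = data_col.numel()
    l1 = -0.5 * n * math.log(2.0 * math.pi)                    # - n/2 * log(2 pi)
    l2 = -0.5 * n * torch.log(std)                             # - 1/2 * sum_i log(sigma_i)
    l3 = -0.5 * torch.sum(((model_out - data_col) / std)**2)   # - 1/2 * sum_i (res_i / sigma)^2
    return l1 + l2 + l3

y_hat = torch.tensor([0.60, 0.40, 0.50])   # model plus discrepancy output
y_obs = torch.tensor([0.55, 0.42, 0.48])   # one observation column
print(gaussian_loglik_terms(y_hat, y_obs, torch.tensor(0.05)))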
@@ -164,15 +164,14 @@ def log_density(calib_inputs, model, surrogate, transform):
exp.model_logdensity = lambda x: log_density(x, exp.model, exp.surrogate, exp.transform)

# Define log prior
# TODO: can we use a half-normal or truncated normal prior? How to enforce bounds?
def log_prior(calib_inputs, transform):
# Compute transformation log Jacobian
adjust = transform.compute_log_jacob_func(calib_inputs)
# Compute the calibration inputs in the physical domain
phys_inputs = transform.forward(calib_inputs)
# Define prior moments
pr_avg = torch.tensor([[1E3, -21E3]])
pr_std = torch.tensor([[1E2, 5E2]])
# Eval log prior
l1 = -0.5 * calib_inputs.size(1) * np.log(2.0 * np.pi)
l2 = (-0.5 * torch.log(torch.prod(pr_std))).item()
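Since the hunk is truncated here, the sketch below completes the Gaussian log-prior pattern for illustration; the quadratic term l3 is an inferred assumption, and the transform's log-Jacobian (adjust) is omitted.

import numpy as np
import torch

# Sketch of the Gaussian log prior above, without the transform log-Jacobian.
# l3 is inferred from the l1/l2 pattern (the diff truncates before it).
def gaussian_log_prior(phys_inputs, pr_avg, pr_std):
    l1 = -0.5 * phys_inputs.size(1) * np.log(2.0 * np.pi)
    l2 = (-0.5 * torch.log(torch.prod(pr_std))).item()
    l3 = -0.5 * torch.sum(((phys_inputs - pr_avg) / pr_std)**2, dim=1)
    return l1 + l2 + l3

theta = torch.tensor([[950.0, -20500.0]])   # one sample in the physical domain
print(gaussian_log_prior(theta, torch.tensor([[1E3, -21E3]]), torch.tensor([[1E2, 5E2]])))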
22 changes: 15 additions & 7 deletions linfa/tests/test_no_disc_TP15_uniform.py
@@ -12,7 +12,7 @@ def run_test():

exp = experiment()
exp.name = "TP15_no_disc_uniform"
exp.flow_type = 'realnvp' # str: Type of flow (default 'realnvp')
exp.n_blocks = 15 # int: Number of hidden layers
exp.hidden_size = 100 # int: Hidden layer size for MADE in each layer (default 100)
exp.n_hidden = 1 # int: Number of hidden layers in each MADE
@@ -121,19 +121,27 @@ def log_density(calib_inputs, model, surrogate, transform):
discrepancy = surrogate.forward(model.var_in)

# Get the absolute values of the standard deviation (num_var)
stds = torch.mean(langmuir_model.defOut) * langmuir_model.stdRatio

# Get data - (num_var x num_obs)
Data = torch.tensor(langmuir_model.data[:,2:]).to(exp.device)
num_obs = Data.size(1)

# Evaluate log-likelihood:
# Loop on the available observations
for loopA in range(num_obs):
    # - n / 2 * log(2 pi)
    l1 = -0.5 * np.prod(langmuir_model.data.shape[1]) * np.log(2.0 * np.pi)

    # - 1 / 2 * sum_{i=1}^N log(sigma_i)
    l2 = -0.5 * langmuir_model.data.shape[1] * torch.log(stds)

    # - 1 / 2 * sum_{i=1}^N ((eta_i + disc_i - y_i) / sigma_i)^2
    l3 = -0.5 * torch.sum(((modelOut + discrepancy.t() - Data[:,loopA].unsqueeze(0)) / stds)**2, dim = 1)

if(False):
print('Compare')
print('%15s %15s %15s %15s' % ('lf out','discrep','lf+discr','obs'))
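Numerically, the l1 change swaps a constant that scales with every entry of the data table for one that scales with data.shape[1] only; a toy shape makes the difference concrete (the real layout of langmuir_model.data is an assumption).

import numpy as np

# Toy illustration of the l1 normalization fix (real data shape is an assumption)
shape = (3, 12)                                            # rows x columns of the data table
l1_old = -0.5 * np.prod(shape) * np.log(2.0 * np.pi)       # scales with all 36 entries: ~ -33.1
l1_new = -0.5 * np.prod(shape[1]) * np.log(2.0 * np.pi)    # scales with the column count: ~ -11.0
print(l1_old, l1_new)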
13 changes: 10 additions & 3 deletions linfa/tests/test_no_disc_TP1_gaussian.py
@@ -71,6 +71,8 @@ def run_test():

# Read data
exp.model.data = np.loadtxt('observations.csv', delimiter = ',', skiprows = 1)
# FOR REPRODUCIBILITY WITH MCMC: manually replace with the observation used in the MCMC run
exp.model.data[2] = 0.542957759886306

if(len(exp.model.data.shape) < 2):
exp.model.data = np.expand_dims(exp.model.data, axis=0)
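A standalone sketch of the override above, useful for checking the pinned value against the MCMC reference run; the file name comes from the diff, while the array layout is an assumption.

import numpy as np

# Sketch: pin the observation used by the MCMC reference run (assumed flat layout)
data = np.loadtxt('observations.csv', delimiter=',', skiprows=1)
data[2] = 0.542957759886306   # value from the diff above
if len(data.shape) < 2:
    data = np.expand_dims(data, axis=0)
print(data)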
@@ -130,9 +132,14 @@ def log_density(calib_inputs, model, surrogate, transform):
# Evaluate log-likelihood:
# Loop on the available observations
for loopA in range(num_obs):

    # - n / 2 * log(2 pi)
    l1 = -0.5 * np.prod(langmuir_model.data.shape) * np.log(2.0 * np.pi)

    # - 1 / 2 * sum_{i=1}^N log(sigma_i)
    l2 = (-0.5 * langmuir_model.data.shape[1] * torch.log(stds)).item()

    # - 1 / 2 * sum_{i=1}^N ((eta_i + disc_i - y_i) / sigma_i)^2
    l3 = -0.5 * torch.sum(((modelOut + discrepancy.t() - Data[:,loopA].unsqueeze(0)) / stds)**2, dim = 1)

if(False):
print('Compare')
@@ -166,7 +173,7 @@ def log_prior(calib_inputs, transform):
phys_inputs = transform.forward(calib_inputs)
# Define prior moments
pr_avg = torch.tensor([[1E3, -21E3]])
pr_std = torch.tensor([[1E2, 5E2]])
# Eval log prior
l1 = -0.5 * calib_inputs.size(1) * np.log(2.0 * np.pi)
l2 = (-0.5 * torch.log(torch.prod(pr_std))).item()
5 changes: 0 additions & 5 deletions linfa/tests/test_no_disc_TP1_uniform.py
@@ -93,11 +93,6 @@ def run_test():
output_size = 1,
var_grid_in = var_grid_in,
var_grid_out = var_grid_out)
# Initially tune on the default values of the calibration variables
# exp.surrogate.update(langmuir_model.defParams, exp.surr_pre_it, 0.03, 0.9999, 100, store=True)
# exp.surrogate.update(langmuir_model.defParams, 1, 0.03, 0.9999, 100, store=True)
# Load the surrogate
# exp.surrogate.surrogate_loaCCd()
else:
exp.surrogate = None

4 changes: 2 additions & 2 deletions run_plot_res.sh
@@ -1,7 +1,7 @@
#python3 linfa/plot_res.py --folder results/ --name TP15_no_disc_error_estimation --iter 10000 --picformat png
python3 linfa/plot_res.py --folder results/ --name TP15_no_disc_gaussian --iter 10000 --picformat png
# python3 linfa/plot_disc.py --folder results/ --name test_08_lf_w_disc_TP1_uniform_prior --iter 25000 --picformat png --mode histograms --num_points 10 --limfactor 1.0 --saveinterval 1000 --dropouts 10
# python3 linfa/plot_disc.py --folder results/ --name test_19_lf_w_disc_TP15_rep_meas_dropout --iter 10000 --picformat png --mode discr_surface --num_points 10 --limfactor 1.0 --saveinterval 1000
# python3 linfa/plot_disc.py --folder results/ --name test_08_lf_w_disc_TP1_uniform_prior --iter 25000 --picformat png --mode marginal_stats --num_points 10 --limfactor 1.0 --saveinterval 1000
# python3 linfa/plot_disc.py --folder results/ --name TP1_no_disc_gaussian_prior --iter 10000 --picformat png --mode marginal_posterior --num_points 10 --limfactor 1.0 --saveinterval 1000
python3 plot_mcmc_and_linfa.py --folder results/ --name TP15_no_disc_error_estimation --iter 10000 --picformat png
python3 plot_mcmc_and_linfa.py --folder results/ --name TP15_no_disc_gaussian --iter 10000 --picformat png
