# Imports
import torch
from botorch import fit_gpytorch_mll
from botorch.acquisition.monte_carlo import qNoisyExpectedImprovement
from botorch.acquisition.objective import ScalarizedPosteriorTransform
from botorch.models.gp_regression import SingleTaskGP
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.settings import debug as botorch_debug
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood

botorch_debug._set_state(True)


# Five equispaced samples from a simple sine curve over [0, 2*pi].
# Each sample contains a function evaluation and derivative information.
# (I fixed the training data because it consistently causes an error in the
# derivative-enabled case; different training data sometimes don't trigger
# the error.)
def get_fixed_training_data():
    train_x = torch.tensor(
        [[0.0], [1.57079637], [3.14159274], [4.71238899], [6.28318548]],
        dtype=torch.float64,
    )
    train_y_dy = torch.tensor(
        [
            [0.03286297, 0.97644126],
            [0.98860252, -0.06822324],
            [-0.00680325, -0.90437853],
            [-0.96307969, 0.01945158],
            [-0.07107084, 0.98940557],
        ],
        dtype=torch.float64,
    )
    return (
        train_x,
        train_y_dy[:, 0].unsqueeze(1),
        train_y_dy[:, 1].unsqueeze(1),
        train_y_dy,
    )


# Obtain the value of our acquisition function at x
def get_acqf(x):
    # Obtain training data (only the function values are used here)
    train_x, train_y, _, _ = get_fixed_training_data()

    # Get and fit our model
    model = SingleTaskGP(train_x, train_y)
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    fit_gpytorch_mll(mll)

    # This is here since we use a posterior transform in the derivative-enabled
    # example too; I'm just trying to reduce any variability between the two examples
    scal_transf = ScalarizedPosteriorTransform(
        weights=torch.tensor([1.0], dtype=torch.float64)
    )

    # Define qNEI acquisition function
    # I got the sampler from here:
    # https://botorch.org/api/_modules/botorch/acquisition/monte_carlo.html#qNoisyExpectedImprovement
    sampler = SobolQMCNormalSampler(sample_shape=torch.Size([1024]))
    qNEI = qNoisyExpectedImprovement(
        model,
        train_x,
        sampler,
        posterior_transform=scal_transf,
    )

    # Calculate and return qNEI value
    return qNEI(torch.tensor([[x]], dtype=torch.float64))


# This often fails at `torch._C._LinAlgError: linalg.cholesky` for the
# derivative-enabled case
print("qNEI at 0:", get_acqf(0.0))
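

# ---------------------------------------------------------------------------
# For reference, a minimal sketch of the derivative-enabled model that the
# comments above refer to. This is an assumption about how that case is set
# up, not code from the original report: the class name `GPWithDerivatives`
# and the two-output scalarization weights are hypothetical, built from
# GPyTorch's RBFKernelGrad / ConstantMeanGrad and BoTorch's GPyTorchModel
# mixin.
import gpytorch
from botorch.models.gpytorch import GPyTorchModel


class GPWithDerivatives(gpytorch.models.ExactGP, GPyTorchModel):
    # Two outputs per input: the function value f(x) and its derivative df/dx
    _num_outputs = 2

    def __init__(self, train_x, train_y_dy):
        likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=2)
        super().__init__(train_x, train_y_dy, likelihood)
        self.mean_module = gpytorch.means.ConstantMeanGrad()
        self.covar_module = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.RBFKernelGrad(ard_num_dims=1)
        )

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultitaskMultivariateNormal(mean_x, covar_x)


# Usage sketch (assumed): fit as in get_acqf, but scalarize out the derivative
# output so qNEI only targets the function value. Running this variant is what
# tends to hit the `linalg.cholesky` error mentioned above, so it is left
# commented out.
# train_x, _, _, train_y_dy = get_fixed_training_data()
# model = GPWithDerivatives(train_x, train_y_dy)
# mll = ExactMarginalLogLikelihood(model.likelihood, model)
# fit_gpytorch_mll(mll)
# scal_transf = ScalarizedPosteriorTransform(
#     weights=torch.tensor([1.0, 0.0], dtype=torch.float64)
# )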