diff --git a/augur/analyze.py b/augur/analyze.py
index 5217720..39eb50c 100644
--- a/augur/analyze.py
+++ b/augur/analyze.py
@@ -128,14 +128,15 @@ def __init__(self, config, likelihood=None, tools=None, req_params=None,
                 raise ValueError(f'The requested parameter {var} is not \
                                  in the list of parameters in the likelihood.')
         # Cast to numpy array (this will be done later anyway)
-        self.x = np.array(self.x)
+        self.x = np.array(self.x).astype(np.float64)
         self.par_bounds = np.array(self.par_bounds)
         if (len(self.par_bounds) < 1) & (self.norm_step):
             self.norm_step = False
             warnings.warn('Parameter bounds not provided -- the step will not be normalized')
         # Normalize the pivot point given the sampling region
         if self.norm_step:
-            self.norm = self.par_bounds[:, 1] - self.par_bounds[:, 0]
+            self.norm = np.array(self.par_bounds[:, 1]).astype(np.float64) - \
+                np.array(self.par_bounds[:, 0]).astype(np.float64)
 
     def f(self, x, labels, pars_fid, sys_fid, donorm=False):
         """
@@ -166,10 +167,10 @@ def f(self, x, labels, pars_fid, sys_fid, donorm=False):
             raise ValueError('The labels should have the same length as the parameters!')
         else:
             if isinstance(x, list):
-                x = np.array(x)
+                x = np.array(x).astype(np.float64)
             # If we normalize the sampling we need to undo the normalization
             if donorm:
-                x = self.norm * x + self.par_bounds[:, 0]
+                x = self.norm * x + np.array(self.par_bounds[:, 0]).astype(np.float64)
 
             if x.ndim == 1:
                 _pars = pars_fid.copy()
@@ -221,7 +222,9 @@ def get_derivatives(self, force=False, method='5pt_stencil', step=None):
         if (self.derivatives is None) or (force):
             if '5pt_stencil' in method:
                 if self.norm_step:
-                    x_here = (self.x - self.par_bounds[:, 0]) * 1/self.norm
+                    print(self.x)
+                    x_here = (self.x - np.array(self.par_bounds[:, 0]).astype(np.float64)) \
+                        * 1/self.norm
                 else:
                     x_here = self.x
                 self.derivatives = five_pt_stencil(lambda y: self.f(y, self.var_pars, self.pars_fid,
@@ -234,7 +237,9 @@ def get_derivatives(self, force=False, method='5pt_stencil', step=None):
             else:
                 ndkwargs = {}
                 if self.norm_step:
-                    x_here = (self.x - self.par_bounds[:, 0]) * 1/self.norm
+                    print(self.x)
+                    x_here = (self.x - np.array(self.par_bounds[:, 0]).astype(np.float64)) \
+                        * 1/self.norm
                 else:
                     x_here = self.x
                 jacobian_calc = nd.Jacobian(lambda y: self.f(y, self.var_pars, self.pars_fid,
diff --git a/examples/config_test.yml b/examples/config_test.yml
index a093cc5..69ea3f1 100644
--- a/examples/config_test.yml
+++ b/examples/config_test.yml
@@ -5,6 +5,7 @@ cosmo:
   Omega_b : 0.0491685
   h : 0.6727
   n_s : 0.9645
+  #A_s : 2.105e-9
   sigma8 : 0.831
   extra_parameters :
     camb :
@@ -108,7 +109,8 @@ fisher:
           'src0_delta_z', 'src1_delta_z', 'src2_delta_z', 'src3_delta_z', 'src4_delta_z']
   # parameters:  # TODO: For now priors are ignored
   #   Omega_c: [0.1, 0.26, 0.9]
-  #   sigma8: [0.4, 0.81, 1.2]
+  #   A_s: [1e-9, 4e-9]
+  #   #sigma8: [0.4, 0.81, 1.2]
   #   w0: [-1.8, -1.0, -0.2]
   #   wa: [-4, 0.0, 0.5]
   #   h: [0.5, 0.6727, 0.8]
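For readers outside the codebase, here is a minimal standalone sketch of the step-normalization idea this patch touches. It is not augur's API: the bounds, pivot values, and the toy `model` function are invented for illustration, and the derivative loop is a plain central difference rather than the five-point stencil or numdifftools call used above. The point it shows is the same, though: cast everything to float64 up front (so integer-valued entries from a YAML config cannot poison the dtype), differentiate in unit-cube coordinates, and undo the rescaling with the chain rule.

```python
import numpy as np

# Hypothetical pivot point and bounds (e.g. Omega_c, sigma8); not taken from augur.
x_fid = np.array([0.26, 0.831], dtype=np.float64)
bounds = np.array([[0.1, 0.9], [0.4, 1.2]], dtype=np.float64)
norm = bounds[:, 1] - bounds[:, 0]          # width of the sampling region per parameter


def denormalize(u):
    """Map unit-cube coordinates back to physical parameter values (cf. donorm=True)."""
    return norm * np.asarray(u, dtype=np.float64) + bounds[:, 0]


def model(p):
    """Toy stand-in for the likelihood's theory vector."""
    return np.array([p[0] ** 2 + p[1], np.sin(p[0] * p[1])])


def f_normalized(u):
    """The wrapped function the differentiator actually sees."""
    return model(denormalize(u))


# Pivot point expressed in normalized coordinates, as in get_derivatives above.
u_fid = (x_fid - bounds[:, 0]) / norm

# Central differences in normalized coordinates...
h = 1e-4
jac_norm = np.empty((2, 2))
for i in range(2):
    du = np.zeros(2)
    du[i] = h
    jac_norm[:, i] = (f_normalized(u_fid + du) - f_normalized(u_fid - du)) / (2.0 * h)

# ...then undo the rescaling via the chain rule to recover d(model)/d(parameter).
jac_physical = jac_norm / norm
print(jac_physical)
```

Normalizing the step this way keeps a single relative step size meaningful across parameters with very different scales, which is presumably why the config sketches an A_s entry of order 1e-9 alongside parameters of order unity.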