@classmethod
def from_likelihood_function(cls, log_likelihood_function, param_ranges, priors=None, debug=False, derived=None):
    """
    Make a pipeline from a simple likelihood function.

    Parameters
    ----------
    log_likelihood_function : function
        A function that takes a list of parameters and returns either a single
        number (the log-likelihood) or a tuple of two things, the first being
        the log-likelihood and the second being a dictionary of extra derived
        parameters.
    param_ranges : list of tuples
        A list of tuples of the form (min, starting_point, max) for each parameter.
    priors : dict, optional
        A dictionary of priors in the form name:prior (see documentation for
        the prior format).  If not specified then uniform priors are used.
    debug : bool, optional
        If True then exceptions in the likelihood function will be raised.
        If False then they will be ignored and the likelihood will be set to -inf.
    derived : list of strings, optional
        A list of names of derived parameters to save in the output.

    Returns
    -------
    pipeline : LikelihoodPipeline
        A pipeline object that can be run.
    """
    nparam = len(param_ranges)

    def setup(options):
        # A plain likelihood function needs no per-module configuration.
        return {}

    def execute(block, config):
        # Gather the sampled parameters p0..p{nparam-1} into a single array
        # in the order the value ranges were given.
        parameters = np.array([block["params", f"p{i}"] for i in range(nparam)])
        try:
            p = log_likelihood_function(parameters)
        # Catch Exception, not a bare except, so KeyboardInterrupt/SystemExit
        # still propagate and can abort a run.
        except Exception:
            if debug:
                raise
            # Non-zero status marks this point as failed; the pipeline
            # treats its likelihood as -inf.
            return 1

        if derived is None:
            # The function returns just the log-likelihood.
            like = p
        else:
            # The function returns (log-likelihood, {name: value}).
            like = p[0]
            extra = p[1]

            if not isinstance(extra, dict):
                raise ValueError("The extra output from the likelihood function must be a dictionary")

            # Only touch `extra` in this branch — previously the check and
            # loop below ran unconditionally and hit a NameError whenever
            # derived was None.
            for key, value in extra.items():
                block['derived', key] = value

        # Section/name "likelihoods/a_like" matches the "likelihoods: a"
        # entry in the pipeline config below.
        block['likelihoods', 'a_like'] = like
        return 0

    mod = module.FunctionModule("log_likelihood_function", setup, execute)

    # One values-file entry per parameter: "min  start  max".
    parameters = {
        f"p{i}": f"{param_ranges[i][0]} {param_ranges[i][1]} {param_ranges[i][2]}"
        for i in range(nparam)
    }

    debug_str = "T" if debug else "F"
    extra_saves = " ".join(f"derived/{d}" for d in derived) if derived is not None else ""

    config = {
        "pipeline": {
            "likelihoods": "a",
            "debug": debug_str,
            "extra_output": extra_saves,
        }
    }

    values = {
        "params": parameters
    }

    # Only build a priors section when the caller supplied one, so that the
    # documented fallback to uniform priors actually happens; previously
    # [{"params": None}] was passed even with no priors.
    prior_config = [{"params": priors}] if priors is not None else None

    pipeline = cls(config, values=values, modules=[mod], priors=prior_config)
    return pipeline