Add generic Bernoulli wrapper around probs,logits version #137

Merged 1 commit on May 1, 2019
16 changes: 12 additions & 4 deletions funsor/distributions.py
@@ -104,7 +104,7 @@ def eager_log_prob(cls, **params):
 # Distribution Wrappers
 ################################################################################

-class Bernoulli(Distribution):
+class BernoulliProbs(Distribution):
     dist_class = dist.Bernoulli

     @staticmethod
@@ -115,12 +115,12 @@ def _fill_defaults(probs, value='value'):
         return probs, value

     def __init__(self, probs, value=None):
-        super(Bernoulli, self).__init__(probs, value)
+        super(BernoulliProbs, self).__init__(probs, value)


-@eager.register(Bernoulli, Tensor, Tensor)
+@eager.register(BernoulliProbs, Tensor, Tensor)
 def eager_bernoulli(probs, value):
-    return Bernoulli.eager_log_prob(probs=probs, value=value)
+    return BernoulliProbs.eager_log_prob(probs=probs, value=value)


 class BernoulliLogits(Distribution):
@@ -142,6 +142,14 @@ def eager_bernoulli_logits(logits, value):
     return BernoulliLogits.eager_log_prob(logits=logits, value=value)


+def Bernoulli(probs=None, logits=None, value='value'):
+    if probs is not None:
+        return BernoulliProbs(probs, value)
+    if logits is not None:
+        return BernoulliLogits(logits, value)
+    raise ValueError('Either probs or logits must be specified')
+
+
 class Beta(Distribution):
     dist_class = dist.Beta

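The new Bernoulli is a plain factory function rather than a Distribution subclass: it dispatches on whichever keyword argument is supplied and raises otherwise. A minimal usage sketch, assuming the import style used by the test file in this PR (torch, Tensor from funsor.torch, and funsor.distributions imported as dist); exact constructor signatures may differ slightly across funsor versions:

import torch
import funsor.distributions as dist
from funsor.torch import Tensor

probs = Tensor(torch.tensor(0.3))
value = Tensor(torch.tensor(1.))

# probs= routes to BernoulliProbs; logits= would route to BernoulliLogits.
d = dist.Bernoulli(probs=probs)   # lazy funsor with a free 'value' variable
log_prob = d(value=value)         # substituting 'value' triggers the eager log_prob rule

# Calling dist.Bernoulli() with neither keyword raises
# ValueError('Either probs or logits must be specified').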
24 changes: 16 additions & 8 deletions test/test_distributions.py
@@ -43,8 +43,8 @@ def beta(concentration1, concentration0, value):


 @pytest.mark.parametrize('batch_shape', [(), (5,), (2, 3)], ids=str)
-@pytest.mark.parametrize('eager', [False, True])
-def test_bernoulli_density(batch_shape, eager):
+@pytest.mark.parametrize('syntax', ['eager', 'lazy', 'generic'])
+def test_bernoulli_probs_density(batch_shape, syntax):
     batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
     inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))

@@ -60,15 +60,19 @@ def bernoulli(probs, value):
     check_funsor(expected, inputs, reals())

     d = Variable('value', reals())
-    actual = dist.Bernoulli(probs, value) if eager else \
-        dist.Bernoulli(probs, d)(value=value)
+    if syntax == 'eager':
+        actual = dist.BernoulliProbs(probs, value)
+    elif syntax == 'lazy':
+        actual = dist.BernoulliProbs(probs, d)(value=value)
+    elif syntax == 'generic':
+        actual = dist.Bernoulli(probs=probs)(value=value)
     check_funsor(actual, inputs, reals())
     assert_close(actual, expected)


 @pytest.mark.parametrize('batch_shape', [(), (5,), (2, 3)], ids=str)
-@pytest.mark.parametrize('eager', [False, True])
-def test_bernoulli_logits_density(batch_shape, eager):
+@pytest.mark.parametrize('syntax', ['eager', 'lazy', 'generic'])
+def test_bernoulli_logits_density(batch_shape, syntax):
     batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
     inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))

@@ -84,8 +88,12 @@ def bernoulli(logits, value):
     check_funsor(expected, inputs, reals())

     d = Variable('value', reals())
-    actual = dist.BernoulliLogits(logits, value) if eager else \
-        dist.BernoulliLogits(logits, d)(value=value)
+    if syntax == 'eager':
+        actual = dist.BernoulliLogits(logits, value)
+    elif syntax == 'lazy':
+        actual = dist.BernoulliLogits(logits, d)(value=value)
+    elif syntax == 'generic':
+        actual = dist.Bernoulli(logits=logits)(value=value)
     check_funsor(actual, inputs, reals())
     assert_close(actual, expected)

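Since probs and logits are two parameterizations of the same distribution, the two paths through the generic wrapper should agree whenever logits = log(probs / (1 - probs)). A small consistency check in that spirit, not part of this PR: it assumes assert_close comes from funsor.testing as in the test suite, and reuses the Tensor import from the sketch above:

import torch
import funsor.distributions as dist
from funsor.testing import assert_close
from funsor.torch import Tensor

p = 0.25
probs = Tensor(torch.tensor(p))
logits = Tensor(torch.tensor(p / (1. - p)).log())  # log-odds of the same probability
value = Tensor(torch.tensor(1.))

# Both calls dispatch through the generic wrapper to different backing classes.
lp_probs = dist.Bernoulli(probs=probs)(value=value)
lp_logits = dist.Bernoulli(logits=logits)(value=value)
assert_close(lp_probs, lp_logits)  # both equal log(0.25) up to rounding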