diff --git a/csep/core/binomial_evaluations.py b/csep/core/binomial_evaluations.py
index ab97f793..58fd7f50 100644
--- a/csep/core/binomial_evaluations.py
+++ b/csep/core/binomial_evaluations.py
@@ -152,7 +152,7 @@ def _binary_likelihood_test(forecast_data, observed_data, num_simulations=1000,
     # data structures to store results
     sim_fore = numpy.zeros(sampling_weights.shape)
     simulated_ll = []
-    n_active_cells = len(np.unique(np.nonzero(observed_data.ravel())))
+    n_active_cells = len(numpy.unique(numpy.nonzero(observed_data.ravel())))
     n_fore = numpy.sum(forecast_data)
     expected_forecast_count = int(n_active_cells)
 
diff --git a/tests/test_evaluations.py b/tests/test_evaluations.py
index 71557478..c7468528 100644
--- a/tests/test_evaluations.py
+++ b/tests/test_evaluations.py
@@ -2,9 +2,8 @@
 import numpy
 import unittest
 
-from csep.core.poisson_evaluations import _simulate_catalog, _poisson_likelihood_test
-from csep.core.binomial_evaluations import binary_joint_log_likelihood_ndarray
-
+import csep.core.poisson_evaluations as poisson
+import csep.core.binomial_evaluations as binary
 
 def get_datadir():
     root_dir = os.path.dirname(os.path.abspath(__file__))
@@ -48,21 +47,21 @@ def test_simulate_catalog(self):
         # this is taken from the test likelihood function
         sim_fore = numpy.empty(sampling_weights.shape)
-        sim_fore = _simulate_catalog(num_events, sampling_weights, sim_fore,
+        sim_fore = poisson._simulate_catalog(num_events, sampling_weights, sim_fore,
                                      random_numbers=self.random_matrix)
 
         # final statement
         numpy.testing.assert_allclose(expected_catalog, sim_fore)
 
         # test again to ensure that fill works properly
-        sim_fore = _simulate_catalog(num_events, sampling_weights, sim_fore,
+        sim_fore = poisson._simulate_catalog(num_events, sampling_weights, sim_fore,
                                      random_numbers=self.random_matrix)
 
         # final statement
         numpy.testing.assert_allclose(expected_catalog, sim_fore)
 
     def test_likelihood(self):
-        qs, obs_ll, simulated_ll = _poisson_likelihood_test(self.forecast_data, self.observed_data, num_simulations=1,
+        qs, obs_ll, simulated_ll = poisson._poisson_likelihood_test(self.forecast_data, self.observed_data, num_simulations=1,
                                                             random_numbers=self.random_matrix, use_observed_counts=True)
 
         # very basic result to pass "laugh" test
@@ -78,13 +77,42 @@ class TestBinomialLikelihood(unittest.TestCase):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
+        self.seed = 0
+        numpy.random.seed(self.seed)
         self.forecast_data = numpy.array([[0.1, 0.3, 0.4], [0.2, 0.1, 0.1]])
         self.observed_data = numpy.array([[0, 1, 2], [1, 1, 0]])
+        self.random_matrix = numpy.random.rand(1, 9)
 
     def test_joint_likelihood_calculation(self):
-        bill = binary_joint_log_likelihood_ndarray(self.forecast_data, self.observed_data)
+        bill = binary.binary_joint_log_likelihood_ndarray(self.forecast_data, self.observed_data)
+        numpy.testing.assert_allclose(bill, -6.7197988064)
 
+    def test_simulate_active_cells(self):
+        # With a fixed seed we get the same random numbers whether we draw them all at once or one by one.
+        # Make sure the random numbers generated by seed 0 match.
+        expected_random_numbers = numpy.array([[0.5488135, 0.71518937, 0.60276338, 0.54488318, 0.4236548, 0.64589411,
+                                                0.4375872112626925, 0.8917730007820798, 0.9636627605010293]])
+
+        numpy.testing.assert_allclose(expected_random_numbers, self.random_matrix)
+
+        # Given the random numbers above, we expect the following catalog:
+        # 4 active cells after the 9th random sample.
+        expected_catalog = [0, 0, 1, 1, 1, 1]
+
+        sampling_weights = numpy.cumsum(self.forecast_data.ravel()) / numpy.sum(self.forecast_data)
+        sim_fore = numpy.zeros(sampling_weights.shape)
+        obs_active_cells = len(numpy.unique(numpy.nonzero(self.observed_data.ravel())))
+        # Reset the seed to 0 to make sure _simulate_catalog uses it.
+        seed = 0
+        numpy.random.seed(seed)
+        sim_fore = binary._simulate_catalog(obs_active_cells, sampling_weights, sim_fore)
+        numpy.testing.assert_allclose(expected_catalog, sim_fore)
+
+    def test_binomial_likelihood(self):
+        qs, bill, simulated_ll = binary._binary_likelihood_test(self.forecast_data, self.observed_data, num_simulations=1, seed=0, verbose=True)
+        numpy.testing.assert_allclose(bill, -6.7197988064)
+        numpy.testing.assert_allclose(qs, 1)
+        numpy.testing.assert_allclose(simulated_ll[0], -7.921741654647629)
 
 if __name__ == '__main__':
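
Note on test_simulate_active_cells: the expected catalog can be reproduced by hand. The sketch below is an illustration only, assuming _simulate_catalog draws uniform samples one at a time and activates the cell each sample falls into via a searchsorted lookup on the cumulative sampling weights; it is not the actual implementation in binomial_evaluations.py.

import numpy

forecast_data = numpy.array([[0.1, 0.3, 0.4], [0.2, 0.1, 0.1]])
observed_data = numpy.array([[0, 1, 2], [1, 1, 0]])

# cumulative weights normalized to [0, 1]
sampling_weights = numpy.cumsum(forecast_data.ravel()) / numpy.sum(forecast_data)
# number of cells with at least one observed event (4 here)
n_active_cells = len(numpy.unique(numpy.nonzero(observed_data.ravel())))

numpy.random.seed(0)
sim_fore = numpy.zeros(sampling_weights.shape)
while sim_fore.sum() < n_active_cells:
    # activate the cell the next uniform sample falls into
    idx = numpy.searchsorted(sampling_weights, numpy.random.rand())
    sim_fore[idx] = 1

print(sim_fore)  # [0. 0. 1. 1. 1. 1.] after 9 draws, matching expected_catalog

With seed 0, the legacy numpy RandomState yields the same stream whether sampled as rand(1, 9) or one value at a time, which is why the test's expected_random_numbers match self.random_matrix.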
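
Note on the expected value -6.7197988064 in test_joint_likelihood_calculation: it is consistent with a cell-wise Bernoulli model in which a cell is active with probability 1 - exp(-rate), i.e. the Poisson probability of at least one event. A minimal sketch under that assumption (not necessarily the exact code path of binary_joint_log_likelihood_ndarray):

import numpy

forecast_data = numpy.array([[0.1, 0.3, 0.4], [0.2, 0.1, 0.1]])
observed_data = numpy.array([[0, 1, 2], [1, 1, 0]])

rates = forecast_data.ravel()
observed = observed_data.ravel() > 0       # whether each cell is active
prob = 1.0 - numpy.exp(-rates)             # Poisson prob. of >= 1 event per cell

# joint log-likelihood over all cells: log p where active, log (1 - p) where not
log_like = numpy.sum(numpy.where(observed, numpy.log(prob), numpy.log(1.0 - prob)))
print(log_like)  # approx. -6.7197988064, the value asserted in the test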