From 81882474f01747c24ed86db85ac4a05e1e0e8b58 Mon Sep 17 00:00:00 2001
From: khawajasim
Date: Thu, 17 Nov 2022 17:20:59 +0100
Subject: [PATCH 1/6] unit test for sim_cat

---
 tests/test_evaluations.py | 27 ++++++++++++++++++++++++++-
 1 file changed, 26 insertions(+), 1 deletion(-)

diff --git a/tests/test_evaluations.py b/tests/test_evaluations.py
index 025e7425..3651bba0 100644
--- a/tests/test_evaluations.py
+++ b/tests/test_evaluations.py
@@ -3,7 +3,7 @@
 import unittest

 from csep.core.poisson_evaluations import _simulate_catalog, _poisson_likelihood_test
-from csep.core.binomial_evaluations import binary_joint_log_likelihood_ndarray
+from csep.core.binomial_evaluations import binary_joint_log_likelihood_ndarray, _simulate_catalog


 def get_datadir():
@@ -78,14 +78,39 @@ def test_likelihood(self):

 class TestBinomialLikelihood(unittest.TestCase):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
+        self.seed = 0
+        numpy.random.seed(self.seed)
         self.forecast_data = numpy.array([[0.1, 0.3, 0.4], [0.2, 0.1, 0.1]])
         self.observed_data = numpy.array([[0, 1, 2], [1, 1, 0]])
+        self.random_matrix = numpy.random.rand(1, 9)

     def test_likelihood(self):
         bill = binary_joint_log_likelihood_ndarray(self.forecast_data, self.observed_data)
         numpy.testing.assert_allclose(bill, -6.7197988064)

+    def test_simulate_active_cells(self):
+        #With fixed seed we get the same random numbers if we get all the number at once to one by one.
+        #Making sure random number generated by seed 0 match.
+        expected_random_numbers =numpy.array([[0.5488135, 0.71518937, 0.60276338, 0.54488318, 0.4236548, 0.64589411,
+                                               0.4375872112626925, 0.8917730007820798, 0.9636627605010293]])
+
+        numpy.testing.assert_allclose(expected_random_numbers, self.random_matrix)
+
+        #We can expect the following catalog, if we get the above random numbers.
+        #We get 4 active cells after 9th random sample.
+        expected_catalog = [0, 0, 1, 1, 1, 1]
+
+        sampling_weights = numpy.cumsum(self.forecast_data.ravel()) / numpy.sum(self.forecast_data)
+        sim_fore = numpy.zeros(sampling_weights.shape)
+        obs_active_cells = len(np.unique(np.nonzero(observed_data.ravel())))
+        #resetting seed again to 0, to make sure _simulate_catalog uses this.
+        seed = 0
+        numpy.random.seed(seed)
+        sim_fore = _simulate_catalog(obs_active_cells, sampling_weights, sim_fore)
+        numpy.testing.assert_allclose(expected_catalog, sim_fore)
+
+

 if __name__ == '__main__':
     unittest.main()

From 21ddaad3488085d6f7c5f89993cf86081ba2e79d Mon Sep 17 00:00:00 2001
From: khawajasim
Date: Thu, 17 Nov 2022 17:30:16 +0100
Subject: [PATCH 2/6] testing_likelihood

---
 tests/test_evaluations.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/tests/test_evaluations.py b/tests/test_evaluations.py
index 3651bba0..9eb15587 100644
--- a/tests/test_evaluations.py
+++ b/tests/test_evaluations.py
@@ -3,7 +3,7 @@
 import unittest

 from csep.core.poisson_evaluations import _simulate_catalog, _poisson_likelihood_test
-from csep.core.binomial_evaluations import binary_joint_log_likelihood_ndarray, _simulate_catalog
+from csep.core.binomial_evaluations import binary_joint_log_likelihood_ndarray, _simulate_catalog, _binary_likelihood_test


 def get_datadir():
@@ -109,6 +109,14 @@ def test_simulate_active_cells(self):
         numpy.random.seed(seed)
         sim_fore = _simulate_catalog(obs_active_cells, sampling_weights, sim_fore)
         numpy.testing.assert_allclose(expected_catalog, sim_fore)
+    def test_llikelihood(self):
+        qs, bill, simulated_ll = _binomial_likelihood_test(forecast_data, observed_data, num_simulations=1,seed=0, verbose=True)
+        numpy.testing.assert_allclose(bill, -6.7197988064)
+        numpy.testing.assert_allclose(qs, 1)
+        numpy.testing.assert_allclose(simulated_ll[0], -7.921741654647629)
+
+
+

From eb19a4441751690ed6db5046e6ddcc340bc35e50 Mon Sep 17 00:00:00 2001
From: khawajasim
Date: Thu, 17 Nov 2022 17:33:29 +0100
Subject: [PATCH 3/6] name correction

---
 tests/test_evaluations.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/test_evaluations.py b/tests/test_evaluations.py
index 9eb15587..73dbff0d 100644
--- a/tests/test_evaluations.py
+++ b/tests/test_evaluations.py
@@ -90,7 +90,7 @@ def test_likelihood(self):
         numpy.testing.assert_allclose(bill, -6.7197988064)

     def test_simulate_active_cells(self):
-        #With fixed seed we get the same random numbers if we get all the number at once to one by one.
+        #With fixed seed we get the same random numbers if we get all the number at once or one by one.
         #Making sure random number generated by seed 0 match.
         expected_random_numbers =numpy.array([[0.5488135, 0.71518937, 0.60276338, 0.54488318, 0.4236548, 0.64589411,
                                                0.4375872112626925, 0.8917730007820798, 0.9636627605010293]])
@@ -109,8 +109,8 @@ def test_simulate_active_cells(self):
         numpy.random.seed(seed)
         sim_fore = _simulate_catalog(obs_active_cells, sampling_weights, sim_fore)
         numpy.testing.assert_allclose(expected_catalog, sim_fore)
-    def test_llikelihood(self):
-        qs, bill, simulated_ll = _binomial_likelihood_test(forecast_data, observed_data, num_simulations=1,seed=0, verbose=True)
+    def test_binomial_likelihood(self):
+        qs, bill, simulated_ll = _binary_likelihood_test(forecast_data, observed_data, num_simulations=1,seed=0, verbose=True)
         numpy.testing.assert_allclose(bill, -6.7197988064)
         numpy.testing.assert_allclose(qs, 1)
         numpy.testing.assert_allclose(simulated_ll[0], -7.921741654647629)

From b56d33ba7ee86919a4d49edc9064ea4b8c85d5cf Mon Sep 17 00:00:00 2001
From: khawajasim
Date: Thu, 17 Nov 2022 17:41:19 +0100
Subject: [PATCH 4/6] minor corrections

---
 tests/test_evaluations.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/test_evaluations.py b/tests/test_evaluations.py
index 73dbff0d..31cb8063 100644
--- a/tests/test_evaluations.py
+++ b/tests/test_evaluations.py
@@ -103,14 +103,14 @@ def test_simulate_active_cells(self):
         sampling_weights = numpy.cumsum(self.forecast_data.ravel()) / numpy.sum(self.forecast_data)
         sim_fore = numpy.zeros(sampling_weights.shape)
-        obs_active_cells = len(np.unique(np.nonzero(observed_data.ravel())))
+        obs_active_cells = len(np.unique(np.nonzero(self.observed_data.ravel())))
         #resetting seed again to 0, to make sure _simulate_catalog uses this.
         seed = 0
         numpy.random.seed(seed)
         sim_fore = _simulate_catalog(obs_active_cells, sampling_weights, sim_fore)
         numpy.testing.assert_allclose(expected_catalog, sim_fore)
     def test_binomial_likelihood(self):
-        qs, bill, simulated_ll = _binary_likelihood_test(forecast_data, observed_data, num_simulations=1,seed=0, verbose=True)
+        qs, bill, simulated_ll = _binary_likelihood_test(self.forecast_data,self.observed_data, num_simulations=1,seed=0, verbose=True)
         numpy.testing.assert_allclose(bill, -6.7197988064)
         numpy.testing.assert_allclose(qs, 1)
         numpy.testing.assert_allclose(simulated_ll[0], -7.921741654647629)

From 6b12645c0d107e684626dc7d0084b35ae7934d01 Mon Sep 17 00:00:00 2001
From: khawajasim
Date: Thu, 17 Nov 2022 17:49:14 +0100
Subject: [PATCH 5/6] correct indent

---
 tests/test_evaluations.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_evaluations.py b/tests/test_evaluations.py
index 31cb8063..836f52ba 100644
--- a/tests/test_evaluations.py
+++ b/tests/test_evaluations.py
@@ -89,7 +89,7 @@ def test_likelihood(self):
         numpy.testing.assert_allclose(bill, -6.7197988064)

-    def test_simulate_active_cells(self):
+    def test_simulate_active_cells(self):
         #With fixed seed we get the same random numbers if we get all the number at once or one by one.
         #Making sure random number generated by seed 0 match.
         expected_random_numbers =numpy.array([[0.5488135, 0.71518937, 0.60276338, 0.54488318, 0.4236548, 0.64589411,
                                                0.4375872112626925, 0.8917730007820798, 0.9636627605010293]])

From 2dd39f4c8ab65e91b171a9829a07d403e6bc2aae Mon Sep 17 00:00:00 2001
From: khawajasim
Date: Thu, 17 Nov 2022 17:59:23 +0100
Subject: [PATCH 6/6] fixing imports

---
 tests/test_evaluations.py | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)

diff --git a/tests/test_evaluations.py b/tests/test_evaluations.py
index 836f52ba..1bd89181 100644
--- a/tests/test_evaluations.py
+++ b/tests/test_evaluations.py
@@ -2,8 +2,10 @@
 import numpy
 import unittest

-from csep.core.poisson_evaluations import _simulate_catalog, _poisson_likelihood_test
-from csep.core.binomial_evaluations import binary_joint_log_likelihood_ndarray, _simulate_catalog, _binary_likelihood_test
+import csep.core.poisson_evaluations as poisson
+import csep.core.binomial_evaluations as binary
+#from csep.core.poisson_evaluations import _simulate_catalog, _poisson_likelihood_test
+#from csep.core.binomial_evaluations import binary_joint_log_likelihood_ndarray, _simulate_catalog, _binary_likelihood_test


 def get_datadir():
@@ -48,21 +50,21 @@ def test_simulate_catalog(self):
         # this is taken from the test likelihood function
         sim_fore = numpy.empty(sampling_weights.shape)
-        sim_fore = _simulate_catalog(num_events, sampling_weights, sim_fore,
+        sim_fore = poisson._simulate_catalog(num_events, sampling_weights, sim_fore,
                                      random_numbers=self.random_matrix)

         # final statement
         numpy.testing.assert_allclose(expected_catalog, sim_fore)

         # test again to ensure that fill works properply
-        sim_fore = _simulate_catalog(num_events, sampling_weights, sim_fore,
+        sim_fore = poisson._simulate_catalog(num_events, sampling_weights, sim_fore,
                                      random_numbers=self.random_matrix)

         # final statement
         numpy.testing.assert_allclose(expected_catalog, sim_fore)

     def test_likelihood(self):
-        qs, obs_ll, simulated_ll = _poisson_likelihood_test(self.forecast_data, self.observed_data, num_simulations=1,
+        qs, obs_ll, simulated_ll = poisson._poisson_likelihood_test(self.forecast_data, self.observed_data, num_simulations=1,
                                                             random_numbers=self.random_matrix, use_observed_counts=True)

         # very basic result to pass "laugh" test
@@ -85,14 +87,14 @@ def __init__(self, *args, **kwargs):
         self.random_matrix = numpy.random.rand(1, 9)

     def test_likelihood(self):
-        bill = binary_joint_log_likelihood_ndarray(self.forecast_data, self.observed_data)
+        bill = binary.binary_joint_log_likelihood_ndarray(self.forecast_data, self.observed_data)
         numpy.testing.assert_allclose(bill, -6.7197988064)

     def test_simulate_active_cells(self):
         #With fixed seed we get the same random numbers if we get all the number at once or one by one.
         #Making sure random number generated by seed 0 match.
-        expected_random_numbers =numpy.array([[0.5488135, 0.71518937, 0.60276338, 0.54488318, 0.4236548, 0.64589411,
+        expected_random_numbers = numpy.array([[0.5488135, 0.71518937, 0.60276338, 0.54488318, 0.4236548, 0.64589411,
                                                0.4375872112626925, 0.8917730007820798, 0.9636627605010293]])

         numpy.testing.assert_allclose(expected_random_numbers, self.random_matrix)

         #We can expect the following catalog, if we get the above random numbers.
         #We get 4 active cells after 9th random sample.
         expected_catalog = [0, 0, 1, 1, 1, 1]

         sampling_weights = numpy.cumsum(self.forecast_data.ravel()) / numpy.sum(self.forecast_data)
         sim_fore = numpy.zeros(sampling_weights.shape)
-        obs_active_cells = len(np.unique(np.nonzero(self.observed_data.ravel())))
+        obs_active_cells = len(numpy.unique(numpy.nonzero(self.observed_data.ravel())))
         #resetting seed again to 0, to make sure _simulate_catalog uses this.
         seed = 0
         numpy.random.seed(seed)
-        sim_fore = _simulate_catalog(obs_active_cells, sampling_weights, sim_fore)
+        sim_fore = binary._simulate_catalog(obs_active_cells, sampling_weights, sim_fore)
         numpy.testing.assert_allclose(expected_catalog, sim_fore)
     def test_binomial_likelihood(self):
-        qs, bill, simulated_ll = _binary_likelihood_test(self.forecast_data,self.observed_data, num_simulations=1,seed=0, verbose=True)
+        qs, bill, simulated_ll = binary._binary_likelihood_test(self.forecast_data,self.observed_data, num_simulations=1,seed=0, verbose=True)
         numpy.testing.assert_allclose(bill, -6.7197988064)
         numpy.testing.assert_allclose(qs, 1)
         numpy.testing.assert_allclose(simulated_ll[0], -7.921741654647629)
-
-
-
-

 if __name__ == '__main__':
     unittest.main()
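
For context on the two hard-coded reference values asserted in these tests, the short standalone script below reproduces them with plain NumPy. It is only a sketch: it does not import pyCSEP, and the mapping of random draws onto cells via numpy.searchsorted is an illustrative assumption rather than the exact implementation of binary._simulate_catalog. The joint log-likelihood -6.7197988064 is consistent with treating each cell as a Bernoulli trial that is "active" (at least one event) with probability 1 - exp(-rate), and the expected catalog [0, 0, 1, 1, 1, 1] follows from mapping the nine seed-0 uniform draws onto the cumulative forecast weights and flagging every cell that gets hit.

# Standalone sketch (not part of the patch series or of pyCSEP) that
# reproduces the reference values used in the tests above.
import numpy

forecast_data = numpy.array([[0.1, 0.3, 0.4], [0.2, 0.1, 0.1]])
observed_data = numpy.array([[0, 1, 2], [1, 1, 0]])

# 1) Binary joint log-likelihood: a cell with rate r is "active" with
#    probability p = 1 - exp(-r), so log(1 - p) = -r for empty cells.
rates = forecast_data.ravel()
active = observed_data.ravel() > 0
joint_ll = numpy.sum(numpy.where(active, numpy.log(1.0 - numpy.exp(-rates)), -rates))
print(joint_ll)  # approx. -6.7197988064

# 2) Expected simulated catalog: nine seed-0 uniform draws, each mapped onto
#    the cumulative forecast weights; every cell that is hit becomes active.
numpy.random.seed(0)
random_numbers = numpy.random.rand(9)  # same values as expected_random_numbers
sampling_weights = numpy.cumsum(rates) / numpy.sum(rates)
hit_cells = numpy.searchsorted(sampling_weights, random_numbers)  # assumed mapping of draws to cells
sim_fore = numpy.zeros(sampling_weights.shape)
sim_fore[numpy.unique(hit_cells)] = 1
print(sim_fore)  # [0. 0. 1. 1. 1. 1.], i.e. 4 distinct active cells after 9 draws

Running the sketch prints approximately -6.7197988064 and [0. 0. 1. 1. 1. 1.], matching the constants asserted in test_likelihood, test_binomial_likelihood, and test_simulate_active_cells.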