opoc2009.py
import numpy as np  # engine for numerical computing

from pypop7.optimizers.es.es import ES  # abstract class of all Evolution Strategies (ES) classes
from pypop7.optimizers.es.opoc2006 import OPOC2006  # (1+1)-Cholesky-CMA-ES 2006 (OPOC2006)


class OPOC2009(OPOC2006):
"""(1+1)-Cholesky-CMA-ES 2009 (OPOC2009).
Parameters
----------
problem : `dict`
problem arguments with the following common settings (`keys`):
* 'fitness_function' - objective function to be **minimized** (`func`),
* 'ndim_problem' - number of dimensionality (`int`),
* 'upper_boundary' - upper boundary of search range (`array_like`),
* 'lower_boundary' - lower boundary of search range (`array_like`).
options : `dict`
optimizer options with the following common settings (`keys`):
* 'max_function_evaluations' - maximum of function evaluations (`int`, default: `np.inf`),
* 'max_runtime' - maximal runtime to be allowed (`float`, default: `np.inf`),
* 'seed_rng' - seed for random number generation needed to be *explicitly* set (`int`).
and with the following particular settings (`keys`):
* 'sigma' - initial global step-size, aka mutation strength (`float`),
* 'mean' - initial (starting) point, aka mean of Gaussian search distribution (`array_like`),
* if not given, it will draw a random sample from the uniform distribution whose search range is
bounded by `problem['lower_boundary']` and `problem['upper_boundary']`.
Examples
--------
Use the black-box optimizer `OPOC2009` to minimize the well-known test function
`Rosenbrock <http://en.wikipedia.org/wiki/Rosenbrock_function>`_:
.. code-block:: python
:linenos:
>>> import numpy # engine for numerical computing
>>> from pypop7.benchmarks.base_functions import rosenbrock # function to be minimized
>>> from pypop7.optimizers.es.opoc2009 import OPOC2009
>>> problem = {'fitness_function': rosenbrock, # to define problem arguments
... 'ndim_problem': 2,
... 'lower_boundary': -5.0*numpy.ones((2,)),
... 'upper_boundary': 5.0*numpy.ones((2,))}
>>> options = {'max_function_evaluations': 5000, # to set optimizer options
... 'seed_rng': 2022,
... 'mean': 3.0*numpy.ones((2,)),
... 'sigma': 3.0} # global step-size may need to be fine-tuned for better performance
>>> opoc2009 = OPOC2009(problem, options) # to initialize the optimizer class
>>> results = opoc2009.optimize() # to run the optimization/evolution process
>>> print(f"OPOC2009: {results['n_function_evaluations']}, {results['best_so_far_y']}")
OPOC2009: 5000, 4.4227e-17
For its correctness checking of Python coding, please refer to `this code-based repeatability report
<https://github.com/Evolutionary-Intelligence/pypop/blob/main/pypop7/optimizers/es/_repeat_opoc2009.py>`_
for all details. For *pytest*-based automatic testing, please see `test_opoc2009.py
<https://github.com/Evolutionary-Intelligence/pypop/blob/main/pypop7/optimizers/es/test_opoc2009.py>`_.
References
----------
Suttorp, T., Hansen, N. and Igel, C., 2009.
`Efficient covariance matrix update for variable metric evolution strategies.
<https://link.springer.com/article/10.1007/s10994-009-5102-1>`_
Machine Learning, 75(2), pp.167-197.
"""

    def __init__(self, problem, options):
        OPOC2006.__init__(self, problem, options)

    def initialize(self, args=None, is_restart=False):
        mean, y, a, best_so_far_y, p_s = OPOC2006.initialize(self, args, is_restart)
        p_c = np.zeros((self.ndim_problem,))  # evolution path
        a_i = np.diag(np.ones((self.ndim_problem,)))  # inverse of the Cholesky factor (initialized to identity)
        return mean, y, a, a_i, best_so_far_y, p_s, p_c

    def iterate(self, mean=None, a=None, a_i=None, best_so_far_y=None, p_s=None, p_c=None, args=None):
        # sample and evaluate only one offspring
        z = self.rng_optimization.standard_normal((self.ndim_problem,))
        x = mean + self.sigma*np.dot(a, z)
        y = self._evaluate_fitness(x, args)
        # update the smoothed success probability and adapt the global step-size accordingly
        l_s = 1 if y <= best_so_far_y else 0
        p_s = (1.0 - self.c_p)*p_s + self.c_p*l_s
        self.sigma *= np.exp(self.lr_sigma*(p_s - self.p_ts)/(1.0 - self.p_ts))
        if y <= best_so_far_y:  # accept the offspring only if it is at least as good
            mean, best_so_far_y = x, y
            # update the evolution path and the Cholesky factor (and its inverse)
            if p_s < self.p_t:
                p_c = (1.0 - self.c_c)*p_c + np.sqrt(self.c_c*(2.0 - self.c_c))*np.dot(a, z)
                alpha = 1.0 - self.c_cov
            else:  # when the success rate is high, only decay the path and compensate in alpha
                p_c *= 1.0 - self.c_c
                alpha = 1.0 - self.c_cov + self.c_cov*self.c_c*(2.0 - self.c_c)
            beta = self.c_cov
            w = np.dot(a_i, p_c)
            w_norm = np.power(np.linalg.norm(w), 2)  # note: the *squared* norm of w
            s_w_norm = np.sqrt(1.0 + beta/alpha*w_norm)
            a = np.sqrt(alpha)*a + np.sqrt(alpha)/w_norm*(s_w_norm - 1.0)*np.dot(
                p_c[:, np.newaxis], w[np.newaxis, :])
            a_i = 1.0/np.sqrt(alpha)*a_i - 1.0/(np.sqrt(alpha)*w_norm)*(1.0 - 1.0/s_w_norm)*np.dot(
                w[:, np.newaxis], np.dot(w[np.newaxis, :], a_i))
        return mean, y, a, a_i, best_so_far_y, p_s, p_c
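
    # A restatement (not code from the paper) of what `iterate` maintains: with
    # w = A^{-1} p_c, the implicit covariance update C' = alpha*C + beta*p_c*p_c^T
    # is realized directly on the factor A (where C = A*A^T) as
    #     A' = sqrt(alpha)*A + sqrt(alpha)/||w||^2 * (sqrt(1 + (beta/alpha)*||w||^2) - 1) * p_c*w^T,
    # while A^{-1} is kept in sync by a matching Sherman-Morrison-style correction,
    # so neither storing C explicitly nor an O(n^3) decomposition is ever needed.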

    def restart_reinitialize(self, mean=None, y=None, a=None, a_i=None, best_so_far_y=None,
                             p_s=None, p_c=None, fitness=None, args=None):
        self._list_fitness.append(best_so_far_y)
        is_restart_1, is_restart_2 = self.sigma < self.sigma_threshold, False
        if len(self._list_fitness) >= self.stagnation:
            is_restart_2 = (self._list_fitness[-self.stagnation] - self._list_fitness[-1]) < self.fitness_diff
        is_restart = bool(is_restart_1) or bool(is_restart_2)
        if is_restart:
            self._print_verbose_info(fitness, y, True)
            if self.verbose:
                print(' ....... *** restart *** .......')
            self._n_restart += 1
            self._list_generations.append(self._n_generations)  # for each restart
            self._n_generations = 0
            self.sigma = np.copy(self._sigma_bak)
            mean, y, a, a_i, best_so_far_y, p_s, p_c = self.initialize(args, True)
            self._list_fitness = [best_so_far_y]
        return mean, y, a, a_i, best_so_far_y, p_s, p_c
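
    # Note on the restart logic above: a run is re-initialized from scratch when either
    # the global step-size has shrunk below `sigma_threshold`, or the best-so-far fitness
    # has improved by less than `fitness_diff` over the last `stagnation` generations;
    # `sigma` is then reset to its backed-up starting value.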

    def optimize(self, fitness_function=None, args=None):  # for all generations (iterations)
        fitness = ES.optimize(self, fitness_function)
        mean, y, a, a_i, best_so_far_y, p_s, p_c = self.initialize(args)
        while not self._check_terminations():
            self._print_verbose_info(fitness, y)
            mean, y, a, a_i, best_so_far_y, p_s, p_c = self.iterate(
                mean, a, a_i, best_so_far_y, p_s, p_c, args)
            self._n_generations += 1
            if self.is_restart:
                mean, y, a, a_i, best_so_far_y, p_s, p_c = self.restart_reinitialize(
                    mean, y, a, a_i, best_so_far_y, p_s, p_c, fitness, args)
        return self._collect(fitness, y, mean)
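

if __name__ == '__main__':
    # A minimal, self-contained sanity check mirroring the docstring example above;
    # it assumes `pypop7` is installed and `rosenbrock` is importable from this path
    # (the final fitness value may differ slightly across platforms/NumPy versions):
    import numpy
    from pypop7.benchmarks.base_functions import rosenbrock  # function to be minimized
    demo_problem = {'fitness_function': rosenbrock,
                    'ndim_problem': 2,
                    'lower_boundary': -5.0*numpy.ones((2,)),
                    'upper_boundary': 5.0*numpy.ones((2,))}
    demo_options = {'max_function_evaluations': 5000,
                    'seed_rng': 2022,
                    'mean': 3.0*numpy.ones((2,)),
                    'sigma': 3.0}
    demo_results = OPOC2009(demo_problem, demo_options).optimize()
    print(f"OPOC2009: {demo_results['n_function_evaluations']}, {demo_results['best_so_far_y']}")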