
Commit 47e6705

Merge pull request #6 from esa/khan-boundaries
Khan boundaries gradient fix
2 parents 653b75e + bc9d9fc commit 47e6705

4 files changed: +66 −36 lines changed

.gitlab-ci.yml (+20 −2)
@@ -163,12 +163,30 @@ test-wheels:
   needs:
     - python-wheels:manylinux2014
 
-publish-test:
+publish-gitlab:
   stage: deploy
   image: python:3.9
   script:
     - pip install twine
-    # upload to the project pypi (whl) (change testpypi to pypi to upload to PyPi)
+    # upload to repo (ESA Gitlab)
+    - twine upload --non-interactive --disable-progress-bar --verbose -u gitlab-ci-token -p $CI_JOB_TOKEN --repository-url https://gitlab.esa.int/api/v4/projects/$CI_PROJECT_ID/packages/pypi pyoptgra-*.whl
+    - twine upload --non-interactive --disable-progress-bar --verbose -u gitlab-ci-token -p $CI_JOB_TOKEN --repository-url https://gitlab.esa.int/api/v4/projects/$CI_PROJECT_ID/packages/pypi pyoptgra-*.tar.gz
+
+  needs:
+    - python-sdist
+    - test-wheels
+    - python-wheels:manylinux2014
+  only:
+    refs:
+      - master
+  when: manual
+
+publish-testpypi:
+  stage: deploy
+  image: python:3.9
+  script:
+    - pip install twine
+    # upload to the project pypi (whl)
     - twine upload --non-interactive --disable-progress-bar --verbose --repository testpypi -u __token__ -p $TEST_PYPI_TOKEN pyoptgra-*.whl
     # upload to the project pypi (sdist)
     - twine upload --non-interactive --disable-progress-bar --verbose --repository testpypi -u __token__ -p $TEST_PYPI_TOKEN pyoptgra-*.tar.gz

pyoptgra/optgra.py (+40 −32)
@@ -13,7 +13,6 @@
 # and https://essr.esa.int/license/european-space-agency-community-license-v2-4-weak-copyleft
 
 from math import isfinite
-from collections import deque
 from typing import Callable, List, Optional, Tuple, Union
 
 import numpy as np
@@ -29,7 +28,7 @@
 
 
 class khan_function:
-    """Function to smothly enforce optimisation parameter bounds as Michal Khan used to do:
+    r"""Function to smothly enforce optimisation parameter bounds as Michal Khan used to do:
 
     .. math::
 
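The hunk above only turns the docstring into a raw string; the mapping itself lies outside the diff context. For orientation, a minimal sketch (not pyoptgra's code, helper names are illustrative) of the sine-based transform implied by the inverse changed further below:

import numpy as np

# Illustrative helpers, not the library's API: the forward mapping implied by the
# clipped arcsin inverse shown in the _eval_inv hunk, for finite bounds lb < ub.
def khan_eval(x_khan, lb, ub, a=1.0, b=0.0):
    # maps any real x_khan to a value strictly inside [lb, ub]
    return (ub + lb) / 2 + (ub - lb) / 2 * np.sin(a * x_khan + b)

def khan_eval_inv(x, lb, ub, a=1.0, b=0.0):
    # inverse of the mapping above; mirrors the clipped arcsin in the diff
    arg = (2 * x - ub - lb) / (ub - lb)
    clip_value = 1.0 - 1e-8  # keep arcsin away from the boundaries
    arg = np.clip(arg, -clip_value, clip_value)
    return (np.arcsin(arg) - b) / a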
@@ -59,9 +58,14 @@ def __init__(self, lb: List[float], ub: List[float], unity_gradient: bool = True
         self._ub = np.asarray(ub)
         self._nx = len(lb)
 
-        # determine finite lower/upper bounds
-        finite_lb = np.isfinite(self._lb)
-        finite_ub = np.isfinite(self._ub)
+        # determine finite lower/upper bounds\
+        def _isfinite(a: np.ndarray):
+            """Custom _ function"""
+            almost_infinity = 1e300
+            return np.logical_and(np.isfinite(a), np.abs(a) < almost_infinity)
+
+        finite_lb = _isfinite(self._lb)
+        finite_ub = _isfinite(self._ub)
 
         # we only support cases where both lower and upper bounds are finite if given
         check = np.where(finite_lb != finite_ub)[0]
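A brief illustration (not part of the commit) of what the nested _isfinite helper guards against: sentinel bounds such as ±1e300 are technically finite for NumPy and would otherwise be pushed through the sine mapping.

import numpy as np

# sentinel "unbounded" values like -1e300 pass np.isfinite but fail the stricter check
lb = np.array([-1e300, 0.0])
print(np.isfinite(lb))                                      # [ True  True]
print(np.logical_and(np.isfinite(lb), np.abs(lb) < 1e300))  # [False  True]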
@@ -71,8 +75,14 @@ def __init__(self, lb: List[float], ub: List[float], unity_gradient: bool = True
                 "must be finite."
                 f"Detected mismatch at decision vector indices: {check}"
             )
+
+        # also exclude parameters where lower and upper bounds are identical
+        with np.errstate(invalid="ignore"):
+            # we ignore RuntimeWarning: invalid value encountered in subtract
+            nonzero_diff = abs(self._lb - self._ub) > 1e-9
+
         # store the mask of finite bounds
-        self.mask = finite_ub
+        self.mask = np.logical_and(finite_ub, nonzero_diff)
         self._lb_masked = self._lb[self.mask]
         self._ub_masked = self._ub[self.mask]
 
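A brief illustration (not part of the commit) of the new mask: parameters whose lower and upper bounds coincide would put a zero denominator (ub - lb) into the mapping, so they are excluded alongside non-finite bounds.

import numpy as np

lb = np.array([0.0, 2.0, -np.inf])
ub = np.array([1.0, 2.0, np.inf])
with np.errstate(invalid="ignore"):
    nonzero_diff = abs(lb - ub) > 1e-9
mask = np.logical_and(np.isfinite(ub), nonzero_diff)
print(mask)  # [ True False False] -> only the first parameter gets the Khan mapping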

@@ -93,12 +103,14 @@ def _eval_inv(self, x_masked: np.ndarray) -> np.ndarray:
         arg = (2 * x_masked - self._ub_masked - self._lb_masked) / (
             self._ub_masked - self._lb_masked
         )
-        if np.any((arg < -1.0) | (arg > 1.0)):
+
+        clip_value = 1.0 - 1e-8  # avoid boundaries
+        if np.any((arg < -clip_value) | (arg > clip_value)):
             print(
                 "WARNING: Numerical inaccuracies encountered during khan_function inverse.",
                 "Clipping parameters to valid range.",
             )
-            arg = np.clip(arg, -1.0, 1.0)
+            arg = np.clip(arg, -clip_value, clip_value)
         return (np.arcsin(arg) - self._b) / self._a
 
     def _eval_grad(self, x_khan_masked: np.ndarray) -> np.ndarray:
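A brief illustration (not part of the commit) of why the inverse now clips to 1 - 1e-8 instead of exactly 1: the derivative of arcsin diverges at ±1, so a point sitting exactly on a bound would produce an unbounded gradient.

import numpy as np

for arg in (1.0, 1.0 - 1e-8):
    with np.errstate(divide="ignore"):
        # arcsin'(arg) = 1 / sqrt(1 - arg**2)
        print(arg, 1.0 / np.sqrt(1.0 - arg ** 2))
# 1.0        inf
# 0.99999999 ~7071.1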
@@ -249,31 +261,26 @@ def wrapped_fitness(x):
                 # if Khan function is used, we first need to convert to pagmo parameters
                 x = khanf.eval(x_khan=x)
 
-            fixed_x = x
-            lb, ub = problem.get_bounds()
-
             if force_bounds:
-                for i in range(problem.get_nx()):
-                    if x[i] < lb[i]:
-                        fixed_x[i] = lb[i]
-                    if x[i] > ub[i]:
-                        fixed_x[i] = ub[i]
+                fixed_x = np.clip(x, lb, ub)
+            else:
+                fixed_x = x
 
-            result = deque(problem.fitness(fixed_x))
+            # call pagmo fitness function
+            result = problem.fitness(fixed_x)
 
             # add constraints derived from box bounds
             if bounds_to_constraints:
-                for i in range(len(lb)):
-                    if isfinite(lb[i]):
-                        result.append(x[i] - lb[i])
+                # Add (x[i] - lb[i]) for finite lb[i] and (ub[i] - x[i]) for finite ub[i]
+                result = np.concatenate(
+                    [result, (x - lb)[np.isfinite(lb)], (ub - x)[np.isfinite(ub)]]
+                )
 
-                for i in range(len(ub)):
-                    if isfinite(ub[i]):
-                        result.append(ub[i] - x[i])
+            # reorder constraint order, optgra expects the merit function last, pagmo has it first
+            # equivalent to rotating in a dequeue
+            result = np.concatenate([result[1:], result[0:1]])
 
-            # optgra expects the fitness last, pagmo has the fitness first
-            result.rotate(-1)
-            return list(result)
+            return result.tolist()  # return a list
 
         return wrapped_fitness
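A quick check (not part of the commit) that the concatenate-based reordering matches the removed deque.rotate(-1): pagmo returns the merit value first, optgra expects it last.

import numpy as np
from collections import deque

result = np.array([10.0, 1.0, 2.0, 3.0])        # [fitness, c1, c2, c3]

old = deque(result)
old.rotate(-1)                                   # deque([1.0, 2.0, 3.0, 10.0])

new = np.concatenate([result[1:], result[0:1]])
print(list(old) == new.tolist())                 # True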

@@ -284,17 +291,14 @@ def _wrap_gradient_func(
         force_bounds=False,
         khanf: Optional[khan_function] = None,
     ):
+
         # get the sparsity pattern to index the sparse gradients
         sparsity_pattern = problem.gradient_sparsity()
         f_indices, x_indices = sparsity_pattern.T  # Unpack indices
 
         # expected shape of the non-sparse gradient matrix
         shape = (problem.get_nf(), problem.get_nx())
 
-        # get problem parameters
-        lb, ub = problem.get_bounds()
-        nx = problem.get_nx()
-
         def wrapped_gradient(x):
 
             # we are using vectorisation internally -> convert to ndarray
@@ -304,6 +308,10 @@ def wrapped_gradient(x):
                 # if Khan function is used, we first need to convert to pagmo parameters
                 x = khanf.eval(x_khan=x)
 
+            # get problem parameters
+            lb, ub = problem.get_bounds()
+            nx = problem.get_nx()
+
             # force parameters to lower and upper bounds if needed
             if force_bounds:
                 fixed_x = np.clip(x, lb, ub)
@@ -321,15 +329,15 @@ def wrapped_gradient(x):
             result[f_indices, x_indices] = sparse_values
 
             # add box-derived constraints
+            result = result.tolist()
             if bounds_to_constraints:
-
                 # lower bound gradients
                 finite_indices = np.isfinite(lb)  # Boolean mask for valid indices
                 box_lb_grads = np.eye(nx)[finite_indices]
 
                 # upper bound gradients
                 finite_indices = np.isfinite(ub)  # Boolean mask for valid indices
-                box_ub_grads = np.eye(nx)[finite_indices]
+                box_ub_grads = -1.0 * np.eye(nx)[finite_indices]
 
                 # append box bounds to gradient matrix
                 result = np.concatenate([result, box_lb_grads, box_ub_grads])
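A minimal sketch (not part of the commit) of the sign fix above: the box constraints appended in wrapped_fitness are (x - lb) for finite lower bounds and (ub - x) for finite upper bounds, so their gradient rows must be +e_i and -e_i respectively; the old code used +e_i for both.

import numpy as np

nx = 3
lb = np.array([0.0, -np.inf, -1.0])
ub = np.array([1.0, np.inf, np.inf])

box_lb_grads = np.eye(nx)[np.isfinite(lb)]         # rows of +I: d(x - lb)/dx
box_ub_grads = -1.0 * np.eye(nx)[np.isfinite(ub)]  # rows of -I: d(ub - x)/dx
print(box_lb_grads)   # +e_0 and +e_2
print(box_ub_grads)   # -e_0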

pyproject.toml (+1 −1)
@@ -12,7 +12,7 @@ license = {text = "GPL-3.0 or ESCL-2.4"}
 name = "pyoptgra"
 readme = "README.rst"
 requires-python = ">=3.9"
-version = "1.0.0"
+version = "1.0.1"
 
 [build-system]
 build-backend = "scikit_build_core.build"

tests/python/test.py (+5 −1)
@@ -308,7 +308,7 @@ def gradient_no_constraints_test(self):
     def gradient_with_constraints_test(self):
         # 1. Run Luksan-Vlcek problem with optgra
         prob = pygmo.problem(luksan_vlcek())
-        prob.c_tol = 1e-6
+        prob.c_tol = 1e-7
         og = pyoptgra.optgra(
             optimization_method=1,
             max_iterations=100,
@@ -325,6 +325,8 @@ def gradient_with_constraints_test(self):
 
         # objective function
         self.assertLess(pop.champion_f[0], 2.26)
+        # checking exact value as regression test
+        self.assertEqual(pop.champion_f[0], 0.82929210248477)
 
         # equality constraints
         for i in [1, 2, 3, 4]:
@@ -351,6 +353,8 @@ def gradient_with_constraints_test(self):
 
         # objective function
         self.assertLess(pop2.champion_f[0], 2.26)
+        # checking exact value as regression test
+        self.assertEqual(pop2.champion_f[0], 0.8292921025820391)
 
         # equality constraints
         for i in [1, 2, 3, 4]:

0 commit comments
