# and https://essr.esa.int/license/european-space-agency-community-license-v2-4-weak-copyleft

from math import isfinite
- from collections import deque
from typing import Callable, List, Optional, Tuple, Union

import numpy as np

class khan_function:
-     """Function to smoothly enforce optimisation parameter bounds as Michal Khan used to do:
+     r"""Function to smoothly enforce optimisation parameter bounds as Michal Khan used to do:

    .. math::
@@ -59,9 +58,14 @@ def __init__(self, lb: List[float], ub: List[float], unity_gradient: bool = True
        self._ub = np.asarray(ub)
        self._nx = len(lb)

-       # determine finite lower/upper bounds
-       finite_lb = np.isfinite(self._lb)
-       finite_ub = np.isfinite(self._ub)
+       # determine finite lower/upper bounds
+       def _isfinite(a: np.ndarray):
+           """Custom _isfinite function"""
+           almost_infinity = 1e300
+           return np.logical_and(np.isfinite(a), np.abs(a) < almost_infinity)
+
+       finite_lb = _isfinite(self._lb)
+       finite_ub = _isfinite(self._ub)

        # we only support cases where both lower and upper bounds are finite if given
        check = np.where(finite_lb != finite_ub)[0]
@@ -71,8 +75,14 @@ def __init__(self, lb: List[float], ub: List[float], unity_gradient: bool = True
                "must be finite."
                f"Detected mismatch at decision vector indices: {check}"
            )
+
+       # also exclude parameters where lower and upper bounds are identical
+       with np.errstate(invalid="ignore"):
+           # we ignore RuntimeWarning: invalid value encountered in subtract
+           nonzero_diff = abs(self._lb - self._ub) > 1e-9
+
        # store the mask of finite bounds
-       self.mask = finite_ub
+       self.mask = np.logical_and(finite_ub, nonzero_diff)
        self._lb_masked = self._lb[self.mask]
        self._ub_masked = self._ub[self.mask]
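A minimal standalone sketch of what the new mask computes (the helper name build_khan_mask and the sample bounds are illustrative, not part of the module): pseudo-infinite bounds such as ±1e305 and degenerate bounds with lb == ub are now excluded, so only genuinely bounded, non-fixed parameters get passed through the Khan transformation.

    import numpy as np

    def build_khan_mask(lb, ub, almost_infinity=1e300, tol=1e-9):
        # a bound counts as finite only if it is a true float below ~1e300
        finite_ub = np.logical_and(np.isfinite(ub), np.abs(ub) < almost_infinity)
        # parameters whose lower and upper bounds coincide are left untransformed
        nonzero_diff = np.abs(np.asarray(lb) - np.asarray(ub)) > tol
        return np.logical_and(finite_ub, nonzero_diff)

    lb = np.array([0.0, -1e305, 2.0])
    ub = np.array([1.0, 1e305, 2.0])
    print(build_khan_mask(lb, ub))  # [ True False False]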
@@ -93,12 +103,14 @@ def _eval_inv(self, x_masked: np.ndarray) -> np.ndarray:
        arg = (2 * x_masked - self._ub_masked - self._lb_masked) / (
            self._ub_masked - self._lb_masked
        )
-       if np.any((arg < -1.0) | (arg > 1.0)):
+
+       clip_value = 1.0 - 1e-8  # avoid boundaries
+       if np.any((arg < -clip_value) | (arg > clip_value)):
            print(
                "WARNING: Numerical inaccuracies encountered during khan_function inverse.",
                "Clipping parameters to valid range.",
            )
-           arg = np.clip(arg, -1.0, 1.0)
+           arg = np.clip(arg, -clip_value, clip_value)
        return (np.arcsin(arg) - self._b) / self._a

    def _eval_grad(self, x_khan_masked: np.ndarray) -> np.ndarray:
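From this inverse one can read off the forward mapping, x = (ub + lb)/2 + (ub - lb)/2 * sin(a * x_khan + b); at arg = ±1 the parameter sits exactly on a bound, where arcsin is infinitely steep and the forward transform is flat. Clipping to ±(1 - 1e-8) rather than ±1 therefore keeps the inverse finite. A rough scalar illustration (the inverse_slope helper is illustrative only, assuming a = 1 and b = 0):

    import numpy as np

    lb, ub = 0.0, 1.0
    clip_value = 1.0 - 1e-8

    def inverse_slope(arg):
        # d(x_khan)/dx of the arcsin-based inverse; diverges as |arg| -> 1
        return 2.0 / ((ub - lb) * np.sqrt(1.0 - arg**2))

    print(inverse_slope(np.clip(1.0, -1.0, 1.0)))                # inf (divide-by-zero warning)
    print(inverse_slope(np.clip(1.0, -clip_value, clip_value)))  # ~1.4e4, large but finite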
@@ -249,31 +261,26 @@ def wrapped_fitness(x):
            # if Khan function is used, we first need to convert to pagmo parameters
            x = khanf.eval(x_khan=x)

-       fixed_x = x
-       lb, ub = problem.get_bounds()
-
        if force_bounds:
-           for i in range(problem.get_nx()):
-               if x[i] < lb[i]:
-                   fixed_x[i] = lb[i]
-               if x[i] > ub[i]:
-                   fixed_x[i] = ub[i]
+           fixed_x = np.clip(x, lb, ub)
+       else:
+           fixed_x = x

-       result = deque(problem.fitness(fixed_x))
+       # call the pagmo fitness function
+       result = problem.fitness(fixed_x)

        # add constraints derived from box bounds
        if bounds_to_constraints:
-           for i in range(len(lb)):
-               if isfinite(lb[i]):
-                   result.append(x[i] - lb[i])
+           # Add (x[i] - lb[i]) for finite lb[i] and (ub[i] - x[i]) for finite ub[i]
+           result = np.concatenate(
+               [result, (x - lb)[np.isfinite(lb)], (ub - x)[np.isfinite(ub)]]
+           )

-           for i in range(len(ub)):
-               if isfinite(ub[i]):
-                   result.append(ub[i] - x[i])
+       # reorder the constraints: optgra expects the merit function last, pagmo has it first
+       # (equivalent to rotating a deque by -1)
+       result = np.concatenate([result[1:], result[0:1]])

-       # optgra expects the fitness last, pagmo has the fitness first
-       result.rotate(-1)
-       return list(result)
+       return result.tolist()  # return a list

    return wrapped_fitness
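The slicing-based reordering above is equivalent to the deque rotation it replaces: both move the merit (objective) value from the front of the pagmo fitness vector to the back, which is the order optgra expects. A quick check with made-up fitness values:

    from collections import deque

    import numpy as np

    fitness = [3.5, 0.1, -0.2]  # pagmo order: objective first, then constraints

    rotated = deque(fitness)
    rotated.rotate(-1)          # previous implementation: rotate the deque left by one

    sliced = np.concatenate([np.asarray(fitness)[1:], np.asarray(fitness)[0:1]])

    print(list(rotated))    # [0.1, -0.2, 3.5]
    print(sliced.tolist())  # [0.1, -0.2, 3.5]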
@@ -284,17 +291,14 @@ def _wrap_gradient_func(
    force_bounds=False,
    khanf: Optional[khan_function] = None,
):
+
    # get the sparsity pattern to index the sparse gradients
    sparsity_pattern = problem.gradient_sparsity()
    f_indices, x_indices = sparsity_pattern.T  # Unpack indices

    # expected shape of the non-sparse gradient matrix
    shape = (problem.get_nf(), problem.get_nx())

-   # get problem parameters
-   lb, ub = problem.get_bounds()
-   nx = problem.get_nx()
-
    def wrapped_gradient(x):

        # we are using vectorisation internally -> convert to ndarray
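For context on the indexing set up here: pagmo returns the gradient as a flat vector of non-zero entries ordered according to gradient_sparsity(), and the wrapper scatters them into a dense (nf x nx) matrix. A minimal sketch of that scatter step, with invented sparsity data:

    import numpy as np

    # invented example: 2 fitness components, 3 decision variables
    sparsity_pattern = np.array([[0, 0], [0, 2], [1, 1]])  # (fitness index, variable index) pairs
    sparse_values = np.array([1.0, 2.0, 3.0])              # flat gradient as pagmo would return it

    f_indices, x_indices = sparsity_pattern.T
    dense = np.zeros((2, 3))
    dense[f_indices, x_indices] = sparse_values
    print(dense)
    # [[1. 0. 2.]
    #  [0. 3. 0.]]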
@@ -304,6 +308,10 @@ def wrapped_gradient(x):
            # if Khan function is used, we first need to convert to pagmo parameters
            x = khanf.eval(x_khan=x)

+       # get problem parameters
+       lb, ub = problem.get_bounds()
+       nx = problem.get_nx()
+
        # force parameters to lower and upper bounds if needed
        if force_bounds:
            fixed_x = np.clip(x, lb, ub)
@@ -321,15 +329,15 @@ def wrapped_gradient(x):
        result[f_indices, x_indices] = sparse_values

        # add box-derived constraints
+       result = result.tolist()
        if bounds_to_constraints:
-
            # lower bound gradients
            finite_indices = np.isfinite(lb)  # Boolean mask for valid indices
            box_lb_grads = np.eye(nx)[finite_indices]

            # upper bound gradients
            finite_indices = np.isfinite(ub)  # Boolean mask for valid indices
-           box_ub_grads = np.eye(nx)[finite_indices]
+           box_ub_grads = -1.0 * np.eye(nx)[finite_indices]

            # append box bounds to gradient matrix
            result = np.concatenate([result, box_lb_grads, box_ub_grads])
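The sign flip on box_ub_grads appears to be the key fix in this hunk: the box-derived constraints appended by the fitness wrapper are x[i] - lb[i] (gradient +e_i) and ub[i] - x[i] (gradient -e_i), so the upper-bound rows must be negated. A small check with hypothetical bounds:

    import numpy as np

    nx = 3
    lb = np.array([0.0, -1.0, -np.inf])
    ub = np.array([1.0, np.inf, 2.0])

    # d(x - lb)/dx = +I, restricted to finite lower bounds
    box_lb_grads = np.eye(nx)[np.isfinite(lb)]
    # d(ub - x)/dx = -I, restricted to finite upper bounds
    box_ub_grads = -1.0 * np.eye(nx)[np.isfinite(ub)]

    print(box_lb_grads)  # rows e_0 and e_1
    print(box_ub_grads)  # rows -e_0 and -e_2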