Commit 48f0bc1

Merge pull request #12 from esa/fix/get_extra_info
Fix/get extra info
2 parents (b92105c, fedcb12), commit 48f0bc1

2 files changed (+20 −2)

pyoptgra/optgra.py (+13 −2)
@@ -78,6 +78,12 @@ def _get_constraint_violation(
     return violation_norm, num_violations


+def _assert_finite(arr: np.ndarray, name: str):
+    mask = ~np.isfinite(arr)  # True for NaN, Inf, -Inf
+    if np.any(mask):
+        raise ValueError(f"Encountered non-finite values in {name} at indices: {np.where(mask)[0]}")
+
+
 class optgra:
     """
     This class is a user defined algorithm (UDA) providing a wrapper around OPTGRA, which is written
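For reference, the new guard can be exercised on its own. The definition below is copied verbatim from the hunk above; the sample arrays are illustrative:

import numpy as np

def _assert_finite(arr: np.ndarray, name: str):
    mask = ~np.isfinite(arr)  # True for NaN, Inf, -Inf
    if np.any(mask):
        raise ValueError(f"Encountered non-finite values in {name} at indices: {np.where(mask)[0]}")

_assert_finite(np.array([0.0, 1.5, 2.0]), "decision vector")  # all finite: passes silently

try:
    _assert_finite(np.array([0.0, np.nan, np.inf]), "decision vector")
except ValueError as err:
    print(err)  # Encountered non-finite values in decision vector at indices: [1 2]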
@@ -126,6 +132,7 @@ def wrapped_fitness(x):

             # we are using vectorisation internally -> convert to ndarray
             x = np.asarray(x, dtype=np.float64)
+            _assert_finite(x, "decision vector")  # catch nan values

             if khanf:
                 # if Khan function is used, we first need to convert to pagmo parameters
@@ -149,6 +156,7 @@ def wrapped_fitness(x):
             # reorder constraint order, optgra expects the merit function last, pagmo has it first
             # equivalent to rotating in a dequeue
             result = np.concatenate([result[1:], result[0:1]])
+            _assert_finite(result, "fitness")  # catch nan values

             return result.tolist()  # return a list

@@ -173,6 +181,7 @@ def wrapped_gradient(x):

             # we are using vectorisation internally -> convert to ndarray
             x = np.asarray(x, dtype=np.float64)
+            _assert_finite(x, "decision vector")  # catch nan values

             if khanf:
                 # if Khan function is used, we first need to convert to pagmo parameters
@@ -221,6 +230,8 @@ def wrapped_gradient(x):
                 khan_grad = khanf.eval_grad(x)
                 result = result @ khan_grad

+            _assert_finite(result, "gradient")  # catch nan values
+
             return result.tolist()  # return as a list, not ndarray

         return wrapped_gradient
@@ -229,8 +240,8 @@ def __init__(
         self,
         max_iterations: int = 150,
         max_correction_iterations: int = 90,
-        max_distance_per_iteration: int = 10,
-        perturbation_for_snd_order_derivatives: int = 1,
+        max_distance_per_iteration: float = 10,
+        perturbation_for_snd_order_derivatives: float = 1,
         variable_scaling_factors: List[float] = [],  # x_dim
         variable_types: List[int] = [],  # x_dim
         constraint_priorities: List[int] = [],  # f_dim
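The last hunk only corrects type hints: both parameters describe continuous quantities, so float is the accurate annotation. A minimal construction sketch with made-up fractional values, assuming pygmo and pyoptgra are importable:

import pygmo
import pyoptgra

# Fractional values are now consistent with the declared parameter types.
algo = pygmo.algorithm(
    pyoptgra.optgra(
        max_distance_per_iteration=0.5,
        perturbation_for_snd_order_derivatives=1e-6,
    )
)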

tests/python/test.py (+7 −0)
@@ -272,6 +272,13 @@ def set_seed(self, seed):
         algo = pygmo.algorithm(pyoptgra.optgra(constraint_priorities=[1] * 61))
         algo.evolve(pop)

+        # check that nan in the decision vector is caught
+        x = pop.get_x()[0]
+        x[3] = np.nan
+        pop.set_x(0, x)
+        with self.assertRaises(ValueError):
+            algo.evolve(pop)
+
     def basic_no_gradient_test(self):
         # Basic test that the call works and the result changes. No constraints, not gradients.

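Outside the test suite, the same guard surfaces as a ValueError raised from evolve. A standalone sketch, where the rosenbrock problem and the poisoned index are illustrative choices, not part of the commit:

import numpy as np
import pygmo
import pyoptgra

pop = pygmo.population(pygmo.problem(pygmo.rosenbrock(4)), size=1)
x = pop.get_x()[0]
x[2] = np.nan  # poison one component of the decision vector
pop.set_x(0, x)

algo = pygmo.algorithm(pyoptgra.optgra())
try:
    algo.evolve(pop)  # the wrapped fitness now raises instead of passing NaN to OPTGRA
except ValueError as err:
    print(err)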
