Skip to content

Commit 793bbf8

Browse files
authored
more code cleaning/polishing (#571)

* Fix acq min bug
* Update docs, make `random_state` not a property of acqs
* Fix giving bad seeds to L-BFGS-B
* Remove debug code
* Relax constraint.approx test slightly
* GPHedge: ensure random state
* Nitpick fixes
* Make constraints work with parameter types
* Use `deepcopy` when copying constraint in `ConstantLiar`
* Move the instantiation of `ConstraintModel`
* Update warning
* Remove unnecessary random state storing
* Remove unnecessary import
* Add a `.random_sample` function to the optimizer
* Fix types, refactor some test code
* Use AE spelling
* Remove unnecessary `extra_kwargs`
1 parent f6cde7f commit 793bbf8

File tree

4 files changed

+46
-151
lines changed

4 files changed

+46
-151
lines changed

bayes_opt/logger.py

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
from colorama import Fore, just_fix_windows_console
99

1010
if TYPE_CHECKING:
11-
from bayes_opt.parameter import ParameterConfig
11+
from bayes_opt.parameter import ParamsType
1212

1313
just_fix_windows_console()
1414

@@ -28,9 +28,9 @@ class ScreenLogger:
2828

2929
_default_cell_size = 9
3030
_default_precision = 4
31-
_colour_new_max = Fore.MAGENTA
32-
_colour_regular_message = Fore.RESET
33-
_colour_reset = Fore.RESET
31+
_color_new_max = Fore.MAGENTA
32+
_color_regular_message = Fore.RESET
33+
_color_reset = Fore.RESET
3434

3535
def __init__(self, verbose: int = 2, is_constrained: bool = False) -> None:
3636
self._verbose = verbose
@@ -141,8 +141,8 @@ def _print_step(
141141
self,
142142
keys: list[str],
143143
result: dict[str, Any],
144-
params_config: Mapping[str, ParameterConfig],
145-
colour: str = _colour_regular_message,
144+
params_config: Mapping[str, ParamsType],
145+
color: str = _color_regular_message,
146146
) -> str:
147147
"""Print a step.
148148
@@ -154,12 +154,12 @@ def _print_step(
154154
keys : list[str]
155155
The parameter keys.
156156
157-
params_config : Mapping[str, ParameterConfig]
157+
params_config : Mapping[str, ParamsType]
158158
The configuration to map the key to the parameter for correct formatting.
159159
160-
colour : str, optional
160+
color : str, optional
161161
Color to use for the output.
162-
(Default value = _colour_regular_message, equivalent to Fore.RESET)
162+
(Default value = _color_regular_message, equivalent to Fore.RESET)
163163
164164
Returns
165165
-------
@@ -178,7 +178,7 @@ def _print_step(
178178
else params_config[key].to_string(val, self._default_cell_size)
179179
for key, val in params.items()
180180
]
181-
return "| " + " | ".join(colour + x + self._colour_reset for x in cells if x is not None) + " |"
181+
return "| " + " | ".join(color + x + self._color_reset for x in cells if x is not None) + " |"
182182

183183
def _print_header(self, keys: list[str]) -> str:
184184
"""Print the header of the log.
@@ -258,7 +258,7 @@ def log_optimization_step(
258258
self,
259259
keys: list[str],
260260
result: dict[str, Any],
261-
params_config: Mapping[str, ParameterConfig],
261+
params_config: Mapping[str, ParamsType],
262262
current_max: dict[str, Any] | None,
263263
) -> None:
264264
"""Log an optimization step.
@@ -271,7 +271,7 @@ def log_optimization_step(
271271
result : dict[str, Any]
272272
The result dictionary for the most recent step.
273273
274-
params_config : Mapping[str, ParameterConfig]
274+
params_config : Mapping[str, ParamsType]
275275
The configuration to map the key to the parameter for correct formatting.
276276
277277
current_max : dict[str, Any] | None
@@ -283,8 +283,8 @@ def log_optimization_step(
283283
return
284284

285285
if self._verbose == 2 or is_new_max:
286-
colour = self._colour_new_max if is_new_max else self._colour_regular_message
287-
line = self._print_step(keys, result, params_config, colour=colour) + "\n"
286+
color = self._color_new_max if is_new_max else self._color_regular_message
287+
line = self._print_step(keys, result, params_config, color=color) + "\n"
288288
if self._verbose:
289289
print(line, end="")
290290

tests/test_acquisition.py

Lines changed: 26 additions & 131 deletions
Original file line numberDiff line numberDiff line change
@@ -406,130 +406,30 @@ def verify_optimizers_match(optimizer1, optimizer2):
406406
assert suggestion1 == suggestion2, f"\nSuggestion 1: {suggestion1}\nSuggestion 2: {suggestion2}"
407407

408408

409-
def test_integration_upper_confidence_bound(target_func_x_and_y, pbounds, tmp_path):
410-
"""Test save/load integration with UpperConfidenceBound acquisition."""
411-
acquisition_function = UpperConfidenceBound(kappa=2.576)
412-
413-
# Create and run first optimizer
414-
optimizer = BayesianOptimization(
415-
f=target_func_x_and_y,
416-
pbounds=pbounds,
417-
acquisition_function=acquisition_function,
418-
random_state=1,
419-
verbose=0,
420-
)
421-
optimizer.maximize(init_points=2, n_iter=3)
422-
423-
# Save state
424-
state_path = tmp_path / "ucb_state.json"
425-
optimizer.save_state(state_path)
426-
427-
# Create new optimizer and load state
428-
new_optimizer = BayesianOptimization(
429-
f=target_func_x_and_y,
430-
pbounds=pbounds,
431-
acquisition_function=UpperConfidenceBound(kappa=2.576),
432-
random_state=1,
433-
verbose=0,
434-
)
435-
new_optimizer.load_state(state_path)
436-
437-
verify_optimizers_match(optimizer, new_optimizer)
438-
439-
440-
def test_integration_probability_improvement(target_func_x_and_y, pbounds, tmp_path):
441-
"""Test save/load integration with ProbabilityOfImprovement acquisition."""
442-
acquisition_function = ProbabilityOfImprovement(xi=0.01)
443-
444-
optimizer = BayesianOptimization(
445-
f=target_func_x_and_y,
446-
pbounds=pbounds,
447-
acquisition_function=acquisition_function,
448-
random_state=1,
449-
verbose=0,
450-
)
451-
optimizer.maximize(init_points=2, n_iter=3)
452-
453-
state_path = tmp_path / "pi_state.json"
454-
optimizer.save_state(state_path)
455-
456-
new_optimizer = BayesianOptimization(
457-
f=target_func_x_and_y,
458-
pbounds=pbounds,
459-
acquisition_function=ProbabilityOfImprovement(xi=0.01),
460-
random_state=1,
461-
verbose=0,
462-
)
463-
new_optimizer.load_state(state_path)
464-
465-
verify_optimizers_match(optimizer, new_optimizer)
466-
467-
468-
def test_integration_expected_improvement(target_func_x_and_y, pbounds, tmp_path):
469-
"""Test save/load integration with ExpectedImprovement acquisition."""
470-
acquisition_function = ExpectedImprovement(xi=0.01)
471-
472-
optimizer = BayesianOptimization(
473-
f=target_func_x_and_y,
474-
pbounds=pbounds,
475-
acquisition_function=acquisition_function,
476-
random_state=1,
477-
verbose=0,
478-
)
479-
optimizer.maximize(init_points=2, n_iter=3)
480-
481-
state_path = tmp_path / "ei_state.json"
482-
optimizer.save_state(state_path)
483-
484-
new_optimizer = BayesianOptimization(
485-
f=target_func_x_and_y,
486-
pbounds=pbounds,
487-
acquisition_function=ExpectedImprovement(xi=0.01),
488-
random_state=1,
489-
verbose=0,
490-
)
491-
new_optimizer.load_state(state_path)
492-
493-
verify_optimizers_match(optimizer, new_optimizer)
494-
495-
496-
def test_integration_constant_liar(target_func_x_and_y, pbounds, tmp_path):
497-
"""Test save/load integration with ConstantLiar acquisition."""
498-
base_acq = UpperConfidenceBound(kappa=2.576)
499-
acquisition_function = ConstantLiar(base_acquisition=base_acq)
500-
501-
optimizer = BayesianOptimization(
502-
f=target_func_x_and_y,
503-
pbounds=pbounds,
504-
acquisition_function=acquisition_function,
505-
random_state=1,
506-
verbose=0,
507-
)
508-
optimizer.maximize(init_points=2, n_iter=3)
509-
510-
state_path = tmp_path / "cl_state.json"
511-
optimizer.save_state(state_path)
512-
513-
new_optimizer = BayesianOptimization(
514-
f=target_func_x_and_y,
515-
pbounds=pbounds,
516-
acquisition_function=ConstantLiar(base_acquisition=UpperConfidenceBound(kappa=2.576)),
517-
random_state=1,
518-
verbose=0,
519-
)
520-
new_optimizer.load_state(state_path)
521-
522-
verify_optimizers_match(optimizer, new_optimizer)
523-
524-
525-
def test_integration_gp_hedge(target_func_x_and_y, pbounds, tmp_path):
526-
"""Test save/load integration with GPHedge acquisition."""
527-
base_acquisitions = [
528-
UpperConfidenceBound(kappa=2.576),
529-
ProbabilityOfImprovement(xi=0.01),
530-
ExpectedImprovement(xi=0.01),
531-
]
532-
acquisition_function = GPHedge(base_acquisitions=base_acquisitions)
409+
@pytest.mark.parametrize(
410+
("acquisition_fn_factory", "state_filename"),
411+
[
412+
(lambda: UpperConfidenceBound(kappa=2.576), "ucb_state.json"),
413+
(lambda: ProbabilityOfImprovement(xi=0.01), "pi_state.json"),
414+
(lambda: ExpectedImprovement(xi=0.01), "ei_state.json"),
415+
(lambda: ConstantLiar(base_acquisition=UpperConfidenceBound(kappa=2.576)), "cl_state.json"),
416+
(
417+
lambda: GPHedge(
418+
base_acquisitions=[
419+
UpperConfidenceBound(kappa=2.576),
420+
ProbabilityOfImprovement(xi=0.01),
421+
ExpectedImprovement(xi=0.01),
422+
]
423+
),
424+
"gphedge_state.json",
425+
),
426+
],
427+
)
428+
def test_integration_acquisition_functions(
429+
acquisition_fn_factory, state_filename, target_func_x_and_y, pbounds, tmp_path
430+
):
431+
"""Parametrized integration test for acquisition functions."""
432+
acquisition_function = acquisition_fn_factory()
533433

534434
optimizer = BayesianOptimization(
535435
f=target_func_x_and_y,
@@ -540,18 +440,13 @@ def test_integration_gp_hedge(target_func_x_and_y, pbounds, tmp_path):
540440
)
541441
optimizer.maximize(init_points=2, n_iter=3)
542442

543-
state_path = tmp_path / "gphedge_state.json"
443+
state_path = tmp_path / state_filename
544444
optimizer.save_state(state_path)
545445

546-
new_base_acquisitions = [
547-
UpperConfidenceBound(kappa=2.576),
548-
ProbabilityOfImprovement(xi=0.01),
549-
ExpectedImprovement(xi=0.01),
550-
]
551446
new_optimizer = BayesianOptimization(
552447
f=target_func_x_and_y,
553448
pbounds=pbounds,
554-
acquisition_function=GPHedge(base_acquisitions=new_base_acquisitions),
449+
acquisition_function=acquisition_fn_factory(),
555450
random_state=1,
556451
verbose=0,
557452
)

tests/test_logger.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,7 @@ def test_step():
157157
# Test with custom color
158158
custom_color = Fore.RED
159159
step_str_colored = logger._print_step(
160-
optimizer._space.keys, optimizer._space.res()[-1], optimizer._space.params_config, colour=custom_color
160+
optimizer._space.keys, optimizer._space.res()[-1], optimizer._space.params_config, color=custom_color
161161
)
162162
assert custom_color in step_str_colored
163163

tests/test_target_space.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,8 @@
22

33
import numpy as np
44
import pytest
5+
from scipy.optimize import NonlinearConstraint
56

6-
from bayes_opt.constraint import ConstraintModel
77
from bayes_opt.exception import NotUniqueError
88
from bayes_opt.target_space import TargetSpace
99

@@ -99,7 +99,7 @@ def test_register():
9999

100100

101101
def test_register_with_constraint():
102-
constraint = ConstraintModel(lambda x: x, -2, 2, transform=lambda x: x)
102+
constraint = NonlinearConstraint(lambda x: x, -2, 2)
103103
space = TargetSpace(target_func, PBOUNDS, constraint=constraint)
104104

105105
assert len(space) == 0
@@ -194,7 +194,7 @@ def test_y_max():
194194

195195
def test_y_max_with_constraint():
196196
PBOUNDS = {"p1": (0, 10), "p2": (1, 100)}
197-
constraint = ConstraintModel(lambda p1, p2: p1 - p2, -2, 2)
197+
constraint = NonlinearConstraint(lambda p1, p2: p1 - p2, -2, 2)
198198
space = TargetSpace(target_func, PBOUNDS, constraint)
199199
assert space._target_max() is None
200200
space.probe(params={"p1": 1, "p2": 2}) # Feasible
@@ -228,7 +228,7 @@ def test_max():
228228

229229
def test_max_with_constraint():
230230
PBOUNDS = {"p1": (0, 10), "p2": (1, 100)}
231-
constraint = ConstraintModel(lambda p1, p2: p1 - p2, -2, 2)
231+
constraint = NonlinearConstraint(lambda p1, p2: p1 - p2, -2, 2)
232232
space = TargetSpace(target_func, PBOUNDS, constraint=constraint)
233233

234234
assert space.max() is None
@@ -241,7 +241,7 @@ def test_max_with_constraint():
241241

242242
def test_max_with_constraint_identical_target_value():
243243
PBOUNDS = {"p1": (0, 10), "p2": (1, 100)}
244-
constraint = ConstraintModel(lambda p1, p2: p1 - p2, -2, 2)
244+
constraint = NonlinearConstraint(lambda p1, p2: p1 - p2, -2, 2)
245245
space = TargetSpace(target_func, PBOUNDS, constraint=constraint)
246246

247247
assert space.max() is None

0 commit comments

Comments (0)