diff --git a/docs/notebooks/trust_region.pct.py b/docs/notebooks/trust_region.pct.py
index c55a80fe28..7095fb3037 100644
--- a/docs/notebooks/trust_region.pct.py
+++ b/docs/notebooks/trust_region.pct.py
@@ -196,7 +196,11 @@ def plot_history(result: trieste.bayesian_optimizer.OptimizationResult) -> None:
 # `EfficientGlobalOptimization` coupled with the `ParallelContinuousThompsonSampling` acquisition
 # function.
 #
-# Note: the number of sub-spaces/regions must match the number of batch query points.
+# Note: in this example the number of sub-spaces/regions is equal to the number of batch query
+# points in the base-rule. This results in each region contributing one query point to the overall
+# batch. However, it is possible to generate multiple query points from each region by setting
+# `num_query_points` to be a multiple `Q` of the number of regions. In this case, each region will
+# contribute `Q` query points to the overall batch.

 # %%
 num_query_points = 5
diff --git a/tests/unit/acquisition/test_optimizer.py b/tests/unit/acquisition/test_optimizer.py
index e2f7e6a0f3..50d83a8edd 100644
--- a/tests/unit/acquisition/test_optimizer.py
+++ b/tests/unit/acquisition/test_optimizer.py
@@ -209,16 +209,19 @@ def test_optimize_continuous_raises_with_mismatch_multi_search_space() -> None:
     space_B = Box([3], [4])
     multi_space = TaggedMultiSearchSpace(spaces=[space_A, space_B])
     acq_fn = _quadratic_sum([1.0])
-    with pytest.raises(TF_DEBUGGING_ERROR_TYPES, match="The batch shape of initial samples 2 must"):
+    with pytest.raises(
+        TF_DEBUGGING_ERROR_TYPES, match="The vectorization of the target function 1 must be "
+    ):
         generate_continuous_optimizer()(multi_space, acq_fn)


-def test_optimize_continuous_finds_points_in_multi_search_space_boxes() -> None:
+@pytest.mark.parametrize("points_per_box", [1, 3])
+def test_optimize_continuous_finds_points_in_multi_search_space_boxes(points_per_box: int) -> None:
     # Test with non-overlapping grid of 2D boxes. Optimize them as a batch and check that each
-    # point is only in the corresponding box.
+    # point is only in the corresponding box (with potentially multiple points per box).
     boxes = [Box([x, y], [x + 0.7, y + 0.7]) for x in range(-2, 2) for y in range(-2, 2)]
     multi_space = TaggedMultiSearchSpace(spaces=boxes)
-    batch_size = len(boxes)
+    batch_size = len(boxes) * points_per_box

     def target_function(x: TensorType) -> TensorType:  # [N, V, D] -> [N, V]
         individual_func = [_quadratic_sum([1.0])(x[:, i : i + 1, :]) for i in range(batch_size)]
@@ -232,7 +235,7 @@ def target_function(x: TensorType) -> TensorType:  # [N, V, D] -> [N, V]
     # corresponding box.
     for i, point in enumerate(max_points):
         for j, box in enumerate(boxes):
-            if i == j:
+            if i % len(boxes) == j:
                 assert point in box
             else:
                 assert point not in box
diff --git a/trieste/acquisition/optimizer.py b/trieste/acquisition/optimizer.py
index d9b8550d66..a70eb299be 100644
--- a/trieste/acquisition/optimizer.py
+++ b/trieste/acquisition/optimizer.py
@@ -252,19 +252,21 @@ def optimize_continuous(
         candidates = space.sample(num_initial_samples)
         if tf.rank(candidates) == 3:
             # If samples is a tensor of rank 3, then it is a batch of samples. In this case
-            # the length of the second dimension must be equal to the vectorization of the target
-            # function.
+            # the vectorization of the target function must be a multiple of the length of the
+            # second (batch) dimension.
+            remainder = V % tf.shape(candidates)[1]
             tf.debugging.assert_equal(
-                tf.shape(candidates)[1],
-                V,
+                remainder,
+                tf.cast(0, dtype=remainder.dtype),
                 message=(
                     f"""
-                    The batch shape of initial samples {tf.shape(candidates)[1]} must be equal to
-                    the vectorization of the target function {V}.
+                    The vectorization of the target function {V} must be a multiple of the batch
+                    shape of initial samples {tf.shape(candidates)[1]}.
                     """
                 ),
             )
-            tiled_candidates = candidates  # [num_initial_samples, V, D]
+            multiple = V // tf.shape(candidates)[1]
+            tiled_candidates = tf.tile(candidates, [1, multiple, 1])  # [num_initial_samples, V, D]
         else:
             tf.debugging.assert_rank(
                 candidates,
@@ -325,19 +327,23 @@ def optimize_continuous(
             random_points = space.sample(num_recovery_runs)
             if tf.rank(random_points) == 3:
                 # If samples is a tensor of rank 3, then it is a batch of samples. In this case
-                # the length of the second dimension must be equal to the vectorization of the
-                # target function.
+                # the vectorization of the target function must be a multiple of the length of the
+                # second (batch) dimension.
+                remainder = V % tf.shape(random_points)[1]
                 tf.debugging.assert_equal(
-                    tf.shape(random_points)[1],
-                    V,
+                    remainder,
+                    tf.cast(0, dtype=remainder.dtype),
                     message=(
                         f"""
-                        The batch shape of random samples {tf.shape(random_points)[1]} must be
-                        equal to the vectorization of the target function {V}.
+                        The vectorization of the target function {V} must be a multiple of the batch
+                        shape of random samples {tf.shape(random_points)[1]}.
                         """
                     ),
                 )
-                tiled_random_points = random_points  # [num_recovery_runs, V, D]
+                multiple = V // tf.shape(random_points)[1]
+                tiled_random_points = tf.tile(
+                    random_points, [1, multiple, 1]  # [num_recovery_runs, V, D]
+                )
             else:
                 tf.debugging.assert_rank(
                     random_points,
@@ -497,19 +503,21 @@ def _objective_value_and_gradient(x: TensorType) -> Tuple[TensorType, TensorType
             spo.Bounds(lower, upper)
             for lower, upper in zip(space.subspace_lower, space.subspace_upper)
         ]
-        # If bounds is a sequence of tensors, stack them into a single tensor. In this case
-        # the length of the sequence must be equal to the vectorization of the target function.
+        # Here bounds is a sequence of per-subspace bounds. In this case the vectorization of
+        # the target function must be a multiple of the length of the sequence.
+        remainder = V % len(bounds)
         tf.debugging.assert_equal(
-            len(bounds),
-            V,
+            remainder,
+            tf.cast(0, dtype=remainder.dtype),
             message=(
                 f"""
-                The length of bounds sequence {len(bounds)} must be equal to the
-                vectorization of the target function {V}.
+                The vectorization of the target function {V} must be a multiple of the length
+                of the bounds sequence {len(bounds)}.
                 """
             ),
         )
-        bounds = bounds * num_optimization_runs_per_function
+        multiple = V // len(bounds)
+        bounds = bounds * multiple * num_optimization_runs_per_function
     else:
         bounds = [spo.Bounds(space.lower, space.upper)] * num_optimization_runs
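
For readers of the optimizer.py hunks above, here is a minimal standalone sketch (not part of the diff) of the relaxed check: the vectorization `V` of the target function no longer has to equal the batch (second) dimension of the sampled candidates, it only has to be a multiple of it, and the candidates are then tiled up to `V`. The toy shapes and variable names below are illustrative only.

import tensorflow as tf

num_initial_samples, num_subspaces, D = 7, 4, 2
V = 12  # target-function vectorization: 3 query points per sub-space
candidates = tf.random.uniform([num_initial_samples, num_subspaces, D])

# V only needs to be divisible by the batch (second) dimension of the samples...
remainder = V % tf.shape(candidates)[1]
tf.debugging.assert_equal(remainder, tf.cast(0, dtype=remainder.dtype))

# ...and the candidates are tiled along that dimension up to the full vectorization.
multiple = V // tf.shape(candidates)[1]  # 12 // 4 == 3
tiled_candidates = tf.tile(candidates, [1, multiple, 1])
print(tiled_candidates.shape)  # (7, 12, 2)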
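
At the notebook level, the new wording means the base-rule batch size can be any multiple `Q` of the number of trust regions, with each region then contributing `Q` points per batch. Below is a hedged configuration sketch along the lines of the trust_region notebook; the class names (`BatchTrustRegionBox`, `SingleObjectiveTrustRegionBox`) and exact constructor arguments come from my reading of the surrounding notebook code and the trieste API rather than from this diff, so treat them as assumptions.

import trieste

search_space = trieste.space.Box([0.0, 0.0], [1.0, 1.0])  # assumed problem domain

num_regions = 5
Q = 2  # query points contributed by each region per batch
num_query_points = num_regions * Q  # base-rule batch size: a multiple of the region count

init_subspaces = [
    trieste.acquisition.rule.SingleObjectiveTrustRegionBox(search_space)
    for _ in range(num_regions)
]
base_rule = trieste.acquisition.rule.EfficientGlobalOptimization(
    builder=trieste.acquisition.ParallelContinuousThompsonSampling(),
    num_query_points=num_query_points,
)
acq_rule = trieste.acquisition.rule.BatchTrustRegionBox(init_subspaces, base_rule)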