diff --git a/trieste/acquisition/optimizer.py b/trieste/acquisition/optimizer.py
index a70eb299be..c6c818428a 100644
--- a/trieste/acquisition/optimizer.py
+++ b/trieste/acquisition/optimizer.py
@@ -192,6 +192,9 @@ def generate_continuous_optimizer(
     If all `num_optimization_runs` optimizations fail to converge then we run
     `num_recovery_runs` additional runs starting from random locations (also ran in parallel).
 
+    **Note:** using large values of `num_initial_samples` and `num_optimization_runs` with a
+    high-dimensional search space can consume a large amount of CPU memory (RAM).
+
     :param num_initial_samples: The size of the random sample used to find the starting point(s)
         of the optimization.
     :param num_optimization_runs: The number of separate optimizations to run.
diff --git a/trieste/acquisition/utils.py b/trieste/acquisition/utils.py
index 590b0bb416..8afe5d07eb 100644
--- a/trieste/acquisition/utils.py
+++ b/trieste/acquisition/utils.py
@@ -48,7 +48,8 @@ def wrapper(x: TensorType) -> TensorType:
         if length == 0:
             return fn(x)
 
-        elements_per_block = tf.size(x) / length
+        # Use int64 to calculate the input tensor size, otherwise it can overflow for large tensors.
+        elements_per_block = tf.size(x, out_type=tf.int64) / length
         blocks_per_batch = tf.cast(tf.math.ceil(split_size / elements_per_block), tf.int32)
 
         num_batches = tf.cast(tf.math.ceil(length / blocks_per_batch) - 1, tf.int32)
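
For reference on the utils.py change: tf.size returns tf.int32 by default, so the element count silently wraps once a tensor holds more than 2**31 - 1 elements. Below is a minimal sketch of that wrap-around; it uses tf.reduce_prod over a hypothetical 65536 x 65536 shape (not taken from the diff) so that no tensor of that size is actually allocated.

import tensorflow as tf

# Illustrative shape only (not from the diff): 65536 * 65536 = 2**32 elements,
# which exceeds the signed 32-bit maximum of 2**31 - 1.
shape = tf.constant([65536, 65536], dtype=tf.int64)

# With int32 arithmetic the element count wraps around to 0, which is what
# tf.size(x) with the default out_type would return for a tensor of this shape.
print(tf.reduce_prod(tf.cast(shape, tf.int32)))  # tf.Tensor(0, shape=(), dtype=int32)

# Requesting int64, as the fix does via out_type=tf.int64, gives the true count.
print(tf.reduce_prod(shape))  # tf.Tensor(4294967296, shape=(), dtype=int64)

With a wrapped count, elements_per_block comes out wrong (zero in this sketch), which corrupts the blocks_per_batch and num_batches arithmetic downstream; requesting the size as int64 keeps the batch-splitting calculation exact for any tensor that fits in memory.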