Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implementation of a Dagstuhloid benchmark, i.e. a benchmark inspired by discussions at Dagstuhl #1531

Merged
merged 57 commits into from
Jul 6, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
57 commits
Select commit Hold shift + click to select a range
b26168c
fixadcas
teytaud Jun 25, 2023
e082287
fix
teytaud Jun 26, 2023
ae2390b
fix
teytaud Jun 26, 2023
b9bb80e
ouie
teytaud Jun 26, 2023
db28e6b
fix
teytaud Jun 26, 2023
3d8a6ec
fix
teytaud Jun 26, 2023
65ef657
fix
teytaud Jun 26, 2023
9585650
fix
teytaud Jun 26, 2023
ee08868
fix
teytaud Jun 26, 2023
03098e5
fix
teytaud Jun 26, 2023
b032800
fix
teytaud Jun 26, 2023
65c7882
fix
teytaud Jun 26, 2023
0549832
po
teytaud Jun 26, 2023
78eb6bd
fix
teytaud Jun 26, 2023
622d2ca
fix
teytaud Jun 26, 2023
637b704
fix
teytaud Jun 26, 2023
a655e19
fix
teytaud Jun 26, 2023
8f3ad01
fix
teytaud Jun 26, 2023
e385126
fix
teytaud Jun 26, 2023
186a427
fix
teytaud Jun 26, 2023
42c0115
fix
teytaud Jun 26, 2023
55ad29f
fix
teytaud Jun 27, 2023
f80e9f1
fix
teytaud Jun 27, 2023
edf52b1
fix
teytaud Jun 27, 2023
e28fb27
fix
teytaud Jun 27, 2023
272b276
fix
teytaud Jun 27, 2023
bee6a96
fix
teytaud Jun 27, 2023
d88ecb3
fix
teytaud Jun 27, 2023
c11d518
fix
teytaud Jun 27, 2023
87af3de
po
teytaud Jun 28, 2023
4ef23fb
fix
teytaud Jun 28, 2023
6b7dd0e
fix
teytaud Jun 27, 2023
7f46597
fix
teytaud Jun 28, 2023
fcbd663
fix
teytaud Jun 29, 2023
bf514db
fix
teytaud Jun 29, 2023
c995894
fix
teytaud Jun 29, 2023
5437ca2
fix
teytaud Jun 29, 2023
f2934c2
okarc
teytaud Jun 29, 2023
47223b8
fix
teytaud Jun 30, 2023
d56a034
fix
teytaud Jun 30, 2023
3093b61
fix
teytaud Jun 30, 2023
830b7f6
fix
teytaud Jun 30, 2023
0c8673e
fix
teytaud Jul 1, 2023
c68e48c
fix
teytaud Jul 1, 2023
6cb07bd
bfgs (#1532)
teytaud Jul 2, 2023
8e1ede0
fix
teytaud Jun 29, 2023
4ff1cce
fix
teytaud Jul 2, 2023
813735d
fix
teytaud Jul 2, 2023
1349a59
fix
teytaud Jul 3, 2023
2d68acd
fix
teytaud Jul 3, 2023
d7f4de9
fix
teytaud Jul 4, 2023
c26e3f5
addtxt
teytaud Jul 5, 2023
f89df66
fix
teytaud Jul 6, 2023
4d296ae
fix
teytaud Jul 6, 2023
23bf991
fix
teytaud Jul 6, 2023
561f15e
fix
teytaud Jul 6, 2023
12b9deb
fix
teytaud Jul 6, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,10 @@

## main

## v0.8.0 (2023-07-03)
- Add the Dagstuhloid benchmark
- Add yet another group of metamodels

## v0.7.0 (2023-06-16)
- Fix links
- Add metamodels
Expand Down
2 changes: 1 addition & 1 deletion nevergrad/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,4 +15,4 @@
__all__ = ["optimizers", "families", "callbacks", "p", "typing", "errors", "ops"]


__version__ = "0.7.0"
__version__ = "0.8.0"
419 changes: 388 additions & 31 deletions nevergrad/benchmark/experiments.py

Large diffs are not rendered by default.

3 changes: 2 additions & 1 deletion nevergrad/benchmark/exporttable.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,8 @@ def export_table(filename: str, rows: t.List[t.Any], cols: t.List[t.Any], data:
f.write("\\lccode`7=`7\n")
f.write("\\lccode`8=`8\n")
f.write("\\lccode`9=`9\n")
f.write(r"\\newcolumntype{P}[1]{>{\hspace{0pt}}p{#1}}\n")
f.write(r"\newcolumntype{P}[1]{>{\hspace{0pt}}p{#1}}")
f.write("\n")
f.write("\\begin{document}\n")
f.write("\\scriptsize\n")
f.write("\\renewcommand{\\arraystretch}{1.5}\n")
Expand Down
3 changes: 0 additions & 3 deletions nevergrad/benchmark/optgroups.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,9 +45,6 @@ def get_optimizers(*names: str, seed: tp.Optional[int] = None) -> tp.List[Optim]
@registry.register
def large() -> tp.Sequence[Optim]:
return [
"NGO",
"Shiwa",
"DiagonalCMA",
"CMA",
"PSO",
"DE",
Expand Down
24 changes: 22 additions & 2 deletions nevergrad/benchmark/plotting.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@


_DPI = 250
pure_algorithms = []

# %% Basic tools

Expand Down Expand Up @@ -256,7 +257,14 @@ def create_plots(
"block_dimension",
"num_objectives",
):
df[col] = df[col].astype(float).astype(int)
try:
df[col] = df[col].astype(float).astype(int)
except Exception as e1:
try:
for i in range(len(df[col])):
float(df[col][i])
except Exception as e2:
assert False, f"Fails at row {i+2}, Exceptions: {e1}, {e2}"
elif col != "loss":
df[col] = df[col].astype(str)
df[col] = df[col].replace(r"\.[0]*$", "", regex=True)
Expand Down Expand Up @@ -382,6 +390,8 @@ def create_plots(
with open(str(output_folder / name) + ".cp.txt", "w") as f:
f.write(fullname)
f.write("ranking:\n")
global pure_algorithms
pure_algorithms = list(data_df.columns[:])
for i, algo in enumerate(data_df.columns[:58]):
f.write(f" algo {i}: {algo}\n")
if name == "fight_all.png":
Expand All @@ -405,7 +415,9 @@ def create_plots(
# Average normalized plot with everything.
out_filepath = output_folder / "xpresults_all.png"
data = XpPlotter.make_data(df, normalized_loss=True)
xpplotter = XpPlotter(data, title=os.path.basename(output_folder), name_style=name_style, xaxis=xpaxis)
xpplotter = XpPlotter(
data, title=os.path.basename(output_folder), name_style=name_style, xaxis=xpaxis, pure_only=True
)
xpplotter.save(out_filepath)
# Now one xp plot per case.
for case in cases:
Expand Down Expand Up @@ -479,6 +491,7 @@ def __init__(
title: str,
name_style: tp.Optional[tp.Dict[str, tp.Any]] = None,
xaxis: str = "budget",
pure_only: bool = False,
) -> None:
if name_style is None:
name_style = NameStyle()
Expand All @@ -491,6 +504,13 @@ def __init__(
# plot from best to worst
lowerbound = np.inf
sorted_optimizers = sorted(optim_vals, key=lambda x: optim_vals[x]["loss"][-1], reverse=True)
if pure_only:
assert len(pure_algorithms) > 0
# print(sorted_optimizers, " merged with ", pure_algorithms)
sorted_optimizers = [
o for o in sorted_optimizers if o + " " in [p[: (len(o) + 1)] for p in pure_algorithms]
]
# print("Leads to ", sorted_optimizers)
self._fig = plt.figure()
self._ax = self._fig.add_subplot(111)
# use log plot? yes, if no negative value
Expand Down
5 changes: 5 additions & 0 deletions nevergrad/functions/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,11 @@ def __new__(cls: tp.Type[EF], *args: tp.Any, **kwargs: tp.Any) -> EF:
inst._descriptors = {
x: y for x, y in callargs.items() if isinstance(y, (str, tuple, int, float, bool))
}
# if "bonnans" in str(cls.__name__) or "discrete" in str(cls.__name__) or "pbo" in str(cls.__name__):
# inst._descriptors = {
# x: y for x, y in callargs.items() if isinstance(y, (str, tuple, int, float, bool)) and "dimension" not
# in x and "paramet" not in x
# }
inst._descriptors["function_class"] = cls.__name__
return inst # type: ignore

Expand Down
2 changes: 1 addition & 1 deletion nevergrad/functions/images/imagelosses.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ def __call__(self, img: np.ndarray) -> float:
assert img.shape[2] == 3
assert len(img.shape) == 3
img = np.asarray(img, dtype=np.float64)
return -float(cv2.Laplacian(img, cv2.CV_64F).var())
return -float(cv2.Laplacian(img, cv2.CV_64F).var()) # type: ignore


@registry.register
Expand Down
26 changes: 23 additions & 3 deletions nevergrad/functions/leaderboard.csv

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion nevergrad/functions/photonics/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ def _make_parametrization(
), f"Cannot work with dimension {dimension} for {name}: not divisible by {shape[0]}."
b_array = np.array(bounds)
assert b_array.shape[0] == shape[0] # pylint: disable=unsubscriptable-object
ones = np.ones((1, shape[1]))
ones = np.ones((1, int(shape[1])))
init = np.sum(b_array, axis=1, keepdims=True).dot(ones) / 2 # type: ignore
if as_tuple:
instrum = ng.p.Instrumentation(
Expand Down
9 changes: 7 additions & 2 deletions nevergrad/optimization/differentialevolution.py
Original file line number Diff line number Diff line change
Expand Up @@ -175,6 +175,10 @@ def recommend(self) -> p.Parameter: # This is NOT the naive version. We deal wi
def _internal_ask_candidate(self) -> p.Parameter:
if len(self.population) < self.llambda: # initialization phase
init = self._config.initialization
if self.sampler is None and init == "QO":
self.sampler = oneshot.SamplingSearch(
sampler="Hammersley", scrambled=True, opposition_mode="quasi"
)(self.parametrization, budget=self.llambda)
if self.sampler is None and init not in ["gaussian", "parametrization"]:
assert init in ["LHS", "QR"]
self.sampler = oneshot.SamplingSearch(
Expand Down Expand Up @@ -306,7 +310,7 @@ class DifferentialEvolution(base.ConfiguredOptimizer):

Parameters
----------
initialization: "parametrization", "LHS" or "QR"
initialization: "parametrization", "LHS" or "QR" or "QO"
algorithm/distribution used for the initialization phase. If "parametrization", this uses the
sample method of the parametrization.
scale: float or str
Expand Down Expand Up @@ -351,7 +355,7 @@ def __init__(
) -> None:
super().__init__(_DE, locals(), as_config=True)
assert recommendation in ["optimistic", "pessimistic", "noisy", "mean"]
assert initialization in ["gaussian", "LHS", "QR", "parametrization"]
assert initialization in ["gaussian", "LHS", "QO", "QR", "parametrization"]
assert isinstance(scale, float) or scale == "mini"
if not isinstance(popsize, int):
assert popsize in ["large", "dimension", "standard"]
Expand Down Expand Up @@ -385,6 +389,7 @@ def __init__(

LhsDE = DifferentialEvolution(initialization="LHS").set_name("LhsDE", register=True)
QrDE = DifferentialEvolution(initialization="QR").set_name("QrDE", register=True)
QODE = DifferentialEvolution(initialization="QO").set_name("QODE", register=True)
NoisyDE = DifferentialEvolution(recommendation="noisy").set_name("NoisyDE", register=True)
AlmostRotationInvariantDE = DifferentialEvolution(crossover=0.9).set_name(
"AlmostRotationInvariantDE", register=True
Expand Down
83 changes: 82 additions & 1 deletion nevergrad/optimization/optimizerlib.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,6 +143,10 @@ def __init__(
"portfolio",
"discreteBSO",
"lengler",
"lengler2",
"lengler3",
"lenglerhalf",
"lenglerfourth",
"doerr",
], f"Unkwnown mutation: '{mutation}'"
if mutation == "adaptive":
Expand Down Expand Up @@ -271,6 +275,38 @@ def _internal_ask_candidate(self) -> p.Parameter:
intensity=intensity,
arity=self.arity_for_discrete_mutation,
)
elif mutation == "lengler2":
alpha = 3.0
intensity = int(max(1, self.dimension * (alpha * np.log(self.num_ask) / self.num_ask)))
data = mutator.portfolio_discrete_mutation(
pessimistic_data,
intensity=intensity,
arity=self.arity_for_discrete_mutation,
)
elif mutation == "lengler3":
alpha = 9.0
intensity = int(max(1, self.dimension * (alpha * np.log(self.num_ask) / self.num_ask)))
data = mutator.portfolio_discrete_mutation(
pessimistic_data,
intensity=intensity,
arity=self.arity_for_discrete_mutation,
)
elif mutation == "lenglerfourth":
alpha = 0.4
intensity = int(max(1, self.dimension * (alpha * np.log(self.num_ask) / self.num_ask)))
data = mutator.portfolio_discrete_mutation(
pessimistic_data,
intensity=intensity,
arity=self.arity_for_discrete_mutation,
)
elif mutation == "lenglerhalf":
alpha = 0.8
intensity = int(max(1, self.dimension * (alpha * np.log(self.num_ask) / self.num_ask)))
data = mutator.portfolio_discrete_mutation(
pessimistic_data,
intensity=intensity,
arity=self.arity_for_discrete_mutation,
)
elif mutation == "doerr":
# Selection, either random, or greedy, or a mutation rate.
assert self._doerr_index == -1, "We should have used this index in tell."
Expand Down Expand Up @@ -384,6 +420,7 @@ class ParametrizedOnePlusOne(base.ConfiguredOptimizer):
- `"portfolio"`: Random number of mutated bits (called uniform mixing in
Dang & Lehre "Self-adaptation of Mutation Rates in Non-elitist Population", 2016)
- `"lengler"`: specific mutation rate chosen as a function of the dimension and iteration index.
- `"lengler{2|3|half|fourth}"`: variant of Lengler
crossover: bool
whether to add a genetic crossover step every other iteration.
use_pareto: bool
Expand Down Expand Up @@ -462,6 +499,18 @@ def __init__(
DiscreteLenglerOnePlusOne = ParametrizedOnePlusOne(mutation="lengler").set_name(
"DiscreteLenglerOnePlusOne", register=True
)
DiscreteLengler2OnePlusOne = ParametrizedOnePlusOne(mutation="lengler2").set_name(
"DiscreteLengler2OnePlusOne", register=True
)
DiscreteLengler3OnePlusOne = ParametrizedOnePlusOne(mutation="lengler3").set_name(
"DiscreteLengler3OnePlusOne", register=True
)
DiscreteLenglerHalfOnePlusOne = ParametrizedOnePlusOne(mutation="lenglerhalf").set_name(
"DiscreteLenglerHalfOnePlusOne", register=True
)
DiscreteLenglerFourthOnePlusOne = ParametrizedOnePlusOne(mutation="lenglerfourth").set_name(
"DiscreteLenglerFourthOnePlusOne", register=True
)
DiscreteLenglerOnePlusOneT = ParametrizedOnePlusOne(tabu_length=10000, mutation="lengler").set_name(
"DiscreteLenglerOnePlusOneT", register=True
)
Expand Down Expand Up @@ -760,6 +809,8 @@ def enable_pickling(self) -> None:


OldCMA = ParametrizedCMA().set_name("OldCMA", register=True)
LargeCMA = ParametrizedCMA(scale=3.0).set_name("LargeCMA", register=True)
TinyCMA = ParametrizedCMA(scale=0.33).set_name("TinyCMA", register=True)
CMA = ParametrizedCMA().set_name("CMA", register=True)
CMAbounded = ParametrizedCMA(
scale=1.5884, popsize_factor=1, elitist=True, diagonal=True, fcmaes=False
Expand Down Expand Up @@ -1352,6 +1403,16 @@ def __init__(


RescaledCMA = Rescaled().set_name("RescaledCMA", register=True)
TinyLhsDE = Rescaled(base_optimizer=LhsDE, scale=1e-3).set_name("TinyLhsDE", register=True)
TinyQODE = Rescaled(base_optimizer=QODE, scale=1e-3).set_name("TinyQODE", register=True)
TinySQP = Rescaled(base_optimizer=SQP, scale=1e-3).set_name("TinySQP", register=True)
MicroSQP = Rescaled(base_optimizer=SQP, scale=1e-6).set_name("MicroSQP", register=True)
TinySQP.no_parallelization = True
MicroSQP.no_parallelization = True
TinySPSA = Rescaled(base_optimizer=SPSA, scale=1e-3).set_name("TinySPSA", register=True)
MicroSPSA = Rescaled(base_optimizer=SPSA, scale=1e-6).set_name("MicroSPSA", register=True)
TinySPSA.no_parallelization = True
MicroSPSA.no_parallelization = True


class SplitOptimizer(base.Optimizer):
Expand Down Expand Up @@ -1703,6 +1764,8 @@ def enable_pickling(self) -> None:
optimizers=[ParametrizedCMA(random_init=True, scale=scale) for scale in [1.0, 1e-3, 1e-6]],
warmup_ratio=0.33,
).set_name("MultiScaleCMA", register=True)
LPCMA = ParametrizedCMA(popsize_factor=10.0).set_name("LPCMA", register=True)
VLPCMA = ParametrizedCMA(popsize_factor=100.0).set_name("VLPCMA", register=True)


class _MetaModel(base.Optimizer):
Expand Down Expand Up @@ -1783,6 +1846,16 @@ def __init__(
MetaModelOnePlusOne = ParametrizedMetaModel(multivariate_optimizer=OnePlusOne).set_name(
"MetaModelOnePlusOne", register=True
)
RFMetaModelOnePlusOne = ParametrizedMetaModel(multivariate_optimizer=OnePlusOne, algorithm="rf").set_name(
"RFMetaModelOnePlusOne", register=True
)
MetaModelPSO = ParametrizedMetaModel(multivariate_optimizer=PSO).set_name("MetaModelPSO", register=True)
RFMetaModelPSO = ParametrizedMetaModel(multivariate_optimizer=PSO, algorithm="rf").set_name(
"RFMetaModelPSO", register=True
)
SVMMetaModelPSO = ParametrizedMetaModel(multivariate_optimizer=PSO, algorithm="svr").set_name(
"SVMMetaModelPSO", register=True
)

MetaModelDE = ParametrizedMetaModel(multivariate_optimizer=DE).set_name("MetaModelDE", register=True)
NeuralMetaModelDE = ParametrizedMetaModel(algorithm="neural", multivariate_optimizer=DE).set_name(
Expand Down Expand Up @@ -2283,14 +2356,16 @@ def __init__(
"dimension": self.dimension,
"half": self.budget // 2 if self.budget else self.num_workers,
"third": self.budget // 3 if self.budget else self.num_workers,
"fourth": self.budget // 4 if self.budget else self.num_workers,
"tenth": self.budget // 10 if self.budget else self.num_workers,
"sqrt": int(np.sqrt(self.budget)) if self.budget else self.num_workers,
}
self.budgets = [max(1, converter[b]) if isinstance(b, str) else b for b in budgets]
last_budget = None if self.budget is None else max(4, self.budget - sum(self.budgets))
assert len(optimizers) == len(self.budgets) + 1
assert all(
x in ("third", "half", "tenth", "dimension", "num_workers", "sqrt") or x > 0 for x in self.budgets
x in ("fourth", "third", "half", "tenth", "dimension", "num_workers", "sqrt") or x > 0
for x in self.budgets
), str(self.budgets)
for opt, optbudget in zip(optimizers, self.budgets + [last_budget]): # type: ignore
self.optimizers.append(opt(self.parametrization, budget=optbudget, num_workers=self.num_workers))
Expand Down Expand Up @@ -2353,6 +2428,12 @@ def __init__(
GeneticDE = Chaining([RotatedTwoPointsDE, TwoPointsDE], [200]).set_name(
"GeneticDE", register=True
) # Also known as CGDE
MemeticDE = Chaining([RotatedTwoPointsDE, TwoPointsDE, DE, SQP], ["fourth", "fourth", "fourth"]).set_name(
"MemeticDE", register=True
)
QNDE = Chaining([QODE, BFGS], ["half"]).set_name("QNDE", register=True)
QNDE.no_parallelization = True
MemeticDE.no_parallelization = True
discretememetic = Chaining(
[RandomSearch, DiscreteLenglerOnePlusOne, DiscreteOnePlusOne], ["third", "third"]
).set_name("discretememetic", register=True)
Expand Down
3 changes: 3 additions & 0 deletions nevergrad/optimization/recastlib.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ def __init__(
"Powell",
]
or "NLOPT" in method
or "BFGS" in method
), f"Unknown method '{method}'"
self.method = method
self.random_restart = random_restart
Expand Down Expand Up @@ -212,6 +213,8 @@ def __init__(self, *, method: str = "Nelder-Mead", random_restart: bool = False)
NLOPT = NonObjectOptimizer(method="NLOPT").set_name("NLOPT", register=True)
Powell = NonObjectOptimizer(method="Powell").set_name("Powell", register=True)
RPowell = NonObjectOptimizer(method="Powell", random_restart=True).set_name("RPowell", register=True)
BFGS = NonObjectOptimizer(method="BFGS", random_restart=True).set_name("BFGS", register=True)
LBFGSB = NonObjectOptimizer(method="L-BFGS-B", random_restart=True).set_name("LBFGSB", register=True)
Cobyla = NonObjectOptimizer(method="COBYLA").set_name("Cobyla", register=True)
RCobyla = NonObjectOptimizer(method="COBYLA", random_restart=True).set_name("RCobyla", register=True)
SQP = NonObjectOptimizer(method="SLSQP").set_name("SQP", register=True)
Expand Down
Loading