Chainde (#1571)
* newsmooth

* newsmooth2

* po

* example

* fixtext

* po

* remove_warning

* fix

* po

* po

* removestuff

* po

* po

* po

* cleaning

* SmoothNotSuggest

* fixdetails

* fixdetails

* fixdetails

* fix

* chainde

* fix

* fix
teytaud authored Nov 10, 2023
1 parent 0e63686 commit 8442810
Showing 7 changed files with 98 additions and 51 deletions.
38 changes: 22 additions & 16 deletions examples/advbinmatrix.py
@@ -9,12 +9,11 @@
cmap = plt.get_cmap('jet_r')
score = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))

numxps = 17
nmax=250

numxps = 7
list_of_pbs = ["simple", "complex", "flatparts", "combined", "smooth", "bands", "shift"]
list_of_pbs = ["simple", "complex", "combined", "hardcore"]
list_ratios = [1, 5, 10, 20]
list_ratios = [0.2, 1, 5, 10, 20, 40, 80]


# REMOVE THIS !
@@ -96,39 +95,46 @@ def testloss(binary_matrix):
#try:
def run_alg(k, ratio):
print(f"{k} works on {pb} with edge {n}")
optimizer = ng.optimizers.registry[k](parametrization=ng.p.Array(shape=(n,n),lower=0.,upper=1.).set_integer_casting(), budget=n*ratio)# num_workers=20)
optimizer = ng.optimizers.registry[k](parametrization=ng.p.Array(shape=(n,n),lower=0.,upper=1.).set_integer_casting(), budget=int(n*ratio))# num_workers=20)
#with futures.ProcessPoolExecutor(max_workers=optimizer.num_workers) as executor:
recommendation = optimizer.minimize(testloss)#, executor=executor, batch_mode=False)
return testloss(recommendation.value)
all_res = Parallel(n_jobs=80)(delayed(run_alg)(k, ratio) for k in algos for ratio in list_ratios)
for i, k in enumerate(algos):
for ratio in list_ratios:
score[pb + str(ratio)][n][k] += [all_res[i]]
#score[n][k] += [testloss(recommendation.value)]
return k, ratio, testloss(recommendation.value)
all_res = Parallel(n_jobs=80)(delayed(run_alg)(k, ratio) for k in algos for ratio in list_ratios for _ in range(3))
for r in all_res:
k = r[0]
ratio = r[1]
score[pb + str(ratio)][n][k] += [r[2]]
print(f"=====> {k} gets {np.average(score[pb + str(ratio)][n][k])} on {pb} with edge {n}")
#except Exception as e:
# print(f"{k} does not work ({e}), maybe package missing ?")
#if n < 10:
# continue

for f in [6]: #,5,6]:
for nbest in [7, 19]: #, 10, 20, len(score[n].keys())]:
for nbest in [7, 12, 19]: #, 10, 20, len(score[n].keys())]:
for r in [7.]:
for ratio in list_ratios:
for u in range(3):
plt.clf()
sorted_algos = sorted(score[pb + str(ratio)][n].keys(), key=lambda k: np.average(score[pb + str(ratio)][n][k]))
if n > 10:
algos = [a for a in algos if a in sorted_algos[:max(31, int(.7 * len(algos)))] ]
#if n > 10:
# algos = [a for a in algos if a in sorted_algos[:max(31, int(.7 * len(algos)))] ]
for a in range(min(nbest, len(sorted_algos))):
print(f"{a}/{nbest}: {sorted_algos[a]} for ratio {ratio} and pb {pb}")
sorted_algos = sorted_algos[:nbest][::-1]
sorted_algos = [sorted_algos[i] for i in range(len(sorted_algos)) if i <= 18 or i >= len(sorted_algos) - 2]
#sorted_algos = [sorted_algos[i] for i in range(len(sorted_algos)) if i <= 18 or i >= len(sorted_algos) - 2]
for i, k in enumerate(sorted_algos):
#print(f" for size {n}, {k} gets {np.average(score[n][k])}")
color = cmap(i/len(sorted_algos))

x = [ni for ni in score[pb + str(ratio)].keys()] # if k in score[ni]]
y = [np.average(score[pb + str(ratio)][x_][k]) + ((-1)**idx) * np.std(score[pb + str(ratio)][x_][k]) for idx, x_ in enumerate(x)]
x = sorted([ni for ni in score[pb + str(ratio)].keys()]) # if k in score[ni]]
assert max(x) == n
assert max(x) == x[-1]
y = [np.average(score[pb + str(ratio)][x_][k]) for idx, x_ in enumerate(x)]
#y = [np.average(score[pb + str(ratio)][x_][k]) * np.std(score[pb + str(ratio)][x_][k]) for idx, x_ in enumerate(x)]
plt.plot(x, y, label=k+f" {y[-1]:.2f}", c=color)
assert y[-1] == np.average(score[pb + str(ratio)][n][k]), f" {y[-1]} vs {np.average(score[pb + str(ratio)][n][k])}"
y = [np.average(score[pb + str(ratio)][x_][k]) + ((-1)**idx) * np.std(score[pb + str(ratio)][x_][k]) for idx, x_ in enumerate(x)]
plt.plot(x, y, c=color, linestyle='dashed')
#print("plot",x,y,k)
plt.text(x[-1], y[-1], k, {'rotation': min(r * (len(sorted_algos) - i - 1), 60), 'rotation_mode': 'anchor', 'horizontalalignment': 'left', 'verticalalignment': 'center',})
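The example now returns `(algorithm, ratio, loss)` tuples from `run_alg` and aggregates them after the `Parallel` call, since joblib worker processes cannot mutate the parent's `score` dictionary. A minimal sketch of that pattern, with placeholder algorithm names, a toy loss and small sizes (all assumptions, not the benchmark's real settings):

```python
from collections import defaultdict

import numpy as np
from joblib import Parallel, delayed
import nevergrad as ng

algos = ["OnePlusOne", "RandomSearch"]   # placeholder algorithm names
list_ratios = [1, 5]                     # budget = ratio * edge length
n = 8                                    # matrix edge length (toy value)

def testloss(binary_matrix):             # toy stand-in for the real loss
    return float(np.sum(binary_matrix))

def run_alg(k, ratio):
    param = ng.p.Array(shape=(n, n), lower=0.0, upper=1.0).set_integer_casting()
    opt = ng.optimizers.registry[k](parametrization=param, budget=int(n * ratio))
    reco = opt.minimize(testloss)
    # Return the keys alongside the result: worker processes cannot update the
    # parent's dictionaries, so aggregation happens after the parallel call.
    return k, ratio, testloss(reco.value)

all_res = Parallel(n_jobs=4)(
    delayed(run_alg)(k, r) for k in algos for r in list_ratios for _ in range(3)
)
score = defaultdict(list)
for k, ratio, loss in all_res:
    score[(k, ratio)].append(loss)
```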
7 changes: 5 additions & 2 deletions nevergrad/benchmark/experiments.py
@@ -409,8 +409,8 @@ def refactor_optims(x: tp.List[tp.Any]) -> tp.List[tp.Any]: # type: ignore

# Below, we use the best in the records above.
benchmark = str(inspect.stack()[1].function)
if benchmark in algos and "tunin" in benchmark and np.random.randint(2) > 0:
list_optims = algos[benchmark][:5]
if benchmark in algos and "tunin" in benchmark and np.random.randint(2) > 0 and False:
return algos[benchmark][:5]

# Here, we pseudo-randomly draw one optim in the provided list,
# depending on the host (so that each host is using the same optim).
@@ -461,6 +461,7 @@ def doint(s): # Converting a string into an int.
import socket

host = socket.gethostname()

if "iscr" in benchmark or "pbo" in benchmark:
list_optims += [
a
@@ -518,6 +519,8 @@ def doint(s): # Converting a string into an int.
]
if ("Smooth" in a or "Lognor" in a or "Recomb" in a)
]
list_optims = ["NgIoh7", "NgIohRW2", "LognormalDiscreteOnePlusOne"]

return [list_optims[doint(host) % len(list_optims)]]
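With this change the selection at the end of `refactor_optims` always reduces to one of three optimizers, keyed on the host name. A hedged sketch of the idea; the stand-in `doint` below is an assumption, the real helper is defined earlier in `experiments.py`:

```python
import socket

def doint(s: str) -> int:
    # Assumed stand-in: turn a string into a deterministic non-negative int.
    return sum(ord(c) for c in s)

list_optims = ["NgIoh7", "NgIohRW2", "LognormalDiscreteOnePlusOne"]
host = socket.gethostname()
# Every process on a given host picks the same optimizer, so a benchmark spread
# over many hosts covers the whole list without any coordination.
chosen = [list_optims[doint(host) % len(list_optims)]]
```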


36 changes: 18 additions & 18 deletions nevergrad/optimization/experimentalvariants.py
@@ -16,13 +16,13 @@
NGOpt10,
NGOpt12,
BayesOptim,
ConfPortfolio,
DiagonalCMA,
GeneticDE,
TBPSA,
NoisyOnePlusOne,
RecombiningPortfolioOptimisticNoisyDiscreteOnePlusOne,
OptimisticNoisyOnePlusOne,
# ConfPortfolio,
# DiagonalCMA,
# GeneticDE,
# TBPSA,
# NoisyOnePlusOne,
# RecombiningPortfolioOptimisticNoisyDiscreteOnePlusOne,
# OptimisticNoisyOnePlusOne,
)
from . import optimizerlib as opts
from .optimizerlib import CMA, Chaining, PSO, BO
@@ -343,17 +343,17 @@
)

# Specifically for RL.
MixDeterministicRL = ConfPortfolio(optimizers=[DiagonalCMA, PSO, GeneticDE]).set_name(
"MixDeterministicRL", register=True
)
SpecialRL = Chaining([MixDeterministicRL, TBPSA], ["half"]).set_name("SpecialRL", register=True)
NoisyRL1 = Chaining([MixDeterministicRL, NoisyOnePlusOne], ["half"]).set_name("NoisyRL1", register=True)
NoisyRL2 = Chaining(
[MixDeterministicRL, RecombiningPortfolioOptimisticNoisyDiscreteOnePlusOne], ["half"]
).set_name("NoisyRL2", register=True)
NoisyRL3 = Chaining([MixDeterministicRL, OptimisticNoisyOnePlusOne], ["half"]).set_name(
"NoisyRL3", register=True
)
# MixDeterministicRL = ConfPortfolio(optimizers=[DiagonalCMA, PSO, GeneticDE]).set_name(
# "MixDeterministicRL", register=True
# )
# SpecialRL = Chaining([MixDeterministicRL, TBPSA], ["half"]).set_name("SpecialRL", register=True)
# NoisyRL1 = Chaining([MixDeterministicRL, NoisyOnePlusOne], ["half"]).set_name("NoisyRL1", register=True)
# NoisyRL2 = Chaining(
# [MixDeterministicRL, RecombiningPortfolioOptimisticNoisyDiscreteOnePlusOne], ["half"]
# ).set_name("NoisyRL2", register=True)
# NoisyRL3 = Chaining([MixDeterministicRL, OptimisticNoisyOnePlusOne], ["half"]).set_name(
# "NoisyRL3", register=True
# )

# High-Speed variants
HSDE = DifferentialEvolution(high_speed=True).set_name("HSDE", register=True)
8 changes: 6 additions & 2 deletions nevergrad/optimization/mutations.py
@@ -20,7 +20,10 @@ def significantly_mutate(self, v: float, arity: int):
"""Randomly drawn a normal value, and redraw until it's different after discretization by the quantiles
1/arity, 2/arity, ..., (arity-1)/arity.
"""
w = v
if arity > 499:
return self.random_state.normal(0.0, 1.0)
w = self.random_state.normal(0.0, 1.0)
assert arity > 1
while discretization.threshold_discretization([w], arity) == discretization.threshold_discretization(
[v], arity
):
@@ -91,7 +94,8 @@ def portfolio_discrete_mutation(
boolean_vector = np.ones(dimension, dtype=bool)
while all(boolean_vector) and dimension != 1:
boolean_vector = self.random_state.rand(dimension) > float(intensity) / dimension
return [s if b else self.significantly_mutate(s, arity) for (b, s) in zip(boolean_vector, parent)]
result = [s if b else self.significantly_mutate(s, arity) for (b, s) in zip(boolean_vector, parent)]
return result

def coordinatewise_mutation(
self,
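For context, `significantly_mutate` now starts from a fresh Gaussian draw (instead of the parent value) and short-circuits for very large arities. A rough self-contained sketch, assuming `threshold_discretization` buckets a real value by the Gaussian quantiles 1/arity, ..., (arity-1)/arity (the real helper lives in nevergrad's `discretization` module):

```python
import numpy as np
from scipy import stats

def bucket(v: float, arity: int) -> int:
    # Assumed behaviour of threshold_discretization for a single value.
    thresholds = stats.norm.ppf(np.arange(1, arity) / arity)
    return int(np.searchsorted(thresholds, v))

def significantly_mutate(v: float, arity: int, rng: np.random.RandomState) -> float:
    if arity > 499:                # near-continuous case: any draw is accepted
        return rng.normal(0.0, 1.0)
    w = rng.normal(0.0, 1.0)       # fresh draw rather than starting from v
    while bucket(w, arity) == bucket(v, arity):
        w = rng.normal(0.0, 1.0)   # redraw until the discretized value changes
    return w
```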
47 changes: 35 additions & 12 deletions nevergrad/optimization/optimizerlib.py
@@ -270,9 +270,11 @@ def _internal_ask_candidate(self) -> p.Parameter:
data = mutator.crossover(pessimistic_data, mutator.get_roulette(self.archive, num=2))
elif mutation == "lognormal":
mutation_rate = self._global_mr
assert mutation_rate > 0.0
individual_mutation_rate = 1.0 / (
1.0 + (((1.0 - mutation_rate) / mutation_rate) * np.exp(0.22 * np.random.randn()))
)
# print(f"pd={pessimistic_data}_imr={individual_mutation_rate}, d={self.dimension}, arity={self.arity_for_discrete_mutation}")
data = mutator.portfolio_discrete_mutation(
pessimistic_data,
intensity=individual_mutation_rate * self.dimension,
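The `lognormal` branch draws a per-individual mutation rate by perturbing the odds of the global rate with a lognormal factor, which keeps the result in (0, 1) and centred on the global rate. A small worked sketch of that formula (the 0.22 scale is taken from the diff; everything else is illustrative):

```python
import numpy as np

def individual_rate(global_rate: float, rng: np.random.Generator) -> float:
    assert 0.0 < global_rate < 1.0
    odds = (1.0 - global_rate) / global_rate
    # With a zero draw the factor is 1 and the individual rate equals the global
    # rate; larger draws push it towards 0 or 1 multiplicatively.
    return 1.0 / (1.0 + odds * np.exp(0.22 * rng.standard_normal()))

rng = np.random.default_rng(0)
rates = [individual_rate(0.05, rng) for _ in range(5)]
# Each rate is then multiplied by the dimension to obtain the mutation intensity
# passed to portfolio_discrete_mutation.
```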
@@ -894,6 +896,7 @@ def enable_pickling(self) -> None:

OldCMA = ParametrizedCMA().set_name("OldCMA", register=True)
LargeCMA = ParametrizedCMA(scale=3.0).set_name("LargeCMA", register=True)
LargeDiagCMA = ParametrizedCMA(scale=3.0, diagonal=True).set_name("LargeDiagCMA", register=True)
TinyCMA = ParametrizedCMA(scale=0.33).set_name("TinyCMA", register=True)
CMAbounded = ParametrizedCMA(
scale=1.5884, popsize_factor=1, elitist=True, diagonal=True, fcmaes=False
@@ -1545,6 +1548,10 @@ def __init__(
MicroSPSA = Rescaled(base_optimizer=SPSA, scale=1e-6).set_name("MicroSPSA", register=True)
TinySPSA.no_parallelization = True
MicroSPSA.no_parallelization = True
VastLengler = Rescaled(base_optimizer=DiscreteLenglerOnePlusOne, scale=1000).set_name(
"VastLengler", register=True
)
VastDE = Rescaled(base_optimizer=DE, scale=1000).set_name("VastDE", register=True)
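The new `VastLengler` and `VastDE` variants simply wrap an existing optimizer in `Rescaled(scale=1000)`, so candidates are spread over a much wider range than the default parametrization. A hedged usage sketch (the objective, dimension and budget are illustrative, and `VastDE` is only in the registry once a nevergrad build containing this commit is installed):

```python
import numpy as np
import nevergrad as ng

def far_sphere(x: np.ndarray) -> float:
    return float(np.sum((x - 300.0) ** 2))   # optimum far from the origin

opt = ng.optimizers.registry["VastDE"](parametrization=20, budget=2000)
reco = opt.minimize(far_sphere)
print(far_sphere(reco.value))
```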


class SplitOptimizer(base.Optimizer):
@@ -2678,9 +2685,11 @@ def __init__(
"MemeticDE", register=True
)
QNDE = Chaining([QODE, BFGS], ["half"]).set_name("QNDE", register=True)
ChainDE = Chaining([DE, BFGS], ["half"]).set_name("ChainDE", register=True)
OpoDE = Chaining([OnePlusOne, QODE], ["half"]).set_name("OpoDE", register=True)
OpoTinyDE = Chaining([OnePlusOne, TinyQODE], ["half"]).set_name("OpoTinyDE", register=True)
QNDE.no_parallelization = True
ChainDE.no_parallelization = True
Carola1 = Chaining([Cobyla, MetaModel], ["half"]).set_name("Carola1", register=True)
Carola2 = Chaining([Cobyla, MetaModel, SQP], ["third", "third"]).set_name("Carola2", register=True)
Carola1.no_parallelization = True
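This is the variant the commit is named after: `ChainDE` spends the first half of the budget on differential evolution, then hands the best point found to BFGS for local refinement, and is flagged `no_parallelization` like the other chained variants. A minimal hedged usage sketch (objective, dimension and budget are illustrative assumptions):

```python
import numpy as np
import nevergrad as ng

def rosenbrock(x: np.ndarray) -> float:
    return float(np.sum(100.0 * (x[1:] - x[:-1] ** 2) ** 2 + (1.0 - x[:-1]) ** 2))

# ChainDE is registered by this commit: DE for the first half of the budget,
# then BFGS starting from the best point found so far.
opt = ng.optimizers.registry["ChainDE"](parametrization=10, budget=4000)
reco = opt.minimize(rosenbrock)
print(rosenbrock(reco.value))
```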
@@ -4352,7 +4361,7 @@ def _select_optimizer_cls(self) -> base.OptCls:
and self.dimension > 1
and self.dimension < 100
):
print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, Carola2")
# print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, Carola2")
return Carola2
if (
self.fully_continuous
@@ -4363,14 +4372,14 @@
and self.dimension > 1
and self.dimension < 50
):
print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, Carola2")
# print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, Carola2")
return Carola2
# Special cases in the bounded case
if self.has_noise and (self.has_discrete_not_softmax or not funcinfo.metrizable):
optCls = RecombiningPortfolioOptimisticNoisyDiscreteOnePlusOne
elif self.dimension >= 60 and not funcinfo.metrizable:
optCls = CMA
print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, we choose {optCls}")
# print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, we choose {optCls}")
return optCls


@@ -4403,7 +4412,7 @@ def _select_optimizer_cls(self) -> base.OptCls:
and self.dimension > 1
and self.dimension < 100
):
print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, Carola2")
# print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, Carola2")
return Carola2
if (
self.fully_continuous
@@ -4414,14 +4423,14 @@
and self.dimension > 1
and self.dimension < 50
):
print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, Carola2")
# print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, Carola2")
return Carola2
# Special cases in the bounded case
if self.has_noise and (self.has_discrete_not_softmax or not funcinfo.metrizable):
optCls = RecombiningPortfolioOptimisticNoisyDiscreteOnePlusOne
elif self.dimension >= 60 and not funcinfo.metrizable:
optCls = CMA
print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, we choose {optCls}")
# print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, we choose {optCls}")
return optCls


@@ -4444,7 +4453,7 @@ def _select_optimizer_cls(self) -> base.OptCls:
and self.dimension > 1
and self.dimension < 100
):
print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, Carola2")
# print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, Carola2")
return Carola2
if (
self.fully_continuous
@@ -4455,14 +4464,14 @@
and self.dimension > 1
and self.dimension < 50
):
print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, Carola2")
# print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, Carola2")
return Carola2
# Special cases in the bounded case
if self.has_noise and (self.has_discrete_not_softmax or not funcinfo.metrizable):
optCls = RecombiningPortfolioOptimisticNoisyDiscreteOnePlusOne
elif self.dimension >= 60 and not funcinfo.metrizable:
optCls = CMA
print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, we choose {optCls}")
# print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, we choose {optCls}")
return optCls


@@ -4669,7 +4678,7 @@ def _select_optimizer_cls(self) -> base.OptCls:
and self.dimension > 1
and self.dimension < 100
):
print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, Carola2")
# print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, Carola2")
return Carola2
if (
self.fully_continuous
@@ -4680,12 +4689,26 @@
and self.dimension > 1
and self.dimension < 50
):
print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, Carola2")
# print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, Carola2")
return Carola2
# Special cases in the bounded case
if self.has_noise and (self.has_discrete_not_softmax or not funcinfo.metrizable):
optCls = RecombiningPortfolioOptimisticNoisyDiscreteOnePlusOne
elif self.dimension >= 60 and not funcinfo.metrizable:
optCls = CMA
print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, we choose {optCls}")
# print(f"budget={self.budget}, dim={self.dimension}, nw={self.num_workers}, we choose {optCls}")
return optCls


# Specifically for RL.
MixDeterministicRL = ConfPortfolio(optimizers=[DiagonalCMA, PSO, GeneticDE]).set_name(
"MixDeterministicRL", register=True
)
SpecialRL = Chaining([MixDeterministicRL, TBPSA], ["half"]).set_name("SpecialRL", register=True)
NoisyRL1 = Chaining([MixDeterministicRL, NoisyOnePlusOne], ["half"]).set_name("NoisyRL1", register=True)
NoisyRL2 = Chaining(
[MixDeterministicRL, RecombiningPortfolioOptimisticNoisyDiscreteOnePlusOne], ["half"]
).set_name("NoisyRL2", register=True)
NoisyRL3 = Chaining([MixDeterministicRL, OptimisticNoisyOnePlusOne], ["half"]).set_name(
"NoisyRL3", register=True
)
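These RL-oriented variants are moved here from `experimentalvariants.py`: `ConfPortfolio` runs DiagonalCMA, PSO and GeneticDE side by side, and `Chaining(..., ["half"])` gives the first half of the budget to that portfolio before switching to a noise-robust second stage such as TBPSA. A hedged usage sketch with an illustrative noisy objective (dimension and budget are assumptions):

```python
import numpy as np
import nevergrad as ng

def noisy_loss(x: np.ndarray) -> float:
    return float(np.sum(x ** 2) + 0.1 * np.random.randn())

opt = ng.optimizers.registry["SpecialRL"](parametrization=8, budget=1000)
reco = opt.minimize(noisy_loss)
```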
2 changes: 2 additions & 0 deletions nevergrad/optimization/recorded_recommendations.csv
@@ -16,6 +16,7 @@ CMandAS3,0.2292562325,-0.3302912524,0.1175133618,-0.8243774754,,,,,,,,,,,,
Carola1,1.0,0.0,0.0,0.0,,,,,,,,,,,,
Carola2,0.0,0.0,0.0,0.0,,,,,,,,,,,,
Carola3,1.012515477,-0.9138805701,-1.029555946,1.2098418178,,,,,,,,,,,,
ChainDE,0.7531428339,-1.5347213402,0.0051270781,-0.1202276702,,,,,,,,,,,,
Cobyla,0.0,-0.3451057176,-0.1327329683,1.9291307781,,,,,,,,,,,,
DE,0.7834550046,-1.5979675035,-1.6869894188,3.2681173276,2.0050198666,2.0279876859,-1.0166364165,-4.7120678459,,,,,,,,
ECMA,1.012515477,-0.9138805701,-1.029555946,1.2098418178,,,,,,,,,,,,
@@ -118,6 +119,7 @@ TinyQODE,-0.0018339146,0.0,0.0004307273,0.0008416212,,,,,,,,,,,,
TinySPSA,9.32387e-05,-9.32386e-05,9.32386e-05,9.32387e-05,,,,,,,,,,,,
TinySQP,9.999e-07,-1.6e-06,0.0,7.9999e-06,,,,,,,,,,,,
VLPCMA,1.012515477,-0.9138805701,-1.029555946,1.2098418178,,,,,,,,,,,,
VastDE,501.872303779,138.8461771562,640.7611133417,527.3326651107,,,,,,,,,,,,
Wiz,0.0,0.0,0.0,0.0,,,,,,,,,,,,
Zero,0.0,0.0,0.0,0.0,,,,,,,,,,,,
cGA,0.0509603282,0.1315286387,-0.0393104602,0.7333300801,,,,,,,,,,,,
11 changes: 10 additions & 1 deletion nevergrad/optimization/test_optimizerlib.py
@@ -282,7 +282,16 @@ def test_optimizers_minimal(name: str) -> None:
optimizer_cls = registry[name]
if any(x in name for x in ["SMAC", "BO", "AX"]) and os.environ.get("CIRCLECI", False):
raise SkipTest("too slow for CircleCI!")
if optimizer_cls.one_shot or name in ["CM", "NLOPT_LN_PRAXIS", "ES", "RecMixES", "RecMutDE", "RecES"]:
if optimizer_cls.one_shot or name in [
"CM",
"NLOPT_LN_PRAXIS",
"ES",
"RecMixES",
"RecMutDE",
"RecES",
"VastLengler",
"VastDE",
]:
return
if any(
x in str(optimizer_cls)
