
Update nb & fix coordinate update
RobertTLange committed Oct 24, 2021
1 parent f61a3a1 commit 5200f5f
Showing 7 changed files with 521 additions and 534 deletions.
17 changes: 5 additions & 12 deletions README.md
@@ -3,7 +3,7 @@
[![PyPI version](https://badge.fury.io/py/mle-hyperopt.svg)](https://badge.fury.io/py/mle-hyperopt)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
[![Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/RobertTLange/mle-hyperopt/blob/main/examples/getting_started.ipynb)
<a href="docs/logo_transparent.png_2"><img src="docs/logo_transparent.png" width="200" align="right" /></a>
<a href="https://github.com/RobertTLange/mle-hyperopt/blob/main/docs/logo_transparent.png?raw=true"><img src="https://github.com/RobertTLange/mle-hyperopt/blob/main/docs/logo_transparent.png?raw=true" width="200" align="right" /></a>

The `mle-hyperopt` package provides a simple and intuitive API for hyperparameter optimization of your Machine Learning Experiment (MLE) pipeline. It supports real, integer & categorical search variables and single- or multi-objective optimization.

@@ -131,10 +131,10 @@ strategy.print_ranking(top_k=3)
# Refine the search space after 5 & 10 iterations based on top 2 configurations
strategy = RandomSearch(real={"lrate": {"begin": 0.1,
"end": 0.5,
"prior": "uniform"}},
"prior": "log-uniform"}},
integer={"batch_size": {"begin": 1,
"end": 5,
"prior": "log-uniform"}},
"prior": "uniform"}},
categorical={"arch": ["mlp", "cnn"]},
search_config={"refine_after": [5, 10],
"refine_top_k": 2})
@@ -144,15 +144,8 @@ strategy.tell(...)
strategy.refine(top_k=2)
```

-Note the search space refinement is only implemented for random, SMBO and nevergrad-based search strategies.
+Note that the search space refinement is only implemented for random, SMBO and nevergrad-based search strategies.

## Development & Milestones for Next Release

-You can run the test suite via `python -m pytest -vv tests/`. If you find a bug or are missing your favourite feature, feel free to contact me [@RobertTLange](https://twitter.com/RobertTLange) or create an issue :hugs:. Here are some features I want to implement for the next release:
-
-- [ ] Add text to notebook for what is implemented
-- [ ] Update Readme text
-- [ ] Update mle-toolbox webpage intro
-- [ ] Release and make sure installation works
-- [ ] Draft tweet for release
-- [ ] Synergies with mle-logging
+You can run the test suite via `python -m pytest -vv tests/`. If you find a bug or are missing your favourite feature, feel free to contact me [@RobertTLange](https://twitter.com/RobertTLange) or create an issue :hugs:.
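
For context, here is a minimal sketch of the ask/tell loop that drives the refinement snippet in the README diff above. It continues from the `strategy` defined there; the `train_fn` objective and the batch size of 2 are hypothetical placeholders, and the ask/tell call pattern follows the package API shown elsewhere in the README.

```python
# Hypothetical stand-in for a real training/evaluation routine
def train_fn(lrate, batch_size, arch):
    return lrate * batch_size  # placeholder objective value

for _ in range(5):
    configs = strategy.ask(2)                      # propose a batch of 2 configs
    values = [train_fn(**c) for c in configs]      # evaluate each proposal
    strategy.tell(configs, values)                 # log performance back

# With search_config={"refine_after": [5, 10], "refine_top_k": 2} the space is
# refined automatically after 5 and 10 tell updates; strategy.refine(top_k=2)
# triggers the same refinement step manually.
```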
Binary file modified docs/logo_transparent.png
990 changes: 487 additions & 503 deletions examples/getting_started.ipynb

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion mle_hyperopt/_version.py
@@ -1 +1 @@
__version__ = "0.0.2"
__version__ = "0.0.3"
12 changes: 8 additions & 4 deletions mle_hyperopt/search.py
@@ -5,6 +5,7 @@
from mle_hyperopt.comms import welcome_message, update_message, ranking_message
import matplotlib.pyplot as plt
import seaborn as sns
+from rich.console import Console

sns.set(
context="poster",
@@ -129,7 +130,7 @@ def tell(
del proposal_clean[k]

if proposal_clean in self.all_evaluated_params:
print(f"{batch_proposals[i]} was previously evaluated.")
Console().log(f"{batch_proposals[i]} was previously evaluated.")
else:
self.log.append(
{
@@ -170,7 +171,9 @@ def save(self, save_path: str = "search_log.json", verbose: bool = False):
"""Store the state of the optimizer (parameters, values) as .pkl."""
save_json(self.log, save_path)
if verbose:
print(f"Stored {self.eval_counter} search iterations --> {save_path}.")
Console().log(
f"Stored {self.eval_counter} search iterations --> {save_path}."
)

def load(
self,
@@ -190,7 +193,7 @@ def load(
self.tell([iter["params"]], [iter["objective"]], True)

if reload_path is not None or reload_list is not None:
-            print(
+            Console().log(
f"Reloaded {self.eval_counter - prev_evals}"
" previous search iterations."
)
@@ -254,7 +257,8 @@ def plot_best(self):
timeseries = np.maximum.accumulate(objective_evals)

fig, ax = plt.subplots()
-        ax.plot(timeseries)
+        # Plot the running best with iteration indices starting at 1
+        ax.plot(np.arange(1, len(timeseries) + 1), timeseries)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.set_title("Best Objective Value")
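
To make the `plot_best` change concrete, a small standalone sketch of the computation it performs (the objective values below are made up):

```python
import numpy as np

# Hypothetical objective values from 5 search iterations (maximization case)
objective_evals = [0.2, 0.5, 0.4, 0.7, 0.6]

# Running best-so-far curve, as computed in plot_best
timeseries = np.maximum.accumulate(objective_evals)
print(timeseries)  # [0.2 0.5 0.5 0.7 0.7]

# The fix plots against iterations 1..T instead of 0..T-1, so the first
# evaluation shows up as iteration 1 on the x-axis
x = np.arange(1, len(timeseries) + 1)
print(x)  # [1 2 3 4 5]
```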
29 changes: 15 additions & 14 deletions mle_hyperopt/strategies/coordinate.py
@@ -2,6 +2,7 @@
from ..search import HyperOpt
from ..spaces import GridSpace
import numpy as np
+from rich.console import Console


class CoordinateSearch(HyperOpt):
@@ -32,27 +33,22 @@ def __init__(
verbose,
)
self.evals_per_coord = [0]
+        var_counter = 0
for k in self.search_config["order"]:
if self.real is not None:
if k in self.real.keys():
-                    self.evals_per_coord.append(self.real[k]["bins"])
+                    self.evals_per_coord.append(self.real[k]["bins"] + var_counter)
+                    var_counter += 1

if self.integer is not None:
if k in self.integer.keys():
-                    range_int = (
-                        np.linspace(
-                            int(self.integer[k]["begin"]),
-                            int(self.integer[k]["end"]),
-                            int(self.integer[k]["bins"]),
-                        )
-                        .astype(int)
-                        .tolist()
-                    )
-                    self.evals_per_coord.append(len(range_int))
+                    self.evals_per_coord.append(self.integer[k]["bins"] + var_counter)
+                    var_counter += 1

if self.categorical is not None:
for k in self.categorical.keys():
-                self.evals_per_coord.append(len(self.categorical[k]))
+                self.evals_per_coord.append(len(self.categorical[k]) + var_counter)
+                var_counter += 1
self.range_per_coord = np.cumsum(self.evals_per_coord)

# Sequentially set-up different grid spaces - initialize 1st one
@@ -95,7 +91,7 @@ def tell_search(self, batch_proposals: list, perf_measures: list):
self.grid_var_counter = (
self.eval_counter - self.range_per_coord[self.var_counter]
)
-        if self.grid_var_counter == len(self.space):
+        if self.grid_var_counter >= len(self.space) - self.var_counter:
self.var_counter += 1
if self.var_counter < len(self.search_config["order"]):
self.construct_active_space()
@@ -107,10 +103,15 @@ def construct_active_space(self):
if self.eval_counter > 0:
idx, config, eval = self.get_best()
for k, v in config.items():
-                self.search_config["defaults"][k] = v
+                if k == self.search_config["order"][self.var_counter - 1]:
+                    self.search_config["defaults"][k] = v
+                    if self.verbose:
+                        Console().log(f"Fixed `{k}` hyperparameter to {v}.")

# Increase active variable counter and reset grid counter
self.active_var = self.search_config["order"][self.var_counter]
+        if self.verbose:
+            Console().log(f"New active variable `{self.active_var}`.")

# Create new grid search space - if fixed: Create categorical
# Note: Only one variable is 'active' at every time
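
To see what the counter fix changes, here is a rough standalone sketch of the bookkeeping with made-up bin counts. This is my reading of the diff, not an exact reproduction of the class: each later coordinate's entry is offset by one per previously processed variable (`+ var_counter`), and the cumulative boundaries decide when the active coordinate switches.

```python
import numpy as np

# Made-up example: two variables searched in order ["lrate", "batch_size"]
# with 5 and 4 grid bins respectively
bins_per_var = [5, 4]

# Mirror the commit's bookkeeping: bins plus an offset of one per
# previously processed variable
evals_per_coord = [0]
var_counter = 0
for bins in bins_per_var:
    evals_per_coord.append(bins + var_counter)
    var_counter += 1

# Cumulative boundaries; tell_search compares the running evaluation count
# against these to decide when to fix the current variable and move on
range_per_coord = np.cumsum(evals_per_coord)
print(range_per_coord)  # [ 0  5 10]
```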
5 changes: 5 additions & 0 deletions mle_hyperopt/strategies/nevergrad.py
@@ -43,6 +43,11 @@ def __init__(
if self.verbose:
self.print_hello()

+    @property
+    def optimizers(self):
+        """Returns a list of available nevergrad optimizers."""
+        return sorted(ng.optimizers.registry.keys())
+
def init_optimizer(self):
"""Initialize the surrogate model/hyperparam config proposer."""
assert self.search_config["optimizer"] in list(dict(ng.optimizers.registry).keys())
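
The new `optimizers` property simply exposes nevergrad's optimizer registry, so outside the class the same listing can be reproduced directly:

```python
import nevergrad as ng

# Sorted names of all optimizers registered with nevergrad -- exactly what
# the new `optimizers` property returns
available = sorted(ng.optimizers.registry.keys())
print(len(available), available[:3])
```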
