add learn_continuous option for the auto_koopman function (#32)
EthanJamesLew committed Mar 11, 2024
1 parent 3faff9a commit 5a4639d
Showing 1 changed file with 39 additions and 10 deletions.
autokoopman/autokoopman.py: 49 changes (39 additions, 10 deletions)
@@ -20,6 +20,7 @@
     TrajectoryScoring,
 )
 from autokoopman.estimator.koopman import KoopmanDiscEstimator
+from autokoopman.estimator.koopman import KoopmanContinuousEstimator
 from autokoopman.tuner.gridsearch import GridSearchTuner
 from autokoopman.tuner.montecarlo import MonteCarloTuner
 from autokoopman.tuner.bayesianopt import BayesianOptTuner
@@ -97,32 +98,41 @@ def get_parameter_space(obs_type, threshold_range, rank)
         )


-def get_estimator(obs_type, sampling_period, dim, obs, hyperparams, normalize):
+def get_estimator(
+    obs_type, learn_continuous, sampling_period, dim, obs, hyperparams, normalize
+):
     """from the myriad of user supplied switches, select the right estimator"""
+
+    def inst_estimator(*args, **kwargs):
+        if learn_continuous:
+            return KoopmanContinuousEstimator(args[0], *args[2:], **kwargs)
+        else:
+            return KoopmanDiscEstimator(*args, **kwargs)
+
     if obs_type == "rff":
         observables = kobs.IdentityObservable() | kobs.RFFObservable(
             dim, obs, hyperparams[0]
         )
-        return KoopmanDiscEstimator(
+        return inst_estimator(
             observables, sampling_period, dim, rank=hyperparams[1], normalize=normalize
         )
     elif obs_type == "quadratic":
         observables = kobs.IdentityObservable() | kobs.QuadraticObservable(dim)
-        return KoopmanDiscEstimator(
+        return inst_estimator(
             observables, sampling_period, dim, rank=hyperparams[0], normalize=normalize
         )
     elif obs_type == "poly":
         observables = kobs.PolynomialObservable(dim, hyperparams[0])
-        return KoopmanDiscEstimator(
+        return inst_estimator(
             observables, sampling_period, dim, rank=hyperparams[1], normalize=normalize
         )
     elif obs_type == "id":
         observables = kobs.IdentityObservable()
-        return KoopmanDiscEstimator(
+        return inst_estimator(
             observables, sampling_period, dim, rank=hyperparams[0], normalize=normalize
         )
     elif isinstance(obs_type, KoopmanObservable):
-        return KoopmanDiscEstimator(
+        return inst_estimator(
             obs_type, sampling_period, dim, rank=hyperparams[0], normalize=normalize
         )
     else:
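The new `inst_estimator` closure keeps every call site above identical across both modes: on the continuous path it forwards `args[0]` (the observables) and `args[2:]` onward, deliberately dropping `args[1]` (the sampling period), which a continuous-time estimator does not consume. A minimal self-contained sketch of this dispatch pattern, with dummy stand-in classes rather than the real estimators:

```python
class DiscEst:
    """stand-in for KoopmanDiscEstimator (illustrative only)"""
    def __init__(self, observables, sampling_period, dim, rank=None, normalize=False):
        self.args = (observables, sampling_period, dim, rank, normalize)

class ContEst:
    """stand-in for KoopmanContinuousEstimator: note there is no sampling_period"""
    def __init__(self, observables, dim, rank=None, normalize=False):
        self.args = (observables, dim, rank, normalize)

def make_factory(learn_continuous):
    def inst_estimator(*args, **kwargs):
        if learn_continuous:
            # forward observables (args[0]) and everything after the sampling
            # period (args[2:]); continuous time does not use args[1]
            return ContEst(args[0], *args[2:], **kwargs)
        return DiscEst(*args, **kwargs)
    return inst_estimator

inst = make_factory(learn_continuous=True)
est = inst("obs", 0.1, 2, rank=5, normalize=False)
assert est.args == ("obs", 2, 5, False)  # the 0.1 sampling period was dropped
```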
@@ -132,6 +142,7 @@ def get_estimator(obs_type, sampling_period, dim, obs, hyperparams, normalize):
 def auto_koopman(
     training_data: Union[TrajectoriesData, Sequence[np.ndarray]],
     inputs_training_data: Optional[Sequence[np.ndarray]] = None,
+    learn_continuous: bool = False,
     sampling_period: Optional[float] = None,
     normalize: bool = False,
     opt: Union[str, HyperparameterTuner] = "monte-carlo",
@@ -158,14 +169,15 @@ def auto_koopman(
     :param training_data: training trajectories data from which to learn the system
     :param inputs_training_data: optional input trajectories data from which to learn the system (this isn't needed if the training data has inputs already)
+    :param learn_continuous: whether to learn a continuous time or discrete time Koopman estimator
     :param sampling_period: (for discrete time system) sampling period of training data
     :param normalize: normalize the states of the training trajectories
     :param opt: hyperparameter optimizer {"grid", "monte-carlo", "bopt"}
     :param max_opt_iter: maximum iterations for the tuner to use
     :param max_epochs: maximum number of training epochs
     :param n_splits: (for optimizers) if set, switches to k-folds bootstrap validation for the hyperparameter tuning. This is useful for things like RFF tuning where the results have noise.
     :param obs_type: (for koopman) koopman observables to use {"rff", "quadratic", "poly", "id", "deep"}
-    :param cost_func: cost function to use for hyperparameter optimization
+    :param cost_func: cost function to use for hyperparameter optimization {"total", "end", "relative"}
     :param n_obs: (for koopman) number of observables to use (if applicable)
     :param rank: (for koopman) rank range (start, stop) or (start, stop, step)
     :param grid_param_slices: (for grid tuner) resolution to slice continuous valued parameters into
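A hedged usage sketch of the new flag, matching the signature and docstring above; the synthetic trajectories and the `"tuned_model"` result key are illustrative assumptions, not part of this diff:

```python
import numpy as np
from autokoopman import auto_koopman

# two synthetic 2-D trajectories sampled every 0.1 s (illustrative data only;
# any Sequence[np.ndarray] of shape (n_steps, n_dim) should work per the type hint)
t = np.arange(0, 10, 0.1)
trajs = [np.column_stack([np.cos(t + p), np.sin(t + p)]) for p in (0.0, 0.5)]

results = auto_koopman(
    trajs,
    sampling_period=0.1,
    learn_continuous=True,   # new in this commit: fit a continuous-time estimator
    obs_type="rff",
    opt="monte-carlo",
    n_obs=100,
    rank=(1, 10),
)
model = results["tuned_model"]  # assumes the tuned model is returned under this key
```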
@@ -216,6 +228,8 @@ def auto_koopman(

     # get the hyperparameter map
     if obs_type in {"deep"}:
+        if learn_continuous:
+            raise ValueError("deep learning is only for discrete systems")
         modelmap = _deep_model_map(
             training_data,
             max_epochs,
@@ -229,6 +243,7 @@ def auto_koopman(
     else:
         modelmap = _edmd_model_map(
             training_data,
+            learn_continuous,
             rank,
             obs_type,
             n_obs,
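Because `learn_continuous` is only threaded into the eDMD path, combining it with deep observables now fails fast per the guard above. A sketch of the expected behavior, assuming the package imports cleanly and using a placeholder trajectory:

```python
import numpy as np
from autokoopman import auto_koopman

trajs = [np.random.rand(50, 2)]  # placeholder trajectory, illustrative only
try:
    auto_koopman(trajs, sampling_period=0.1, obs_type="deep", learn_continuous=True)
except ValueError as err:
    print(err)  # -> deep learning is only for discrete systems
```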
@@ -327,11 +342,19 @@ def get_model(self, hyperparams: Sequence):


 def _edmd_model_map(
-    training_data, rank, obs_type, n_obs, lengthscale, sampling_period, normalize
+    training_data,
+    learn_continuous,
+    rank,
+    obs_type,
+    n_obs,
+    lengthscale,
+    sampling_period,
+    normalize,
 ) -> HyperparameterMap:
     """model map for eDMD based methods
-    :param training_data:
+    :param training_data: trajectories training dataset
+    :param learn_continuous: whether to learn continuous time or discrete time Koopman estimator
     :param rank: set of ranks to try (of DMD rank parameter)
     :param obs_type:
     :param n_obs: some obs type require a number of observables
Expand Down Expand Up @@ -363,7 +386,13 @@ def __init__(self):

def get_model(self, hyperparams: Sequence):
return get_estimator(
obs_type, sampling_period, dim, n_obs, hyperparams, normalize
obs_type,
learn_continuous,
sampling_period,
dim,
n_obs,
hyperparams,
normalize,
)

# get the hyperparameter map
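For completeness, a sketch of exercising the updated internal `get_estimator` directly with the new positional `learn_continuous` argument. The import path follows the changed file's location; the argument values, and the reading of `hyperparams` as `[rff parameter, rank]` for the `"rff"` branch, are illustrative assumptions drawn from the dispatch code above:

```python
from autokoopman.autokoopman import get_estimator

est = get_estimator(
    "rff",      # obs_type
    True,       # learn_continuous: build a KoopmanContinuousEstimator
    0.1,        # sampling_period (dropped on the continuous path)
    2,          # dim: state dimension
    100,        # obs: number of RFF observables
    [1.0, 5],   # hyperparams: [rff parameter, DMD rank]
    False,      # normalize
)
```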
