diff --git a/Project.toml b/Project.toml
index d1dc0525..d0d68d99 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "ModelPredictiveControl"
uuid = "61f9bdb8-6ae4-484a-811f-bbf86720c31c"
authors = ["Francis Gagnon"]
-version = "0.23.1"
+version = "0.24.0"
[deps]
ControlSystemsBase = "aaaaaaaa-a6ca-5380-bf3e-84a91bcd477e"
diff --git a/docs/src/assets/plot_controller.svg b/docs/src/assets/plot_controller.svg
index 78112bb4..5d488e7b 100644
--- a/docs/src/assets/plot_controller.svg
+++ b/docs/src/assets/plot_controller.svg
@@ -1,108 +1,114 @@
\ No newline at end of file
diff --git a/docs/src/assets/plot_estimator.svg b/docs/src/assets/plot_estimator.svg
index 38064053..61489639 100644
--- a/docs/src/assets/plot_estimator.svg
+++ b/docs/src/assets/plot_estimator.svg
@@ -1,110 +1,106 @@
\ No newline at end of file
diff --git a/docs/src/internals/predictive_control.md b/docs/src/internals/predictive_control.md
index 374c152f..7f95c21d 100644
--- a/docs/src/internals/predictive_control.md
+++ b/docs/src/internals/predictive_control.md
@@ -33,5 +33,5 @@ ModelPredictiveControl.linconstraint!(::PredictiveController, ::LinModel)
## Solve Optimization Problem
```@docs
-ModelPredictiveControl.optim_objective!
+ModelPredictiveControl.optim_objective!(::PredictiveController)
```
diff --git a/docs/src/internals/state_estim.md b/docs/src/internals/state_estim.md
index 744b796d..7a01a547 100644
--- a/docs/src/internals/state_estim.md
+++ b/docs/src/internals/state_estim.md
@@ -34,6 +34,12 @@ ModelPredictiveControl.initpred!(::MovingHorizonEstimator, ::LinModel)
ModelPredictiveControl.linconstraint!(::MovingHorizonEstimator, ::LinModel)
```
+## Solve Optimization Problem
+
+```@docs
+ModelPredictiveControl.optim_objective!(::MovingHorizonEstimator)
+```
+
## Evaluate Estimated Output
```@docs
diff --git a/docs/src/manual/linmpc.md b/docs/src/manual/linmpc.md
index 41b43986..69edb189 100644
--- a/docs/src/manual/linmpc.md
+++ b/docs/src/manual/linmpc.md
@@ -203,7 +203,7 @@ mpc_mhe = LinMPC(estim, Hp=10, Hc=2, Mwt=[1, 1], Nwt=[0.1, 0.1])
mpc_mhe = setconstraint!(mpc_mhe, ymin=[45, -Inf])
```
-The rejection is not improved here:
+The rejection is indeed improved:
```@example 1
setstate!(model, zeros(model.nx))
@@ -216,10 +216,6 @@ savefig("plot3_LinMPC.svg"); nothing # hide
![plot3_LinMPC](plot3_LinMPC.svg)
-This is because the more performant `direct=true` version of the [`MovingHorizonEstimator`](@ref)
-is not not implemented yet. The rejection will be improved with the `direct=true` version
-(coming soon).
-
## Adding Feedforward Compensation
Suppose that the load disturbance ``u_l`` of the last section is in fact caused by a
diff --git a/src/controller/construct.jl b/src/controller/construct.jl
index 4e1fb86e..f4d588aa 100644
--- a/src/controller/construct.jl
+++ b/src/controller/construct.jl
@@ -44,7 +44,7 @@ The predictive controllers support both soft and hard constraints, defined by:
\mathbf{u_{min} - c_{u_{min}}} ϵ ≤&&\ \mathbf{u}(k+j) &≤ \mathbf{u_{max} + c_{u_{max}}} ϵ &&\qquad j = 0, 1 ,..., H_p - 1 \\
\mathbf{Δu_{min} - c_{Δu_{min}}} ϵ ≤&&\ \mathbf{Δu}(k+j) &≤ \mathbf{Δu_{max} + c_{Δu_{max}}} ϵ &&\qquad j = 0, 1 ,..., H_c - 1 \\
\mathbf{y_{min} - c_{y_{min}}} ϵ ≤&&\ \mathbf{ŷ}(k+j) &≤ \mathbf{y_{max} + c_{y_{max}}} ϵ &&\qquad j = 1, 2 ,..., H_p \\
- \mathbf{x̂_{min} - c_{x̂_{min}}} ϵ ≤&&\ \mathbf{x̂}_{i}(k+j) &≤ \mathbf{x̂_{max} + c_{x̂_{max}}} ϵ &&\qquad j = H_p
+ \mathbf{x̂_{min} - c_{x̂_{min}}} ϵ ≤&&\ \mathbf{x̂}_i(k+j) &≤ \mathbf{x̂_{max} + c_{x̂_{max}}} ϵ &&\qquad j = H_p
\end{alignat*}
```
and also ``ϵ ≥ 0``. The last line is the terminal constraints applied on the states at the
@@ -435,7 +435,7 @@ The model predictions are evaluated from the deviation vectors (see [`setop!`](@
&= \mathbf{E ΔU} + \mathbf{F}
\end{aligned}
```
-in which ``\mathbf{x̂_0}(k) = \mathbf{x̂}_{i}(k) - \mathbf{x̂_{op}}``, with ``i = k`` if
+in which ``\mathbf{x̂_0}(k) = \mathbf{x̂}_i(k) - \mathbf{x̂_{op}}``, with ``i = k`` if
`estim.direct==true`, otherwise ``i = k - 1``. The predicted outputs ``\mathbf{Ŷ_0}`` and
measured disturbances ``\mathbf{D̂_0}`` respectively include ``\mathbf{ŷ_0}(k+j)`` and
``\mathbf{d̂_0}(k+j)`` values with ``j=1`` to ``H_p``, and input increments ``\mathbf{ΔU}``,
@@ -453,8 +453,9 @@ terminal states at ``k+H_p``:
&= \mathbf{e_x̂ ΔU} + \mathbf{f_x̂}
\end{aligned}
```
-The ``\mathbf{F}`` and ``\mathbf{f_x̂}`` vectors are recalculated at each control period
-``k``, see [`initpred!`](@ref) and [`linconstraint!`](@ref).
+The matrices ``\mathbf{E, G, J, K, V, B, e_x̂, g_x̂, j_x̂, k_x̂, v_x̂, b_x̂}`` are defined in the
+Extended Help section. The ``\mathbf{F}`` and ``\mathbf{f_x̂}`` vectors are recalculated at
+each control period ``k``, see [`initpred!`](@ref) and [`linconstraint!`](@ref).
# Extended Help
!!! details "Extended Help"
@@ -529,7 +530,7 @@ function init_predmat(estim::StateEstimator{NT}, model::LinModel, Hp, Hc) where
# Apow_csum 3D array : Apow_csum[:,:,1] = A^0, Apow_csum[:,:,2] = A^1 + A^0, ...
Âpow_csum = cumsum(Âpow, dims=3)
# helper function to improve code clarity and be similar to eqs. in docstring:
- getpower(array3D, power) = array3D[:,:, power+1]
+ getpower(array3D, power) = @views array3D[:,:, power+1]
# --- state estimates x̂ ---
kx̂ = getpower(Âpow, Hp)
K = Matrix{NT}(undef, Hp*ny, nx̂)
diff --git a/src/controller/execute.jl b/src/controller/execute.jl
index 5b4e355e..ea93627f 100644
--- a/src/controller/execute.jl
+++ b/src/controller/execute.jl
@@ -17,7 +17,8 @@ Solve the optimization problem of `mpc` [`PredictiveController`](@ref) and retur
results ``\mathbf{u}(k)``. Following the receding horizon principle, the algorithm discards
the optimal future manipulated inputs ``\mathbf{u}(k+1), \mathbf{u}(k+2), ...`` Note that
the method mutates `mpc` internal data but it does not modify `mpc.estim` states. Call
-[`updatestate!(mpc, u, ym, d)`](@ref) to update `mpc` state estimates.
+[`preparestate!(mpc, ym, d)`](@ref) before `moveinput!`, and [`updatestate!(mpc, u, ym, d)`](@ref)
+after, to update `mpc` state estimates.
Calling a [`PredictiveController`](@ref) object calls this method.
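
For context, a minimal closed-loop sketch of that call order (the plant model, tuning and
setpoint below are purely illustrative, and the plant is simulated by the model itself):

```julia
using ModelPredictiveControl, ControlSystemsBase
model = LinModel(tf(2, [10, 1]), 2.0)   # illustrative first-order plant, Ts = 2 s
mpc   = LinMPC(model, Hp=10, Hc=2)      # illustrative tuning, default SteadyKalmanFilter
ry    = [1.0]                           # output setpoint
for k in 0:10
    ym = model()                # measured output (the model stands in for the plant here)
    preparestate!(mpc, ym)      # correct the state estimate with the newest measurement
    u = moveinput!(mpc, ry)     # solve the optimization problem and extract u(k)
    updatestate!(mpc, u, ym)    # update mpc (and its estimator) state estimates
    updatestate!(model, u)      # update the simulated plant
end
```
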
diff --git a/src/estimator/construct.jl b/src/estimator/construct.jl
index ae45658d..4c43d3d2 100644
--- a/src/estimator/construct.jl
+++ b/src/estimator/construct.jl
@@ -197,8 +197,9 @@ end
One integrator on each measured output by default if `model` is not a [`LinModel`](@ref).
-If the integrator quantity per manipulated input `nint_u ≠ 0`, the method returns zero
-integrator on each measured output.
+There is no verification that the augmented model remains observable. If the integrator
+quantity per manipulated input `nint_u ≠ 0`, the method returns zero integrator on each
+measured output.
"""
function default_nint(model::SimModel, i_ym=1:model.ny, nint_u=0)
validate_ym(model, i_ym)
diff --git a/src/estimator/execute.jl b/src/estimator/execute.jl
index 2891936f..fc620811 100644
--- a/src/estimator/execute.jl
+++ b/src/estimator/execute.jl
@@ -86,7 +86,7 @@ end
Init `estim.x̂0` states from current inputs `u`, measured outputs `ym` and disturbances `d`.
-The method tries to find a good steady-state for the initial estimate ``\mathbf{x̂}(0)``. It
+The method tries to find a good steady-state for the initial estimate ``\mathbf{x̂}``. It
removes the operating points with [`remove_op!`](@ref) and calls [`init_estimate!`](@ref):
- If `estim.model` is a [`LinModel`](@ref), it finds the steady-state of the augmented model
@@ -203,7 +203,7 @@ Prepare `estim.x̂0` estimate with meas. outputs `ym` and dist. `d` for the curr
This function should be called at the beginning of each discrete time step. Its behavior
depends on whether `estim` is a [`StateEstimator`](@ref) in the current/filter (1.) or
-delayed/predictor (2.) form:
+delayed/predictor (2.) formulation:
1. If `estim.direct` is `true`, it removes the operating points with [`remove_op!`](@ref),
calls [`correct_estimate!`](@ref), and returns the corrected state estimate
@@ -246,7 +246,9 @@ This function should be called at the end of each discrete time step. It removes
operating points with [`remove_op!`](@ref), calls [`update_estimate!`](@ref) and returns the
state estimate for the next time step ``\mathbf{x̂}_k(k+1)``. The method [`preparestate!`](@ref)
should be called prior to this one to correct the estimate when applicable (if
-`estim.direct == true`).
+`estim.direct == true`). Note that the [`MovingHorizonEstimator`](@ref) with the default
+`direct=true` option is not able to estimate ``\mathbf{x̂}_k(k+1)``; the returned value is
+therefore the current corrected state ``\mathbf{x̂}_k(k)``.
# Examples
```jldoctest
diff --git a/src/estimator/internal_model.jl b/src/estimator/internal_model.jl
index be61da46..d54d66cc 100644
--- a/src/estimator/internal_model.jl
+++ b/src/estimator/internal_model.jl
@@ -7,6 +7,7 @@ struct InternalModel{NT<:Real, SM<:SimModel} <: StateEstimator{NT}
x̂d::Vector{NT}
x̂s::Vector{NT}
ŷs::Vector{NT}
+ x̂snext::Vector{NT}
i_ym::Vector{Int}
nx̂::Int
nym::Int
@@ -43,14 +44,14 @@ struct InternalModel{NT<:Real, SM<:SimModel} <: StateEstimator{NT}
lastu0 = zeros(NT, nu)
# x̂0 and x̂d are same object (updating x̂d will update x̂0):
x̂d = x̂0 = zeros(NT, model.nx)
- x̂s = zeros(NT, nxs)
+ x̂s, x̂snext = zeros(NT, nxs), zeros(NT, nxs)
ŷs = zeros(NT, ny)
direct = true # InternalModel always uses direct transmission from ym
corrected = [false]
buffer = StateEstimatorBuffer{NT}(nu, nx̂, nym, ny, nd)
return new{NT, SM}(
model,
- lastu0, x̂op, f̂op, x̂0, x̂d, x̂s, ŷs,
+ lastu0, x̂op, f̂op, x̂0, x̂d, x̂s, ŷs, x̂snext,
i_ym, nx̂, nym, nyu, nxs,
As, Bs, Cs, Ds,
Â, B̂u, Ĉ, B̂d, D̂d, Ĉm, D̂dm,
@@ -247,9 +248,14 @@ function correct_estimate!(estim::InternalModel, y0m, d0)
ŷ0d = estim.buffer.ŷ
h!(ŷ0d, estim.model, estim.x̂d, d0)
ŷs = estim.ŷs
- ŷs[estim.i_ym] .= @views y0m .- ŷ0d[estim.i_ym]
- # ŷs=0 for unmeasured outputs :
- map(i -> ŷs[i] = (i in estim.i_ym) ? ŷs[i] : 0, eachindex(ŷs))
+ for j in eachindex(ŷs) # broadcasting was allocating unexpectedly, so we use a loop
+     i = findfirst(isequal(j), estim.i_ym)
+     if isnothing(i)
+         ŷs[j] = 0 # ŷs=0 for unmeasured outputs
+     else
+         ŷs[j] = y0m[i] - ŷ0d[j]
+     end
+ end
return nothing
end
@@ -276,7 +282,7 @@ function update_estimate!(estim::InternalModel, _ , d0, u0)
f!(x̂dnext, model, x̂d, u0, d0)
x̂d .= x̂dnext # this also updates estim.x̂0 (they are the same object)
# --------------- stochastic model -----------------------
- x̂snext = similar(x̂s) # TODO: remove this allocation with a new buffer?
+ x̂snext = estim.x̂snext
mul!(x̂snext, estim.Âs, x̂s)
mul!(x̂snext, estim.B̂s, ŷs, 1, 1)
estim.x̂s .= x̂snext
diff --git a/src/estimator/kalman.jl b/src/estimator/kalman.jl
index d1bdd3bd..52a53c9c 100644
--- a/src/estimator/kalman.jl
+++ b/src/estimator/kalman.jl
@@ -92,9 +92,12 @@ The arguments are in standard deviations σ, i.e. same units than outputs and st
matrices ``\mathbf{Â, B̂_u, B̂_d, Ĉ, D̂_d}`` are `model` matrices augmented with the stochastic
model, which is specified by the numbers of integrator `nint_u` and `nint_ym` (see Extended
Help). Likewise, the covariance matrices are augmented with ``\mathbf{Q̂ = \text{diag}(Q,
-Q_{int_u}, Q_{int_{ym}})}`` and ``\mathbf{R̂ = R}``. The matrices ``\mathbf{Ĉ^m, D̂_d^m}`` are
-the rows of ``\mathbf{Ĉ, D̂_d}`` that correspond to measured outputs ``\mathbf{y^m}`` (and
-unmeasured ones, for ``\mathbf{Ĉ^u, D̂_d^u}``).
+Q_{int_u}, Q_{int_{ym}})}`` and ``\mathbf{R̂ = R}``. The Extended Help provides some guidelines
+on the covariance tuning. The matrices ``\mathbf{Ĉ^m, D̂_d^m}`` are the rows of
+``\mathbf{Ĉ, D̂_d}`` that correspond to measured outputs ``\mathbf{y^m}`` (and unmeasured
+ones, for ``\mathbf{Ĉ^u, D̂_d^u}``). If `direct` is `true`, the Kalman filter estimates the
+current state ``\mathbf{x̂}_k(k)`` using the newest measurements; otherwise, it predicts the
+state of the next time step ``\mathbf{x̂}_k(k+1)``.
# Arguments
!!! info
@@ -116,7 +119,7 @@ unmeasured ones, for ``\mathbf{Ĉ^u, D̂_d^u}``).
- `σQint_ym=fill(1,sum(nint_ym))` or *`sigmaQint_ym`* : same as `σQ` but for the unmeasured
disturbances at measured outputs ``\mathbf{Q_{int_{ym}}}`` (composed of integrators).
- `direct=true`: construct with a direct transmission from ``\mathbf{y^m}`` (a.k.a. current
- estimator, in opposition to the delayed/prediction form).
+ estimator, in opposition to the delayed/predictor form).
# Examples
```jldoctest
@@ -133,6 +136,11 @@ SteadyKalmanFilter estimator with a sample time Ts = 0.5 s, LinModel and:
# Extended Help
!!! details "Extended Help"
+ The `σR` argument is generally fixed at the estimated standard deviations of the sensor
+ noises. The `σQ`, `σQint_u` and `σQint_ym` arguments can be used to tune the filter
+ response. Increasing them makes the filter more responsive to disturbances but more
+ sensitive to measurement noise.
+
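    As a rough illustration of these guidelines (the plant and the numerical values below
    are arbitrary, chosen only to show the two tuning directions):

    ```julia
    using ModelPredictiveControl, ControlSystemsBase
    model = LinModel(tf(3, [30, 1]), 0.5)
    # smoother but slower estimates (small output integrator noise):
    skf_slow = SteadyKalmanFilter(model, σR=[0.5], σQint_ym=[0.1])
    # faster disturbance rejection, more sensitive to measurement noise:
    skf_fast = SteadyKalmanFilter(model, σR=[0.5], σQint_ym=[5.0])
    ```
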
The model augmentation with `nint_u` vector adds integrators at model manipulated inputs,
and `nint_ym`, at measured outputs. They create the integral action when the estimator
is used in a controller as state feedback. By default, the method [`default_nint`](@ref)
@@ -249,7 +257,7 @@ function correct_estimate_obsv!(estim::StateEstimator, y0m, d0, K̂)
end
"Allow code reuse for `SteadyKalmanFilter` and `Luenberger` (observers with constant gain)."
-function predict_estimate_obsv!(estim::StateEstimator, y0m, d0, u0)
+function predict_estimate_obsv!(estim::StateEstimator, _ , d0, u0)
x̂0corr = estim.x̂0
Â, B̂u, B̂d = estim.Â, estim.B̂u, estim.B̂d
x̂0next = estim.buffer.x̂
@@ -331,24 +339,40 @@ end
Construct a time-varying Kalman Filter with the [`LinModel`](@ref) `model`.
-The process model is identical to [`SteadyKalmanFilter`](@ref). The matrix
-``\mathbf{P̂}_k(k+1)`` is the estimation error covariance of `model` states augmented with
-the stochastic ones (specified by `nint_u` and `nint_ym`). Three keyword arguments modify
-its initial value with ``\mathbf{P̂}_{-1}(0) =
- \mathrm{diag}\{ \mathbf{P}(0), \mathbf{P_{int_{u}}}(0), \mathbf{P_{int_{ym}}}(0) \}``.
+The process model is identical to [`SteadyKalmanFilter`](@ref). The matrix ``\mathbf{P̂}`` is
+the estimation error covariance of `model` states augmented with the stochastic ones
+(specified by `nint_u` and `nint_ym`). Three keyword arguments specify its initial value with
+``\mathbf{P̂}_{-1}(0) = \mathrm{diag}\{ \mathbf{P}(0), \mathbf{P_{int_{u}}}(0),
+\mathbf{P_{int_{ym}}}(0) \}``. The initial state estimate ``\mathbf{x̂}_{-1}(0)`` can be
+manually specified with [`setstate!`](@ref), or automatically with [`initstate!`](@ref).
# Arguments
!!! info
Keyword arguments with *`emphasis`* are non-Unicode alternatives.
- `model::LinModel` : (deterministic) model for the estimations.
+- `i_ym=1:model.ny` : `model` output indices that are measured ``\mathbf{y^m}``, the rest
+ are unmeasured ``\mathbf{y^u}``.
- `σP_0=fill(1/model.nx,model.nx)` or *`sigmaP_0`* : main diagonal of the initial estimate
covariance ``\mathbf{P}(0)``, specified as a standard deviation vector.
+- `σQ=fill(1/model.nx,model.nx)` or *`sigmaQ`* : main diagonal of the process noise
+ covariance ``\mathbf{Q}`` of `model`, specified as a standard deviation vector.
+- `σR=fill(1,length(i_ym))` or *`sigmaR`* : main diagonal of the sensor noise covariance
+ ``\mathbf{R}`` of `model` measured outputs, specified as a standard deviation vector.
+- `nint_u=0`: integrator quantity for the stochastic model of the unmeasured disturbances at
+ the manipulated inputs (vector), use `nint_u=0` for no integrator.
+- `nint_ym=default_nint(model,i_ym,nint_u)` : same as `nint_u` but for the unmeasured
+ disturbances at the measured outputs, use `nint_ym=0` for no integrator.
+- `σQint_u=fill(1,sum(nint_u))` or *`sigmaQint_u`* : same as `σQ` but for the unmeasured
+ disturbances at manipulated inputs ``\mathbf{Q_{int_u}}`` (composed of integrators).
- `σPint_u_0=fill(1,sum(nint_u))` or *`sigmaPint_u_0`* : same as `σP_0` but for the unmeasured
disturbances at manipulated inputs ``\mathbf{P_{int_u}}(0)`` (composed of integrators).
+- `σQint_ym=fill(1,sum(nint_ym))` or *`sigmaQint_ym`* : same as `σQ` but for the unmeasured
+ disturbances at measured outputs ``\mathbf{Q_{int_{ym}}}`` (composed of integrators).
- `σPint_ym_0=fill(1,sum(nint_ym))` or *`sigmaPint_ym_0`* : same as `σP_0` but for the unmeasured
disturbances at measured outputs ``\mathbf{P_{int_{ym}}}(0)`` (composed of integrators).
-- `` of [`SteadyKalmanFilter`](@ref) constructor.
+- `direct=true`: construct with a direct transmission from ``\mathbf{y^m}`` (a.k.a. current
+ estimator, in opposition to the delayed/predictor form).
# Examples
```jldoctest
@@ -558,18 +582,42 @@ See [`SteadyKalmanFilter`](@ref) for details on ``\mathbf{v}(k), \mathbf{w}(k)``
state-space functions augmented with the stochastic model of the unmeasured disturbances,
which is specified by the numbers of integrator `nint_u` and `nint_ym` (see Extended Help).
The ``\mathbf{ĥ^m}`` function represents the measured outputs of ``\mathbf{ĥ}`` function
-(and unmeasured ones, for ``\mathbf{ĥ^u}``).
+(and unmeasured ones, for ``\mathbf{ĥ^u}``). The matrix ``\mathbf{P̂}`` is the estimation
+error covariance of `model` states augmented with the stochastic ones. Three keyword
+arguments specify its initial value with ``\mathbf{P̂}_{-1}(0) =
+\mathrm{diag}\{ \mathbf{P}(0), \mathbf{P_{int_{u}}}(0), \mathbf{P_{int_{ym}}}(0) \}``. The
+initial state estimate ``\mathbf{x̂}_{-1}(0)`` can be manually specified with [`setstate!`](@ref).
# Arguments
!!! info
Keyword arguments with *`emphasis`* are non-Unicode alternatives.
- `model::SimModel` : (deterministic) model for the estimations.
+- `i_ym=1:model.ny` : `model` output indices that are measured ``\mathbf{y^m}``, the rest
+ are unmeasured ``\mathbf{y^u}``.
+- `σP_0=fill(1/model.nx,model.nx)` or *`sigmaP_0`* : main diagonal of the initial estimate
+ covariance ``\mathbf{P}(0)``, specified as a standard deviation vector.
+- `σQ=fill(1/model.nx,model.nx)` or *`sigmaQ`* : main diagonal of the process noise
+ covariance ``\mathbf{Q}`` of `model`, specified as a standard deviation vector.
+- `σR=fill(1,length(i_ym))` or *`sigmaR`* : main diagonal of the sensor noise covariance
+ ``\mathbf{R}`` of `model` measured outputs, specified as a standard deviation vector.
+- `nint_u=0`: integrator quantity for the stochastic model of the unmeasured disturbances at
+ the manipulated inputs (vector), use `nint_u=0` for no integrator (see Extended Help).
+- `nint_ym=default_nint(model,i_ym,nint_u)` : same as `nint_u` but for the unmeasured
+ disturbances at the measured outputs, use `nint_ym=0` for no integrator (see Extended Help).
+- `σQint_u=fill(1,sum(nint_u))` or *`sigmaQint_u`* : same as `σQ` but for the unmeasured
+ disturbances at manipulated inputs ``\mathbf{Q_{int_u}}`` (composed of integrators).
+- `σPint_u_0=fill(1,sum(nint_u))` or *`sigmaPint_u_0`* : same as `σP_0` but for the unmeasured
+ disturbances at manipulated inputs ``\mathbf{P_{int_u}}(0)`` (composed of integrators).
+- `σQint_ym=fill(1,sum(nint_ym))` or *`sigmaQint_ym`* : same as `σQ` but for the unmeasured
+ disturbances at measured outputs ``\mathbf{Q_{int_{ym}}}`` (composed of integrators).
+- `σPint_ym_0=fill(1,sum(nint_ym))` or *`sigmaPint_ym_0`* : same as `σP_0` but for the unmeasured
+ disturbances at measured outputs ``\mathbf{P_{int_{ym}}}(0)`` (composed of integrators).
+- `direct=true`: construct with a direct transmission from ``\mathbf{y^m}`` (a.k.a. current
+ estimator, in opposition to the delayed/predictor form).
- `α=1e-3` or *`alpha`* : alpha parameter, spread of the state distribution ``(0 < α ≤ 1)``.
- `β=2` or *`beta`* : beta parameter, skewness and kurtosis of the states distribution ``(β ≥ 0)``.
- `κ=0` or *`kappa`* : kappa parameter, another spread parameter ``(0 ≤ κ ≤ 3)``.
-- `` of [`SteadyKalmanFilter`](@ref) constructor.
-- `` of [`KalmanFilter`](@ref) constructor.
# Examples
```jldoctest
@@ -586,11 +634,12 @@ UnscentedKalmanFilter estimator with a sample time Ts = 10.0 s, NonLinModel and:
# Extended Help
!!! details "Extended Help"
- The Extended Help of [`SteadyKalmanFilter`](@ref) details the augmentation with `nint_ym`
- and `nint_u` arguments. The default augmentation scheme is identical, that is `nint_u=0`
- and `nint_ym` computed by [`default_nint`](@ref). Note that the constructor does not
- validate the observability of the resulting augmented [`NonLinModel`](@ref). In such
- cases, it is the user's responsibility to ensure that it is still observable.
+ The Extended Help of [`SteadyKalmanFilter`](@ref) details the tuning of the covariances
+ and the augmentation with `nint_ym` and `nint_u` arguments. The default augmentation
+ scheme is identical, that is `nint_u=0` and `nint_ym` computed by [`default_nint`](@ref).
+ Note that the constructor does not validate the observability of the resulting augmented
+ [`NonLinModel`](@ref). In such cases, it is the user's responsibility to ensure that it
+ is still observable.
"""
function UnscentedKalmanFilter(
model::SM;
@@ -889,8 +938,9 @@ end
Construct an extended Kalman Filter with the [`SimModel`](@ref) `model`.
-Both [`LinModel`](@ref) and [`NonLinModel`](@ref) are supported. The process model is
-identical to [`UnscentedKalmanFilter`](@ref). The Jacobians of the augmented model
+Both [`LinModel`](@ref) and [`NonLinModel`](@ref) are supported. The process model and the
+keyword arguments are identical to [`UnscentedKalmanFilter`](@ref), except for `α`, `β` and
+`κ` which do not apply to the extended Kalman Filter. The Jacobians of the augmented model
``\mathbf{f̂, ĥ}`` are computed with [`ForwardDiff.jl`](https://github.com/JuliaDiff/ForwardDiff.jl)
automatic differentiation.
@@ -898,11 +948,6 @@ automatic differentiation.
See the Extended Help of [`linearize`](@ref) function if you get an error like:
`MethodError: no method matching (::var"##")(::Vector{ForwardDiff.Dual})`.
-# Arguments
-- `model::SimModel` : (deterministic) model for the estimations.
-- `` of [`SteadyKalmanFilter`](@ref) constructor.
-- `` of [`KalmanFilter`](@ref) constructor.
-
# Examples
```jldoctest
julia> model = NonLinModel((x,u,_)->0.2x+u, (x,_)->-3x, 5.0, 1, 1, 1, solver=nothing);
diff --git a/src/estimator/mhe/construct.jl b/src/estimator/mhe/construct.jl
index 6f2949ff..e4aa99f9 100644
--- a/src/estimator/mhe/construct.jl
+++ b/src/estimator/mhe/construct.jl
@@ -109,11 +109,8 @@ struct MovingHorizonEstimator{
buffer::StateEstimatorBuffer{NT}
function MovingHorizonEstimator{NT, SM, JM, CE}(
model::SM, He, i_ym, nint_u, nint_ym, P̂_0, Q̂, R̂, Cwt, optim::JM, covestim::CE;
- direct=false
+ direct=true
) where {NT<:Real, SM<:SimModel{NT}, JM<:JuMP.GenericModel, CE<:StateEstimator{NT}}
- if direct
- throw(ArgumentError("MovingHorizonEstimator: direct=true is not implemented yet"))
- end
nu, ny, nd = model.nu, model.ny, model.nd
He < 1 && throw(ArgumentError("Estimation horizon He should be ≥ 1"))
Cwt < 0 && throw(ArgumentError("Cwt weight should be ≥ 0"))
@@ -131,8 +128,9 @@ struct MovingHorizonEstimator{
invP̄ = Hermitian(inv(P̂_0), :L)
invQ̂_He = Hermitian(repeatdiag(inv(Q̂), He), :L)
invR̂_He = Hermitian(repeatdiag(inv(R̂), He), :L)
+ p = direct ? 0 : 1
E, G, J, B, ex̄, Ex̂, Gx̂, Jx̂, Bx̂ = init_predmat_mhe(
- model, He, i_ym, Â, B̂u, Ĉm, B̂d, D̂dm, x̂op, f̂op
+ model, He, i_ym, Â, B̂u, Ĉm, B̂d, D̂dm, x̂op, f̂op, p
)
# dummy values (updated just before optimization):
F, fx̄, Fx̂ = zeros(NT, nym*He), zeros(NT, nx̂), zeros(NT, nx̂*He)
@@ -145,7 +143,8 @@ struct MovingHorizonEstimator{
Z̃ = zeros(NT, nZ̃)
X̂op = repeat(x̂op, He)
X̂0, Y0m = zeros(NT, nx̂*He), zeros(NT, nym*He)
- U0, D0 = zeros(NT, nu*He), zeros(NT, nd*He)
+ nD0 = direct ? nd*(He+1) : nd*He
+ U0, D0 = zeros(NT, nu*He), zeros(NT, nD0)
Ŵ = zeros(NT, nx̂*He)
x̂0arr_old = zeros(NT, nx̂)
P̂arr_old = copy(P̂_0)
@@ -183,7 +182,7 @@ distribution is not approximated like the [`UnscentedKalmanFilter`](@ref). The c
costs are drastically higher, however, since it minimizes the following objective function
at each discrete time ``k``:
```math
-\min_{\mathbf{x̂}_k(k-N_k+1), \mathbf{Ŵ}, ϵ} \mathbf{x̄}' \mathbf{P̄}^{-1} \mathbf{x̄}
+\min_{\mathbf{x̂}_k(k-N_k+p), \mathbf{Ŵ}, ϵ} \mathbf{x̄}' \mathbf{P̄}^{-1} \mathbf{x̄}
+ \mathbf{Ŵ}' \mathbf{Q̂}_{N_k}^{-1} \mathbf{Ŵ}
+ \mathbf{V̂}' \mathbf{R̂}_{N_k}^{-1} \mathbf{V̂}
+ C ϵ^2
@@ -191,8 +190,8 @@ at each discrete time ``k``:
in which the arrival costs are evaluated from the states estimated at time ``k-N_k``:
```math
\begin{aligned}
- \mathbf{x̄} &= \mathbf{x̂}_{k-N_k}(k-N_k+1) - \mathbf{x̂}_k(k-N_k+1) \\
- \mathbf{P̄} &= \mathbf{P̂}_{k-N_k}(k-N_k+1)
+ \mathbf{x̄} &= \mathbf{x̂}_{k-N_k}(k-N_k+p) - \mathbf{x̂}_k(k-N_k+p) \\
+ \mathbf{P̄} &= \mathbf{P̂}_{k-N_k}(k-N_k+p)
\end{aligned}
```
and the covariances are repeated ``N_k`` times:
@@ -202,35 +201,58 @@ and the covariances are repeated ``N_k`` times:
\mathbf{R̂}_{N_k} &= \text{diag}\mathbf{(R̂,R̂,...,R̂)}
\end{aligned}
```
-The estimation horizon ``H_e`` limits the window length:
+The estimation horizon ``H_e`` limits the window length:
```math
-N_k = \begin{cases}
+N_k = \begin{cases}
k + 1 & k < H_e \\
H_e & k ≥ H_e \end{cases}
```
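For instance, with ``H_e = 3``, the window grows as ``N_k = 1, 2, 3`` at times ``k = 0, 1, 2``, and then stays at ``N_k = 3`` for all ``k ≥ 3``.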
-The vectors ``\mathbf{Ŵ}`` and ``\mathbf{V̂}`` encompass the estimated process noise
-``\mathbf{ŵ}(k-j)`` and sensor noise ``\mathbf{v̂}(k-j)`` from ``j=N_k-1`` to ``0``. The
-Extended Help defines the two vectors, the slack variable ``ϵ``, and the estimation of the
-covariance at arrival ``\mathbf{P̂}_{k-N_k}(k-N_k+1)``. See [`UnscentedKalmanFilter`](@ref)
-for details on the augmented process model and ``\mathbf{R̂}, \mathbf{Q̂}`` covariances.
+The vectors ``\mathbf{Ŵ}`` and ``\mathbf{V̂}`` respectively encompass the estimated process
+noises ``\mathbf{ŵ}(k-j+p)`` from ``j=N_k`` to ``1`` and sensor noises ``\mathbf{v̂}(k-j+1)``
+from ``j=N_k`` to ``1``. The Extended Help defines the two vectors, the slack variable
+``ϵ``, and the estimation of the covariance at arrival ``\mathbf{P̂}_{k-N_k}(k-N_k+p)``. If
+the keyword argument `direct=true` (default value), the constant is ``p=0`` in the equations
+above and the MHE is in the current form. Otherwise, ``p=1``, leading to the prediction form.
+
+See [`UnscentedKalmanFilter`](@ref) for details on the augmented process model and
+``\mathbf{R̂}, \mathbf{Q̂}`` covariances.
!!! warning
See the Extended Help if you get an error like:
`MethodError: no method matching (::var"##")(::Vector{ForwardDiff.Dual})`.
# Arguments
-!!! warning
- The keyword argument `direct` defaults to `false` for the `MovingHorizonEstimator`,
- since `direct=true` is not implemented yet.
+!!! info
+ Keyword arguments with *`emphasis`* are non-Unicode alternatives.
- `model::SimModel` : (deterministic) model for the estimations.
- `He=nothing` : estimation horizon ``H_e``, must be specified.
+- `i_ym=1:model.ny` : `model` output indices that are measured ``\mathbf{y^m}``, the rest
+ are unmeasured ``\mathbf{y^u}``.
+- `σP_0=fill(1/model.nx,model.nx)` or *`sigmaP_0`* : main diagonal of the initial estimate
+ covariance ``\mathbf{P}(0)``, specified as a standard deviation vector.
+- `σQ=fill(1/model.nx,model.nx)` or *`sigmaQ`* : main diagonal of the process noise
+ covariance ``\mathbf{Q}`` of `model`, specified as a standard deviation vector.
+- `σR=fill(1,length(i_ym))` or *`sigmaR`* : main diagonal of the sensor noise covariance
+ ``\mathbf{R}`` of `model` measured outputs, specified as a standard deviation vector.
+- `nint_u=0`: integrator quantity for the stochastic model of the unmeasured disturbances at
+ the manipulated inputs (vector), use `nint_u=0` for no integrator (see Extended Help).
+- `nint_ym=default_nint(model,i_ym,nint_u)` : same as `nint_u` but for the unmeasured
+ disturbances at the measured outputs, use `nint_ym=0` for no integrator (see Extended Help).
+- `σQint_u=fill(1,sum(nint_u))` or *`sigmaQint_u`* : same as `σQ` but for the unmeasured
+ disturbances at manipulated inputs ``\mathbf{Q_{int_u}}`` (composed of integrators).
+- `σPint_u_0=fill(1,sum(nint_u))` or *`sigmaPint_u_0`* : same as `σP_0` but for the unmeasured
+ disturbances at manipulated inputs ``\mathbf{P_{int_u}}(0)`` (composed of integrators).
+- `σQint_ym=fill(1,sum(nint_ym))` or *`sigmaQint_ym`* : same as `σQ` but for the unmeasured
+ disturbances at measured outputs ``\mathbf{Q_{int_{ym}}}`` (composed of integrators).
+- `σPint_ym_0=fill(1,sum(nint_ym))` or *`sigmaPint_ym_0`* : same as `σP_0` but for the unmeasured
+ disturbances at measured outputs ``\mathbf{P_{int_{ym}}}(0)`` (composed of integrators).
- `Cwt=Inf` : slack variable weight ``C``, default to `Inf` meaning hard constraints only.
- `optim=default_optim_mhe(model)` : a [`JuMP.Model`](https://jump.dev/JuMP.jl/stable/api/JuMP/#JuMP.Model)
with a quadratic/nonlinear optimizer for solving (default to [`Ipopt`](https://github.com/jump-dev/Ipopt.jl),
or [`OSQP`](https://osqp.org/docs/parsers/jump.html) if `model` is a [`LinModel`](@ref)).
-- `` of [`SteadyKalmanFilter`](@ref) constructor.
-- `` of [`KalmanFilter`](@ref) constructor.
+- `direct=true`: construct with a direct transmission from ``\mathbf{y^m}`` (a.k.a. current
+ estimator, in opposition to the delayed/predictor form).
# Examples
```jldoctest
@@ -253,10 +275,10 @@ MovingHorizonEstimator estimator with a sample time Ts = 10.0 s, Ipopt optimizer
```math
\mathbf{Ŵ} =
\begin{bmatrix}
- \mathbf{ŵ}(k-N_k+1) \\
- \mathbf{ŵ}(k-N_k+2) \\
+ \mathbf{ŵ}(k-N_k+p+0) \\
+ \mathbf{ŵ}(k-N_k+p+1) \\
\vdots \\
- \mathbf{ŵ}(k)
+ \mathbf{ŵ}(k+p-1)
\end{bmatrix} , \quad
\mathbf{V̂} =
\begin{bmatrix}
@@ -273,13 +295,38 @@ MovingHorizonEstimator estimator with a sample time Ts = 10.0 s, Ipopt optimizer
\mathbf{x̂}_k(k-j+1) &= \mathbf{f̂}\Big(\mathbf{x̂}_k(k-j), \mathbf{u}(k-j), \mathbf{d}(k-j)\Big) + \mathbf{ŵ}(k-j)
\end{aligned}
```
- The slack variable ``ϵ`` relaxes the constraints if enabled, see [`setconstraint!`](@ref).
- It is disabled by default for the MHE (from `Cwt=Inf`) but it should be activated for
- problems with two or more types of bounds, to ensure feasibility (e.g. on the estimated
- state and sensor noise).
-
- The optimization and the estimation of the covariance at arrival
- ``\mathbf{P̂}_{k-N_k}(k-N_k+1)`` depend on `model`:
+ The constant ``p`` is equal to `!direct`. In other words, ``\mathbf{Ŵ}`` and ``\mathbf{V̂}``
+ are shifted by one time step if `direct==true`. The non-default prediction form with
+ ``p=1`` is particularly useful for the MHE since it moves its expensive computations to
+ after the MPC optimization. That is, [`preparestate!`](@ref) solves the optimization by
+ default, but the solving can be postponed to [`updatestate!`](@ref) with `direct=false`.
+
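    A minimal sketch of the two formulations (the model and the horizon below are
    illustrative):

    ```julia
    using ModelPredictiveControl, ControlSystemsBase
    model = LinModel(tf(2, [10, 1]), 0.5)
    mhe_current = MovingHorizonEstimator(model, He=5)                # p=0, solved in preparestate!
    mhe_predict = MovingHorizonEstimator(model, He=5, direct=false)  # p=1, solved in updatestate!
    ```
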
+ The Extended Help of [`SteadyKalmanFilter`](@ref) details the tuning of the covariances
+ and the augmentation with `nint_ym` and `nint_u` arguments. The default augmentation
+ scheme is identical, that is `nint_u=0` and `nint_ym` computed by [`default_nint`](@ref).
+ Note that the constructor does not validate the observability of the resulting augmented
+ [`NonLinModel`](@ref). In such cases, it is the user's responsibility to ensure that it
+ is still observable.
+
+ The estimation covariance at arrival ``\mathbf{P̂}_{k-N_k}(k-N_k+p)`` gives an uncertainty
+ on the state estimate at the beginning of the window ``k-N_k+p``, that is, in the past.
+ It is not the same as the current estimate covariance ``\mathbf{P̂}_k(k)``, a value not
+ computed by the MHE (unlike, e.g., the [`KalmanFilter`](@ref)). Three keyword
+ arguments specify its initial value with ``\mathbf{P̂_i} = \mathrm{diag}\{ \mathbf{P}(0),
+ \mathbf{P_{int_{u}}}(0), \mathbf{P_{int_{ym}}}(0) \}``. The initial state estimate
+ ``\mathbf{x̂_i}`` can be manually specified with [`setstate!`](@ref), or automatically
+ with [`initstate!`](@ref) for [`LinModel`](@ref). Note that the MHE with ``p=0`` is slightly
+ inconsistent with all the other estimators here. It interprets the initial values as
+ ``\mathbf{x̂_i} = \mathbf{x̂}_{-1}(-1)`` and ``\mathbf{P̂_i} = \mathbf{P̂}_{-1}(-1)``, that
+ is, an *a posteriori* estimate[^2] from the last time step. The MHE with ``p=1`` is
+ consistent, interpreting them as ``\mathbf{x̂_i} = \mathbf{x̂}_{-1}(0)`` and
+ ``\mathbf{P̂_i} = \mathbf{P̂}_{-1}(0)``.
+
+ [^2]: M. Hovd (2012), "A Note On The Smoothing Formulation Of Moving Horizon Estimation",
+ *Facta Universitatis*, Vol. 11 №2.
+
+ The optimization and the update of the arrival covariance depend on `model`:
- If `model` is a [`LinModel`](@ref), the optimization is treated as a quadratic program
with a time-varying Hessian, which is generally cheaper than nonlinear programming. By
@@ -289,10 +336,14 @@ MovingHorizonEstimator estimator with a sample time Ts = 10.0 s, Ipopt optimizer
functions must be compatible with this feature. See [Automatic differentiation](https://jump.dev/JuMP.jl/stable/manual/nlp/#Automatic-differentiation)
for common mistakes when writing these functions. An [`UnscentedKalmanFilter`](@ref)
estimates the arrival covariance by default.
-
- Note that if `Cwt≠Inf`, the attribute `nlp_scaling_max_gradient` of `Ipopt` is set to
- `10/Cwt` (if not already set), to scale the small values of ``ϵ``. Use the second
- constructor to specify the covariance estimation method.
+
+ The slack variable ``ϵ`` relaxes the constraints if enabled, see [`setconstraint!`](@ref).
+ It is disabled by default for the MHE (from `Cwt=Inf`) but it should be activated for
+ problems with two or more types of bounds, to ensure feasibility (e.g. on the estimated
+ state ``\mathbf{x̂}`` and sensor noise ``\mathbf{v̂}``). Note that if `Cwt≠Inf`, the
+ attribute `nlp_scaling_max_gradient` of `Ipopt` is set to `10/Cwt` (if not already set),
+ to scale the small values of ``ϵ``. Use the second constructor to specify the arrival
+ covariance estimation method.
"""
function MovingHorizonEstimator(
model::SM;
@@ -309,7 +360,7 @@ function MovingHorizonEstimator(
sigmaQint_ym = fill(1, max(sum(nint_ym), 0)),
Cwt::Real = Inf,
optim::JM = default_optim_mhe(model),
- direct = false,
+ direct = true,
σP_0 = sigmaP_0,
σQ = sigmaQ,
σR = sigmaR,
@@ -341,16 +392,17 @@ default_optim_mhe(::SimModel) = JuMP.Model(DEFAULT_NONLINMHE_OPTIMIZER, add_brid
Construct the estimator from the augmented covariance matrices `P̂_0`, `Q̂` and `R̂`.
-This syntax allows nonzero off-diagonal elements in ``\mathbf{P̂}_{-1}(0), \mathbf{Q̂, R̂}``.
-The keyword argument `covestim` also allows specifying a custom [`StateEstimator`](@ref)
-object for the estimation of covariance at the arrival ``\mathbf{P̂}_{k-N_k}(k-N_k+1)``. The
+This syntax allows nonzero off-diagonal elements in ``\mathbf{P̂_i}, \mathbf{Q̂, R̂}``,
+where ``\mathbf{P̂_i}`` is the initial estimation covariance, provided by the `P̂_0` argument. The
+keyword argument `covestim` also allows specifying a custom [`StateEstimator`](@ref) object
+for the estimation of covariance at the arrival ``\mathbf{P̂}_{k-N_k}(k-N_k+1)``. The
supported types are [`KalmanFilter`](@ref), [`UnscentedKalmanFilter`](@ref) and
[`ExtendedKalmanFilter`](@ref).
"""
function MovingHorizonEstimator(
model::SM, He, i_ym, nint_u, nint_ym, P̂_0, Q̂, R̂, Cwt=Inf;
optim::JM = default_optim_mhe(model),
- direct = false,
+ direct = true,
covestim::CE = default_covestim_mhe(model, i_ym, nint_u, nint_ym, P̂_0, Q̂, R̂; direct)
) where {NT<:Real, SM<:SimModel{NT}, JM<:JuMP.GenericModel, CE<:StateEstimator{NT}}
P̂_0, Q̂, R̂ = to_mat(P̂_0), to_mat(Q̂), to_mat(R̂)
@@ -375,8 +427,8 @@ It supports both soft and hard constraints on the estimated state ``\mathbf{x̂}
noise ``\mathbf{ŵ}`` and sensor noise ``\mathbf{v̂}``:
```math
\begin{alignat*}{3}
- \mathbf{x̂_{min} - c_{x̂_{min}}} ϵ ≤&&\ \mathbf{x̂}_k(k-j+1) &≤ \mathbf{x̂_{max} + c_{x̂_{max}}} ϵ &&\qquad j = N_k, N_k - 1, ... , 0 \\
- \mathbf{ŵ_{min} - c_{ŵ_{min}}} ϵ ≤&&\ \mathbf{ŵ}(k-j+1) &≤ \mathbf{ŵ_{max} + c_{ŵ_{max}}} ϵ &&\qquad j = N_k, N_k - 1, ... , 1 \\
+ \mathbf{x̂_{min} - c_{x̂_{min}}} ϵ ≤&&\ \mathbf{x̂}_k(k-j+p) &≤ \mathbf{x̂_{max} + c_{x̂_{max}}} ϵ &&\qquad j = N_k, N_k - 1, ... , 0 \\
+ \mathbf{ŵ_{min} - c_{ŵ_{min}}} ϵ ≤&&\ \mathbf{ŵ}(k-j+p) &≤ \mathbf{ŵ_{max} + c_{ŵ_{max}}} ϵ &&\qquad j = N_k, N_k - 1, ... , 1 \\
\mathbf{v̂_{min} - c_{v̂_{min}}} ϵ ≤&&\ \mathbf{v̂}(k-j+1) &≤ \mathbf{v̂_{max} + c_{v̂_{max}}} ϵ &&\qquad j = N_k, N_k - 1, ... , 1
\end{alignat*}
```
@@ -385,8 +437,8 @@ is no bound. The constraint softness parameters ``\mathbf{c}``, also called equa
for relaxation, are non-negative values that specify the softness of the associated bound.
Use `0.0` values for hard constraints (default for all of them). Notice that constraining
the estimated sensor noises is equivalent to bounding the innovation term, since
-``\mathbf{v̂}(k) = \mathbf{y^m}(k) - \mathbf{ŷ^m}(k)``. See Extended Help for details on
-model augmentation and time-varying constraints.
+``\mathbf{v̂}(k) = \mathbf{y^m}(k) - \mathbf{ŷ^m}(k)``. See Extended Help for details on
+the constant ``p``, on model augmentation and on time-varying constraints.
# Arguments
!!! info
@@ -422,9 +474,10 @@ MovingHorizonEstimator estimator with a sample time Ts = 1.0 s, OSQP optimizer,
# Extended Help
!!! details "Extended Help"
- Note that the state ``\mathbf{x̂}`` and process noise ``\mathbf{ŵ}`` constraints are
- applied on the augmented model, detailed in [`SteadyKalmanFilter`](@ref) Extended Help.
- For variable constraints, the bounds can be modified after calling [`updatestate!`](@ref),
+ The constant is ``p=0`` if `estim.direct==true` (current form), else ``p=1`` (prediction
+ form). Note that the state ``\mathbf{x̂}`` and process noise ``\mathbf{ŵ}`` constraints
+ are applied on the augmented model, detailed in [`SteadyKalmanFilter`](@ref) Extended
+ Help. For variable constraints, the bounds can be modified after calling [`updatestate!`](@ref),
that is, at runtime, except for `±Inf` bounds. Time-varying constraints over the
estimation horizon ``H_e`` are also possible, mathematically defined as:
```math
@@ -894,47 +947,79 @@ end
@doc raw"""
init_predmat_mhe(
- model::LinModel, He, i_ym, Â, B̂u, Ĉm, B̂d, D̂dm, x̂op, f̂op
+ model::LinModel, He, i_ym, Â, B̂u, Ĉm, B̂d, D̂dm, x̂op, f̂op, p
) -> E, G, J, B, ex̄, Ex̂, Gx̂, Jx̂, Bx̂
-Construct the MHE prediction matrices for [`LinModel`](@ref) `model`.
+Construct the [`MovingHorizonEstimator`](@ref) prediction matrices for [`LinModel`](@ref) `model`.
We first introduce the deviation vector of the estimated state at arrival
-``\mathbf{x̂_0}(k-N_k+1) = \mathbf{x̂}_k(k-N_k+1) - \mathbf{x̂_{op}}`` (see [`setop!`](@ref)),
-and the vector ``\mathbf{Z} = [\begin{smallmatrix} \mathbf{x̂_0}(k-N_k+1)
-\\ \mathbf{Ŵ} \end{smallmatrix}]`` with the decision variables. The estimated sensor noises
-from time ``k-N_k+1`` to ``k`` are computed by:
+``\mathbf{x̂_0}(k-N_k+p) = \mathbf{x̂}_k(k-N_k+p) - \mathbf{x̂_{op}}`` (see [`setop!`](@ref)),
+and the vector ``\mathbf{Z} = [\begin{smallmatrix} \mathbf{x̂_0}(k-N_k+p)
+\\ \mathbf{Ŵ} \end{smallmatrix}]`` with the decision variables. Setting the constant ``p=0``
+produces an estimator in the current form, while the prediction form is obtained with
+``p=1``. The estimated sensor noises from time ``k-N_k+1`` to ``k`` are computed by:
```math
\begin{aligned}
\mathbf{V̂} = \mathbf{Y_0^m - Ŷ_0^m} &= \mathbf{E Z + G U_0 + J D_0 + Y_0^m + B} \\
&= \mathbf{E Z + F}
\end{aligned}
```
-in which ``\mathbf{U_0, D_0, Y_0^m}`` respectively include the deviation values of the
-manipulated inputs ``\mathbf{u_0}(k-j)``, meas. disturbances ``\mathbf{d_0}(k-j)`` and meas.
-outputs ``\mathbf{y_0^m}(k-j)`` from ``j=N_k-1`` to ``0``. The vector ``\mathbf{B}``
-includes the contribution for non-zero state ``\mathbf{x̂_{op}}`` and state update
-``\mathbf{f̂_{op}}`` operating points (for linearization, see [`linearize`](@ref)). The
-method also returns matrices for the estimation error at arrival:
+in which ``\mathbf{U_0}`` and ``\mathbf{Y_0^m}`` respectively include the deviation values of
+the manipulated inputs ``\mathbf{u_0}(k-j+p)`` from ``j=N_k`` to ``1`` and measured outputs
+``\mathbf{y_0^m}(k-j+1)`` from ``j=N_k`` to ``1``. The vector ``\mathbf{D_0}`` comprises one
+additional measured disturbance if ``p=0``, that is, it includes the deviation vectors
+``\mathbf{d_0}(k-j+1)`` from ``j=N_k+1-p`` to ``1``. The constant ``\mathbf{B}`` is the
+contribution for non-zero state ``\mathbf{x̂_{op}}`` and state update ``\mathbf{f̂_{op}}``
+operating points (for linearization, see [`augment_model`](@ref) and [`linearize`](@ref)).
+The method also returns the matrices for the estimation error at arrival:
```math
- \mathbf{x̄} = \mathbf{x̂_0^†}(k-N_k+1) - \mathbf{x̂_0}(k-N_k+1) = \mathbf{e_x̄ Z + f_x̄}
+ \mathbf{x̄} = \mathbf{x̂_0^†}(k-N_k+p) - \mathbf{x̂_0}(k-N_k+p) = \mathbf{e_x̄ Z + f_x̄}
```
-in which ``\mathbf{x̂_0^†}(k-N_k+1) = \mathbf{x̂}_{k-N_k}(k-N_k+1) - \mathbf{x̂_{op}}`` is the
-deviation vector of the state at arrival estimated at time ``k-N_k``. Lastly, the estimated
-states ``\mathbf{x̂_0}(k-j)`` from ``j=N_k-2`` to ``-1``, also in deviation form, are
-calculated with:
+in which ``\mathbf{e_x̄} = [\begin{smallmatrix} -\mathbf{I} & \mathbf{0} & \cdots & \mathbf{0} \end{smallmatrix}]``,
+and ``\mathbf{f_x̄} = \mathbf{x̂_0^†}(k-N_k+p)``. The latter is the deviation vector of the
+state at arrival, estimated at time ``k-N_k``, i.e. ``\mathbf{x̂_0^†}(k-N_k+p) =
+\mathbf{x̂}_{k-N_k}(k-N_k+p) - \mathbf{x̂_{op}}``. Lastly, the estimates ``\mathbf{x̂_0}(k-j+p)``
+from ``j=N_k-1`` to ``0``, also in deviation form, are computed with:
```math
\begin{aligned}
- \mathbf{X̂_0} &= \mathbf{E_x̂ Z + G_x̂ U_0 + J_x̂ D_0} \\
+ \mathbf{X̂_0} &= \mathbf{E_x̂ Z + G_x̂ U_0 + J_x̂ D_0 + B_x̂} \\
&= \mathbf{E_x̂ Z + F_x̂}
\end{aligned}
```
+The matrices ``\mathbf{E, G, J, B, E_x̂, G_x̂, J_x̂, B_x̂}`` are defined in the Extended Help
+section. The vectors ``\mathbf{F, F_x̂, f_x̄}`` are recalculated at each discrete time step,
+see [`initpred!(::MovingHorizonEstimator, ::LinModel)`](@ref) and [`linconstraint!(::MovingHorizonEstimator, ::LinModel)`](@ref).
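
As a quick numeric check of the window dimensions implied above (an illustrative sketch,
not part of the package API):

```julia
He, nu, nd, nym = 5, 1, 2, 3       # illustrative horizon and signal dimensions
for (direct, p) in ((true, 0), (false, 1))
    nD0 = nd*(He + 1 - p)          # D_0 holds one extra measured disturbance when p = 0
    nU0 = nu*He                    # U_0 length does not depend on p
    nV̂  = nym*He                   # V̂ (and Y_0^m) length does not depend on p
    println((; direct, p, nD0, nU0, nV̂))
end
```
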
# Extended Help
!!! details "Extended Help"
- Using the augmented matrices ``\mathbf{Â, B̂_u, Ĉ, B̂_d, D̂_d}``, and the function
- ``\mathbf{S}(j) = ∑_{i=0}^j \mathbf{Â}^i``, the prediction matrices for the sensor
- noises are computed by (notice the minus signs after the equalities):
+ Using the augmented process model matrices ``\mathbf{Â, B̂_u, Ĉ^m, B̂_d, D̂_d^m}``, and the
+ function ``\mathbf{S}(j) = ∑_{i=0}^j \mathbf{Â}^i``, the prediction matrices for the
+ sensor noises depend on the constant ``p``. For ``p=0``, the matrices are computed by:
+ ```math
+ \begin{aligned}
+ \mathbf{E} &= - \begin{bmatrix}
+ \mathbf{Ĉ^m}\mathbf{Â}^{1} & \mathbf{Ĉ^m}\mathbf{Â}^{0} & \cdots & \mathbf{0} \\
+ \mathbf{Ĉ^m}\mathbf{Â}^{2} & \mathbf{Ĉ^m}\mathbf{Â}^{1} & \cdots & \mathbf{0} \\
+ \vdots & \vdots & \ddots & \vdots \\
+ \mathbf{Ĉ^m}\mathbf{Â}^{H_e} & \mathbf{Ĉ^m}\mathbf{Â}^{H_e-1} & \cdots & \mathbf{Ĉ^m}\mathbf{Â}^{0} \end{bmatrix} \\
+ \mathbf{G} &= - \begin{bmatrix}
+ \mathbf{Ĉ^m}\mathbf{Â}^{0}\mathbf{B̂_u} & \mathbf{0} & \cdots & \mathbf{0} \\
+ \mathbf{Ĉ^m}\mathbf{Â}^{1}\mathbf{B̂_u} & \mathbf{Ĉ^m}\mathbf{Â}^{0}\mathbf{B̂_u} & \cdots & \mathbf{0} \\
+ \vdots & \vdots & \ddots & \vdots \\
+ \mathbf{Ĉ^m}\mathbf{Â}^{H_e-1}\mathbf{B̂_u} & \mathbf{Ĉ^m}\mathbf{Â}^{H_e-2}\mathbf{B̂_u} & \cdots & \mathbf{Ĉ^m}\mathbf{Â}^{0}\mathbf{B̂_u} \end{bmatrix} \\
+ \mathbf{J} &= - \begin{bmatrix}
+ \mathbf{Ĉ^m}\mathbf{Â}^{0}\mathbf{B̂_d} & \mathbf{D̂_d^m} & \cdots & \mathbf{0} \\
+ \mathbf{Ĉ^m}\mathbf{Â}^{1}\mathbf{B̂_d} & \mathbf{Ĉ^m}\mathbf{Â}^{0}\mathbf{B̂_d} & \cdots & \mathbf{0} \\
+ \vdots & \vdots & \ddots & \vdots \\
+ \mathbf{Ĉ^m}\mathbf{Â}^{H_e-1}\mathbf{B̂_d} & \mathbf{Ĉ^m}\mathbf{Â}^{H_e-2}\mathbf{B̂_d} & \cdots & \mathbf{D̂_d^m} \end{bmatrix} \\
+ \mathbf{B} &= - \begin{bmatrix}
+ \mathbf{Ĉ^m S}(0) \\
+ \mathbf{Ĉ^m S}(1) \\
+ \vdots \\
+ \mathbf{Ĉ^m S}(H_e-1) \end{bmatrix} \mathbf{\big(f̂_{op} - x̂_{op}\big)}
+ \end{aligned}
+ ```
+ or, for ``p=1``, the matrices are given by:
```math
\begin{aligned}
\mathbf{E} &= - \begin{bmatrix}
@@ -948,10 +1033,10 @@ calculated with:
\vdots & \vdots & \ddots & \vdots \\
\mathbf{Ĉ^m}\mathbf{Â}^{H_e-2}\mathbf{B̂_u} & \mathbf{Ĉ^m}\mathbf{Â}^{H_e-3}\mathbf{B̂_u} & \cdots & \mathbf{0} \end{bmatrix} \\
\mathbf{J} &= - \begin{bmatrix}
- \mathbf{D̂^m} & \mathbf{0} & \cdots & \mathbf{0} \\
- \mathbf{Ĉ^m}\mathbf{Â}^{0}\mathbf{B̂_d} & \mathbf{D̂^m} & \cdots & \mathbf{0} \\
+ \mathbf{D̂_d^m} & \mathbf{0} & \cdots & \mathbf{0} \\
+ \mathbf{Ĉ^m}\mathbf{Â}^{0}\mathbf{B̂_d} & \mathbf{D̂_d^m} & \cdots & \mathbf{0} \\
\vdots & \vdots & \ddots & \vdots \\
- \mathbf{Ĉ^m}\mathbf{Â}^{H_e-2}\mathbf{B̂_d} & \mathbf{Ĉ^m}\mathbf{Â}^{H_e-3}\mathbf{B̂_d} & \cdots & \mathbf{D̂^m} \end{bmatrix} \\
+ \mathbf{Ĉ^m}\mathbf{Â}^{H_e-2}\mathbf{B̂_d} & \mathbf{Ĉ^m}\mathbf{Â}^{H_e-3}\mathbf{B̂_d} & \cdots & \mathbf{D̂_d^m} \end{bmatrix} \\
\mathbf{B} &= - \begin{bmatrix}
\mathbf{0} \\
\mathbf{Ĉ^m S}(0) \\
@@ -959,29 +1044,27 @@ calculated with:
\mathbf{Ĉ^m S}(H_e-2) \end{bmatrix} \mathbf{\big(f̂_{op} - x̂_{op}\big)}
\end{aligned}
```
- for the estimation error at arrival:
- ```math
- \mathbf{e_x̄} = \begin{bmatrix}
- -\mathbf{I} & \mathbf{0} & \cdots & \mathbf{0} \end{bmatrix}
- ```
- and, for the estimated states:
+ The matrices for the estimated states are computed by:
```math
\begin{aligned}
\mathbf{E_x̂} &= \begin{bmatrix}
- \mathbf{Â}^{1} & \mathbf{I} & \cdots & \mathbf{0} \\
+ \mathbf{Â}^{1} & \mathbf{Â}^{0} & \cdots & \mathbf{0} \\
\mathbf{Â}^{2} & \mathbf{Â}^{1} & \cdots & \mathbf{0} \\
\vdots & \vdots & \ddots & \vdots \\
- \mathbf{Â}^{H_e} & \mathbf{Â}^{H_e-1} & \cdots & \mathbf{Â}^{1} \end{bmatrix} \\
+ \mathbf{Â}^{H_e} & \mathbf{Â}^{H_e-1} & \cdots & \mathbf{Â}^{0} \end{bmatrix} \\
\mathbf{G_x̂} &= \begin{bmatrix}
\mathbf{Â}^{0}\mathbf{B̂_u} & \mathbf{0} & \cdots & \mathbf{0} \\
\mathbf{Â}^{1}\mathbf{B̂_u} & \mathbf{Â}^{0}\mathbf{B̂_u} & \cdots & \mathbf{0} \\
\vdots & \vdots & \ddots & \vdots \\
\mathbf{Â}^{H_e-1}\mathbf{B̂_u} & \mathbf{Â}^{H_e-2}\mathbf{B̂_u} & \cdots & \mathbf{Â}^{0}\mathbf{B̂_u} \end{bmatrix} \\
- \mathbf{J_x̂} &= \begin{bmatrix}
+ \mathbf{J_x̂^†} &= \begin{bmatrix}
\mathbf{Â}^{0}\mathbf{B̂_d} & \mathbf{0} & \cdots & \mathbf{0} \\
\mathbf{Â}^{1}\mathbf{B̂_d} & \mathbf{Â}^{0}\mathbf{B̂_d} & \cdots & \mathbf{0} \\
\vdots & \vdots & \ddots & \vdots \\
- \mathbf{Â}^{H_e-1}\mathbf{B̂_d} & \mathbf{Â}^{H_e-2}\mathbf{B̂_d} & \cdots & \mathbf{Â}^{0}\mathbf{B̂_d} \end{bmatrix} \\
+ \mathbf{Â}^{H_e-1}\mathbf{B̂_d} & \mathbf{Â}^{H_e-2}\mathbf{B̂_d} & \cdots & \mathbf{Â}^{0}\mathbf{B̂_d} \end{bmatrix} \ , \quad
+ \mathbf{J_x̂} = \begin{cases}
+ [\begin{smallmatrix} \mathbf{J_x̂^†} & \mathbf{0} \end{smallmatrix}] & p=0 \\
+ \mathbf{J_x̂^†} & p=1 \end{cases} \\
\mathbf{B_x̂} &= \begin{bmatrix}
\mathbf{S}(0) \\
\mathbf{S}(1) \\
@@ -992,76 +1075,104 @@ calculated with:
All these matrices are truncated when ``N_k < H_e`` (at the beginning).
"""
function init_predmat_mhe(
- model::LinModel{NT}, He, i_ym, Â, B̂u, Ĉm, B̂d, D̂dm, x̂op, f̂op
+ model::LinModel{NT}, He, i_ym, Â, B̂u, Ĉm, B̂d, D̂dm, x̂op, f̂op, p
) where {NT<:Real}
nu, nd = model.nu, model.nd
nym, nx̂ = length(i_ym), size(Â, 2)
nŵ = nx̂
# --- pre-compute matrix powers ---
- # Apow 3D array : Apow[:,:,1] = A^0, Apow[:,:,2] = A^1, ... , Apow[:,:,He+1] = A^He
- Âpow = Array{NT}(undef, nx̂, nx̂, He+1)
- Âpow[:,:,1] = I(nx̂)
+ # Apow3D array : Apow[:,:,1] = A^0, Apow[:,:,2] = A^1, ... , Apow[:,:,He+1] = A^He
+ Âpow3D = Array{NT}(undef, nx̂, nx̂, He+1)
+ Âpow3D[:,:,1] = I(nx̂)
for j=2:He+1
- Âpow[:,:,j] = @views Âpow[:,:,j-1]*Â
+ Âpow3D[:,:,j] = @views Âpow3D[:,:,j-1]*Â
+ end
+ # nĈm_Âpow3D array : same indexing as Âpow3D
+ nĈm_Âpow3D = Array{NT}(undef, nym, nx̂, He+1)
+ nĈm_Âpow3D[:,:,1] = -Ĉm
+ for j=2:He+1
+ nĈm_Âpow3D[:,:,j] = @views -Ĉm*Âpow3D[:,:,j]
end
- # Apow_csum 3D array : Apow_csum[:,:,1] = A^0, Apow_csum[:,:,2] = A^1 + A^0, ...
- Âpow_csum = cumsum(Âpow, dims=3)
# helper function to improve code clarity and be similar to eqs. in docstring:
- getpower(array3D, power) = array3D[:,:, power+1]
+ getpower(array3D, power) = @views array3D[:,:, power+1]
# --- decision variables Z ---
- nĈm_Âpow = reduce(vcat, -Ĉm*getpower(Âpow, i) for i=0:He-1)
+ nĈm_Âpow = reduce(vcat, getpower(nĈm_Âpow3D, i) for i=0:He)
E = zeros(NT, nym*He, nx̂ + nŵ*He)
- E[:, 1:nx̂] = nĈm_Âpow
- for j=1:He-1
- iRow = (1 + j*nym):(nym*He)
- iCol = (1:nŵ) .+ (j-1)*nŵ .+ nx̂
- E[iRow, iCol] = nĈm_Âpow[1:length(iRow) ,:]
+ col_begin = iszero(p) ? 1 : 0
+ col_end = iszero(p) ? He : He-1
+ i = 0
+ for j=col_begin:col_end
+ iRow = (1 + i*nym):(nym*He)
+ iCol = (1:nŵ) .+ j*nŵ
+ E[iRow, iCol] = @views nĈm_Âpow[1:length(iRow) ,:]
+ i += 1
end
+ iszero(p) && @views (E[:, 1:nx̂] = nĈm_Âpow[nym+1:end, :])
ex̄ = [-I zeros(NT, nx̂, nŵ*He)]
- Âpow_vec = reduce(vcat, getpower(Âpow, i) for i=0:He)
+ Âpow_vec = reduce(vcat, getpower(Âpow3D, i) for i=0:He)
Ex̂ = zeros(NT, nx̂*He, nx̂ + nŵ*He)
- Ex̂[:, 1:nx̂] = Âpow_vec[nx̂+1:end, :]
- for j=0:He-1
- iRow = (1 + j*nx̂):(nx̂*He)
- iCol = (1:nŵ) .+ j*nŵ .+ nx̂
- Ex̂[iRow, iCol] = Âpow_vec[1:length(iRow) ,:]
+ i=0
+ for j=1:He
+ iRow = (1 + i*nx̂):(nx̂*He)
+ iCol = (1:nŵ) .+ j*nŵ
+ Ex̂[iRow, iCol] = @views Âpow_vec[1:length(iRow) ,:]
+ i+=1
end
+ Ex̂[:, 1:nx̂] = @views Âpow_vec[nx̂+1:end, :]
# --- manipulated inputs U ---
- nĈm_Âpow_B̂u = @views reduce(vcat, nĈm_Âpow[(1+(i*nym)):((i+1)*nym),:]*B̂u for i=0:He-1)
+ nĈm_Âpow_B̂u = reduce(vcat, getpower(nĈm_Âpow3D, i)*B̂u for i=0:He-1)
+ nĈm_Âpow_B̂u = [zeros(NT, nym, nu); nĈm_Âpow_B̂u]
G = zeros(NT, nym*He, nu*He)
- for j=1:He-1
- iRow = (1 + j*nym):(nym*He)
- iCol = (1:nu) .+ (j-1)*nu
- G[iRow, iCol] = nĈm_Âpow_B̂u[1:length(iRow) ,:]
+ i=0
+ col_begin = iszero(p) ? 1 : 0
+ col_end = iszero(p) ? He-1 : He-2
+ for j=col_begin:col_end
+ iRow = (1 + i*nym):(nym*He)
+ iCol = (1:nu) .+ j*nu
+ G[iRow, iCol] = @views nĈm_Âpow_B̂u[1:length(iRow) ,:]
+ i+=1
end
- Âpow_B̂u = reduce(vcat, getpower(Âpow, i)*B̂u for i=0:He)
+ iszero(p) && @views (G[:, 1:nu] = nĈm_Âpow_B̂u[nym+1:end, :])
+ Âpow_B̂u = reduce(vcat, getpower(Âpow3D, i)*B̂u for i=0:He-1)
Gx̂ = zeros(NT, nx̂*He, nu*He)
for j=0:He-1
iRow = (1 + j*nx̂):(nx̂*He)
iCol = (1:nu) .+ j*nu
- Gx̂[iRow, iCol] = Âpow_B̂u[1:length(iRow) ,:]
+ Gx̂[iRow, iCol] = @views Âpow_B̂u[1:length(iRow) ,:]
end
# --- measured disturbances D ---
- nĈm_Âpow_B̂d = @views reduce(vcat, nĈm_Âpow[(1+(i*nym)):((i+1)*nym),:]*B̂d for i=0:He-1)
- J = repeatdiag(-D̂dm, He)
- for j=1:He-1
- iRow = (1 + j*nym):(nym*He)
- iCol = (1:nd) .+ (j-1)*nd
+ nĈm_Âpow_B̂d = reduce(vcat, getpower(nĈm_Âpow3D, i)*B̂d for i=0:He-1)
+ nĈm_Âpow_B̂d = [-D̂dm; nĈm_Âpow_B̂d]
+ J = zeros(NT, nym*He, nd*(He+1-p))
+ col_begin = iszero(p) ? 1 : 0
+ col_end = iszero(p) ? He+1 : He
+ i=0
+ for j=col_begin:col_end-1
+ iRow = (1 + i*nym):(nym*He)
+ iCol = (1:nd) .+ j*nd
J[iRow, iCol] = nĈm_Âpow_B̂d[1:length(iRow) ,:]
+ i+=1
end
- Âpow_B̂d = reduce(vcat, getpower(Âpow, i)*B̂d for i=0:He)
- Jx̂ = zeros(NT, nx̂*He, nd*He)
+ iszero(p) && @views (J[:, 1:nd] = nĈm_Âpow_B̂d[nym+1:end, :])
+ Âpow_B̂d = reduce(vcat, getpower(Âpow3D, i)*B̂d for i=0:He-1)
+ Jx̂ = zeros(NT, nx̂*He, nd*(He+1-p))
for j=0:He-1
iRow = (1 + j*nx̂):(nx̂*He)
iCol = (1:nd) .+ j*nd
Jx̂[iRow, iCol] = Âpow_B̂d[1:length(iRow) ,:]
end
# --- state x̂op and state update f̂op operating points ---
+ # Apow_csum 3D array : Apow_csum[:,:,1] = A^0, Apow_csum[:,:,2] = A^1 + A^0, ...
+ Âpow_csum = cumsum(Âpow3D, dims=3)
f̂_op_n_x̂op = (f̂op - x̂op)
coef_B = zeros(NT, nym*He, nx̂)
- for j=1:He-1
- iRow = (1:nym) .+ nym*j
- coef_B[iRow,:] = -Ĉm*getpower(Âpow_csum, j-1)
+ row_begin = iszero(p) ? 0 : 1
+ row_end = iszero(p) ? He-1 : He-2
+ j=0
+ for i=row_begin:row_end
+ iRow = (1:nym) .+ nym*i
+ coef_B[iRow,:] = -Ĉm*getpower(Âpow_csum, j)
+ j+=1
end
B = coef_B*f̂_op_n_x̂op
coef_Bx̂ = Matrix{NT}(undef, nx̂*He, nx̂)
@@ -1075,7 +1186,7 @@ end
"Return empty matrices if `model` is not a [`LinModel`](@ref), except for `ex̄`."
function init_predmat_mhe(
- model::SimModel{NT}, He, i_ym, Â, _ , _ , _ , _ , _ , _
+ model::SimModel{NT}, He, i_ym, Â, _ , _ , _ , _ , _ , _ , p
) where {NT<:Real}
nym, nx̂ = length(i_ym), size(Â, 2)
nŵ = nx̂
@@ -1084,7 +1195,7 @@ function init_predmat_mhe(
Ex̂ = zeros(NT, 0, nx̂ + nŵ*He)
G = zeros(NT, 0, model.nu*He)
Gx̂ = zeros(NT, 0, model.nu*He)
- J = zeros(NT, 0, model.nd*He)
+ J = zeros(NT, 0, model.nd*(He+1-p))
Jx̂ = zeros(NT, 0, model.nd*He)
B = zeros(NT, nym*He)
Bx̂ = zeros(NT, nx̂*He)
diff --git a/src/estimator/mhe/execute.jl b/src/estimator/mhe/execute.jl
index 9d0bcc4f..3126f79f 100644
--- a/src/estimator/mhe/execute.jl
+++ b/src/estimator/mhe/execute.jl
@@ -1,8 +1,5 @@
"Reset the data windows and time-varying variables for the moving horizon estimator."
-function init_estimate_cov!(estim::MovingHorizonEstimator, _ , _ , _ )
- estim.invP̄ .= inv(estim.P̂_0)
- estim.P̂arr_old .= estim.P̂_0
- estim.x̂0arr_old .= 0
+function init_estimate_cov!(estim::MovingHorizonEstimator, _ , d0, u0)
estim.Z̃ .= 0
estim.X̂0 .= 0
estim.Y0m .= 0
@@ -13,6 +10,15 @@ function init_estimate_cov!(estim::MovingHorizonEstimator, _ , _ , _ )
estim.H̃ .= 0
estim.q̃ .= 0
estim.p .= 0
+ if estim.direct
+ # add u0(-1) and d0(-1) to the data windows:
+ estim.U0[1:estim.model.nu] .= u0
+ estim.D0[1:estim.model.nd] .= d0
+ end
+ # estim.P̂_0 is in fact P̂(-1|-1) if estim.direct==false, else P̂(-1|0)
+ estim.invP̄ .= inv(estim.P̂_0)
+ estim.P̂arr_old .= estim.P̂_0
+ estim.x̂0arr_old .= 0
return nothing
end
@@ -22,6 +28,13 @@ end
Do the same but for [`MovingHorizonEstimator`](@ref) objects.
"""
function correct_estimate!(estim::MovingHorizonEstimator, y0m, d0)
+ if estim.direct
+ ismoving = add_data_windows!(estim, y0m, d0)
+ ismoving && correct_cov!(estim)
+ initpred!(estim, estim.model)
+ linconstraint!(estim, estim.model)
+ optim_objective!(estim)
+ end
return nothing
end
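
A hedged sketch of the user-facing consequence of this dispatch (assuming an existing
`estim::MovingHorizonEstimator` and vectors `ym`, `d`, `u` of the appropriate sizes):

```julia
x̂ = preparestate!(estim, ym, d)   # triggers the MHE optimization when estim.direct == true
# ... compute and apply the manipulated input u ...
updatestate!(estim, u, ym, d)     # triggers it here instead when estim.direct == false
```
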
@@ -31,79 +44,32 @@ end
Update [`MovingHorizonEstimator`](@ref) state `estim.x̂0`.
The optimization problem of [`MovingHorizonEstimator`](@ref) documentation is solved at
-each discrete time ``k``. Once solved, the next estimate ``\mathbf{x̂}_k(k+1)`` is computed
-by inserting the optimal values of ``\mathbf{x̂}_k(k-N_k+1)`` and ``\mathbf{Ŵ}`` in the
-augmented model from ``j = N_k-1`` to ``0`` inclusively. Afterward, if ``k ≥ H_e``, the
-arrival covariance for the next time step ``\mathbf{P̂}_{k-N_k+1}(k-N_k+2)`` is estimated
-using `estim.covestim` object.
+each discrete time ``k``. The prediction matrices are detailed in the [`init_predmat_mhe`](@ref)
+documentation. Once solved, the optimal estimate ``\mathbf{x̂}_k(k+p)`` is computed by
+inserting the optimal values of ``\mathbf{x̂}_k(k-N_k+p)`` and ``\mathbf{Ŵ}`` in the
+augmented model from ``j = N_k-1`` to ``0`` inclusively. Afterward, if ``N_k = H_e``, the
+arrival covariance for the next time step ``\mathbf{P̂}_{k-N_k}(k-N_k+1)`` is estimated using
+`estim.covestim` object.
"""
-function update_estimate!(estim::MovingHorizonEstimator{NT}, y0m, d0, u0) where NT<:Real
- model, optim, x̂0 = estim.model, estim.optim, estim.x̂0
- add_data_windows!(estim::MovingHorizonEstimator, u0, d0, y0m)
- initpred!(estim, model)
- linconstraint!(estim, model)
- nu, ny = model.nu, model.ny
- nx̂, nym, nŵ, nϵ, Nk = estim.nx̂, estim.nym, estim.nx̂, estim.nϵ, estim.Nk[]
- nx̃ = nϵ + nx̂
- Z̃var::Vector{JuMP.VariableRef} = optim[:Z̃var]
- V̂ = Vector{NT}(undef, nym*Nk)
- X̂0 = Vector{NT}(undef, nx̂*Nk)
- û0 = Vector{NT}(undef, nu)
- ŷ0 = Vector{NT}(undef, ny)
- x̄ = Vector{NT}(undef, nx̂)
- ϵ_0 = estim.nϵ ≠ 0 ? estim.Z̃[begin] : empty(estim.Z̃)
- Z̃_0 = [ϵ_0; estim.x̂0arr_old; estim.Ŵ]
- V̂, X̂0 = predict!(V̂, X̂0, û0, ŷ0, estim, model, Z̃_0)
- J_0 = obj_nonlinprog!(x̄, estim, model, V̂, Z̃_0)
- # initial Z̃0 with Ŵ=0 if objective or constraint function not finite :
- isfinite(J_0) || (Z̃_0 = [ϵ_0; estim.x̂0arr_old; zeros(NT, nŵ*estim.He)])
- JuMP.set_start_value.(Z̃var, Z̃_0)
- # ------- solve optimization problem --------------
- try
- JuMP.optimize!(optim)
- catch err
- if isa(err, MOI.UnsupportedAttribute{MOI.VariablePrimalStart})
- # reset_optimizer to unset warm-start, set_start_value.(nothing) seems buggy
- MOIU.reset_optimizer(optim)
- JuMP.optimize!(optim)
- else
- rethrow(err)
- end
+function update_estimate!(estim::MovingHorizonEstimator, y0m, d0, u0)
+ if !estim.direct
+ add_data_windows!(estim, y0m, d0, u0)
+ initpred!(estim, estim.model)
+ linconstraint!(estim, estim.model)
+ optim_objective!(estim)
end
- # -------- error handling -------------------------
- if !issolved(optim)
- status = JuMP.termination_status(optim)
- if iserror(optim)
- @error("MHE terminated without solution: estimation in open-loop",
- status)
- else
- @warn("MHE termination status not OPTIMAL or LOCALLY_SOLVED: keeping "*
- "solution anyway", status)
- end
- @debug JuMP.solution_summary(optim, verbose=true)
- end
- if iserror(optim)
- estim.Z̃ .= Z̃_0
- else
- estim.Z̃ .= JuMP.value.(Z̃var)
- end
- # --------- update estimate -----------------------
- estim.Ŵ[1:nŵ*Nk] .= @views estim.Z̃[nx̃+1:nx̃+nŵ*Nk] # update Ŵ with optimum for warm-start
- V̂, X̂0 = predict!(V̂, X̂0, û0, ŷ0, estim, model, estim.Z̃)
- Nk == estim.He && update_cov!(estim::MovingHorizonEstimator)
- x̂0next = X̂0[end-nx̂+1:end]
- estim.x̂0 .= x̂0next
+ (estim.Nk[] == estim.He) && update_cov!(estim)
return nothing
end
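For intuition, the state reconstruction described in the docstring above boils down to propagating the augmented model from the arrival state while adding the estimated process noise of each step. A standalone sketch with a hypothetical helper (simplified, not the package implementation):

```julia
# Hypothetical standalone helper: rebuild the state sequence from the optimal
# arrival state and the optimal process noises over the estimation window.
function reconstruct_states(f̂, x̂arr, Ŵ, U, D, nx̂)
    X̂  = similar(Ŵ)                      # stacked states over the window
    x̂  = x̂arr                            # optimal arrival state x̂(k-Nk+p)
    Nk = length(Ŵ) ÷ nx̂
    for j in 1:Nk
        ŵ = Ŵ[(1 + nx̂*(j-1)):(nx̂*j)]     # optimal process noise of step j
        x̂ = f̂(x̂, U[j], D[j]) .+ ŵ        # augmented model + estimated noise
        X̂[(1 + nx̂*(j-1)):(nx̂*j)] .= x̂
    end
    return X̂                             # last block is the updated estimate
end

f̂lin(x, u, d) = 0.9x .+ 0.1u .+ 0.0d     # toy scalar augmented model (nx̂ = 1)
reconstruct_states(f̂lin, [0.5], [0.01, -0.02], [[1.0], [1.0]], [[0.0], [0.0]], 1)
```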
-
@doc raw"""
getinfo(estim::MovingHorizonEstimator) -> info
Get additional info on `estim` [`MovingHorizonEstimator`](@ref) optimum for troubleshooting.
-The function should be called after calling [`updatestate!`](@ref). It returns the
-dictionary `info` with the following fields:
+If `estim.direct==true`, the function should be called after calling [`preparestate!`](@ref).
+Otherwise, call it after [`updatestate!`](@ref). It returns the dictionary `info` with the
+following fields:
!!! info
Fields with *`emphasis`* are non-Unicode alternatives.
@@ -117,7 +83,7 @@ dictionary `info` with the following fields:
- `:x̄` or *`:xbar`* : optimal estimation error at arrival, ``\mathbf{x̄}``
- `:Ŷ` or *`:Yhat`* : optimal estimated outputs over ``N_k``, ``\mathbf{Ŷ}``
- `:Ŷm` or *`:Yhatm`* : optimal estimated measured outputs over ``N_k``, ``\mathbf{Ŷ^m}``
-- `:x̂arr` or *`:xhatarr`* : optimal estimated state at arrival, ``\mathbf{x̂}_k(k-N_k+1)``
+- `:x̂arr` or *`:xhatarr`* : optimal estimated state at arrival, ``\mathbf{x̂}_k(k-N_k+p)``
- `:J` : objective value optimum, ``J``
- `:Ym` : measured outputs over ``N_k``, ``\mathbf{Y^m}``
- `:U` : manipulated inputs over ``N_k``, ``\mathbf{U}``
@@ -195,36 +161,53 @@ function getinfo(estim::MovingHorizonEstimator{NT}) where NT<:Real
return info
end
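A short usage note (illustrative values only): with `direct=true`, call `getinfo` right after `preparestate!`, since that is where the MHE problem is solved:

```julia
using ModelPredictiveControl, ControlSystemsBase

model = LinModel(tf(3, [5, 1]), 1.0)
mhe   = MovingHorizonEstimator(model, He=5, nint_ym=0, direct=true)
preparestate!(mhe, model())        # solves the MHE problem when direct==true
info = getinfo(mhe)
info[:J], info[:x̂arr]              # objective optimum and arrival state
```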
-"Add data to the observation windows of the moving horizon estimator."
-function add_data_windows!(estim::MovingHorizonEstimator, u0, d0, y0m)
+"""
+ add_data_windows!(estim::MovingHorizonEstimator, y0m, d0, u0=estim.lastu0) -> ismoving
+
+Add data to the observation windows of the moving horizon estimator and clamp `estim.Nk`.
+
+If ``k ≥ H_e``, the observation windows are moving in time and `estim.Nk` is clamped to
+`estim.He`. It returns `true` if the observation windows are moving, `false` otherwise.
+If no `u0` argument is provided, the manipulated input of the last time step is added to
+its window (which is the correct value when `estim.direct == true`).
+"""
+function add_data_windows!(estim::MovingHorizonEstimator, y0m, d0, u0=estim.lastu0)
model = estim.model
- nx̂, nym, nu, nd, nŵ = estim.nx̂, estim.nym, model.nu, model.nd, estim.nx̂
- x̂0, ŵ = estim.x̂0, zeros(nŵ) # ŵ(k) = 0 for warm-starting
+ nx̂, nym, nd, nu, nŵ = estim.nx̂, estim.nym, model.nd, model.nu, estim.nx̂
+ p = estim.direct ? 0 : 1
+ x̂0, ŵ = estim.x̂0, 0 # ŵ(k-1+p) = 0 for warm-start
estim.Nk .+= 1
Nk = estim.Nk[]
- if Nk > estim.He
- estim.X̂0[1:end-nx̂] .= @views estim.X̂0[nx̂+1:end]
- estim.X̂0[end-nx̂+1:end] .= x̂0
+ ismoving = (Nk > estim.He)
+ if ismoving
estim.Y0m[1:end-nym] .= @views estim.Y0m[nym+1:end]
estim.Y0m[end-nym+1:end] .= y0m
+ if nd ≠ 0
+ estim.D0[1:end-nd] .= @views estim.D0[nd+1:end]
+ estim.D0[end-nd+1:end] .= d0
+ end
estim.U0[1:end-nu] .= @views estim.U0[nu+1:end]
estim.U0[end-nu+1:end] .= u0
- estim.D0[1:end-nd] .= @views estim.D0[nd+1:end]
- estim.D0[end-nd+1:end] .= d0
+ estim.X̂0[1:end-nx̂] .= @views estim.X̂0[nx̂+1:end]
+ estim.X̂0[end-nx̂+1:end] .= x̂0
estim.Ŵ[1:end-nŵ] .= @views estim.Ŵ[nŵ+1:end]
estim.Ŵ[end-nŵ+1:end] .= ŵ
estim.Nk .= estim.He
else
- estim.X̂0[(1 + nx̂*(Nk-1)):(nx̂*Nk)] .= x̂0
- estim.Y0m[(1 + nym*(Nk-1)):(nym*Nk)] .= y0m
- estim.U0[(1 + nu*(Nk-1)):(nu*Nk)] .= u0
- estim.D0[(1 + nd*(Nk-1)):(nd*Nk)] .= d0
- estim.Ŵ[(1 + nŵ*(Nk-1)):(nŵ*Nk)] .= ŵ
+ estim.Y0m[(1 + nym*(Nk-1)):(nym*Nk)] .= y0m
+ if nd ≠ 0
+            # D0 includes 1 additional measured disturbance when direct==true (p==0):
+            estim.D0[(1 + nd*(Nk-p)):(nd*(Nk-p+1))] .= d0
+ end
+ estim.U0[(1 + nu*(Nk-1)):(nu*Nk)] .= u0
+ estim.X̂0[(1 + nx̂*(Nk-1)):(nx̂*Nk)] .= x̂0
+ estim.Ŵ[(1 + nŵ*(Nk-1)):(nŵ*Nk)] .= ŵ
end
estim.x̂0arr_old .= @views estim.X̂0[1:nx̂]
- return nothing
+ return ismoving
end
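The moving-window bookkeeping above (shift each window left by one block and append the newest sample once `Nk` exceeds `He`) can be illustrated in isolation with a hypothetical helper:

```julia
# Hypothetical illustration of the moving-window update: shift the window left
# by one block of size n and append the newest sample once the window is full.
function push_window!(W::Vector, w::Vector, Nk::Int, He::Int)
    n = length(w)
    if Nk > He                            # window is full: shift and append
        W[1:end-n]     .= @view W[n+1:end]
        W[end-n+1:end] .= w
    else                                  # window still filling up
        W[(1 + n*(Nk-1)):(n*Nk)] .= w
    end
    return W
end

W = zeros(2*3)                            # window of 3 samples of size 2
push_window!(W, [1.0, 2.0], 1, 3)         # filling
push_window!(W, [3.0, 4.0], 2, 3)
push_window!(W, [5.0, 6.0], 3, 3)
push_window!(W, [7.0, 8.0], 4, 3)         # full: oldest sample dropped
```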
-
+
@doc raw"""
initpred!(estim::MovingHorizonEstimator, model::LinModel) -> nothing
@@ -366,6 +349,96 @@ function trunc_bounds(estim::MovingHorizonEstimator{NT}, Bmin, Bmax, n) where NT
return Bmin_t, Bmax_t
end
+@doc raw"""
+ optim_objective!(estim::MovingHorizonEstimator) -> Z̃
+
+Optimize objective of `estim` [`MovingHorizonEstimator`](@ref) and return the solution `Z̃`.
+
+If supported by `estim.optim`, it warm-starts the solver at:
+```math
+\mathbf{Z̃} =
+\begin{bmatrix}
+ ϵ_{k-1} \\
+ \mathbf{x̂}_{k-1}(k-N_k+p) \\
+ \mathbf{ŵ}_{k-1}(k-N_k+p+0) \\
+ \mathbf{ŵ}_{k-1}(k-N_k+p+1) \\
+ \vdots \\
+ \mathbf{ŵ}_{k-1}(k-p-2) \\
+ \mathbf{0} \\
+\end{bmatrix}
+```
+where ``\mathbf{ŵ}_{k-1}(k-j)`` is the estimated process noise for time ``k-j`` computed at
+the last time step ``k-1``. It then calls `JuMP.optimize!(estim.optim)` and extracts the
+solution. A failed optimization prints an `@error` log in the REPL and returns the
+warm-start value.
+"""
+function optim_objective!(estim::MovingHorizonEstimator{NT}) where NT<:Real
+ model, optim = estim.model, estim.optim
+ nu, ny = model.nu, model.ny
+ nx̂, nym, nŵ, nϵ, Nk = estim.nx̂, estim.nym, estim.nx̂, estim.nϵ, estim.Nk[]
+ nx̃ = nϵ + nx̂
+ Z̃var::Vector{JuMP.VariableRef} = optim[:Z̃var]
+ V̂ = Vector{NT}(undef, nym*Nk)
+ X̂0 = Vector{NT}(undef, nx̂*Nk)
+ û0 = Vector{NT}(undef, nu)
+ ŷ0 = Vector{NT}(undef, ny)
+ x̄ = Vector{NT}(undef, nx̂)
+ ϵ_0 = estim.nϵ ≠ 0 ? estim.Z̃[begin] : empty(estim.Z̃)
+ Z̃_0 = [ϵ_0; estim.x̂0arr_old; estim.Ŵ]
+ V̂, X̂0 = predict!(V̂, X̂0, û0, ŷ0, estim, model, Z̃_0)
+ J_0 = obj_nonlinprog!(x̄, estim, model, V̂, Z̃_0)
+    # initialize Z̃_0 with Ŵ=0 if the objective or constraint function is not finite:
+ isfinite(J_0) || (Z̃_0 = [ϵ_0; estim.x̂0arr_old; zeros(NT, nŵ*estim.He)])
+ JuMP.set_start_value.(Z̃var, Z̃_0)
+ # ------- solve optimization problem --------------
+ try
+ JuMP.optimize!(optim)
+ catch err
+ if isa(err, MOI.UnsupportedAttribute{MOI.VariablePrimalStart})
+ # reset_optimizer to unset warm-start, set_start_value.(nothing) seems buggy
+ MOIU.reset_optimizer(optim)
+ JuMP.optimize!(optim)
+ else
+ rethrow(err)
+ end
+ end
+ # -------- error handling -------------------------
+ if !issolved(optim)
+ status = JuMP.termination_status(optim)
+ if iserror(optim)
+ @error("MHE terminated without solution: estimation in open-loop",
+ status)
+ else
+ @warn("MHE termination status not OPTIMAL or LOCALLY_SOLVED: keeping "*
+ "solution anyway", status)
+ end
+ @debug JuMP.solution_summary(optim, verbose=true)
+ end
+ if iserror(optim)
+ estim.Z̃ .= Z̃_0
+ else
+ estim.Z̃ .= JuMP.value.(Z̃var)
+ end
+ # --------- update estimate -----------------------
+ estim.Ŵ[1:nŵ*Nk] .= @views estim.Z̃[nx̃+1:nx̃+nŵ*Nk] # update Ŵ with optimum for warm-start
+ V̂, X̂0 = predict!(V̂, X̂0, û0, ŷ0, estim, model, estim.Z̃)
+ x̂0next = @views X̂0[end-nx̂+1:end]
+ estim.x̂0 .= x̂0next
+ return estim.Z̃
+end
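A compact way to read the warm start above (toy numbers, `nϵ=1`, `nx̂=2`, `He=2`): the decision vector stacks the slack, the previous arrival state and the shifted noise window, whose newest block was zeroed by `add_data_windows!`:

```julia
# Illustrative assembly of the warm-start vector Z̃_0 described above:
ϵ_0      = [0.0]                   # last slack value (empty if nϵ == 0)
x̂arr_old = [0.1, -0.2]             # previous arrival state estimate
Ŵ        = [0.05, 0.01, 0.0, 0.0]  # shifted noise window, newest block is zero
Z̃_0      = [ϵ_0; x̂arr_old; Ŵ]      # passed to JuMP.set_start_value.(Z̃var, Z̃_0)
```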
+
+"Correct the covariance estimate at arrival using `covestim` [`StateEstimator`](@ref)."
+function correct_cov!(estim::MovingHorizonEstimator)
+ nym, nd = estim.nym, estim.model.nd
+ y0marr, d0arr = @views estim.Y0m[1:nym], estim.D0[1:nd]
+ estim.covestim.x̂0 .= estim.x̂0arr_old
+ estim.covestim.P̂ .= estim.P̂arr_old
+ correct_estimate!(estim.covestim, y0marr, d0arr)
+ estim.P̂arr_old .= estim.covestim.P̂
+ estim.invP̄ .= inv(estim.P̂arr_old)
+ return nothing
+end
+
"Update the covariance estimate at arrival using `covestim` [`StateEstimator`](@ref)."
function update_cov!(estim::MovingHorizonEstimator)
nu, nd, nym = estim.model.nu, estim.model.nd, estim.nym
@@ -433,18 +506,36 @@ function predict!(V̂, X̂0, û0, ŷ0, estim::MovingHorizonEstimator, model::S
nu, nd, nx̂, nŵ, nym = model.nu, model.nd, estim.nx̂, estim.nx̂, estim.nym
nx̃ = nϵ + nx̂
x̂0 = @views Z̃[nx̃-nx̂+1:nx̃]
- for j=1:Nk
- u0 = @views estim.U0[ (1 + nu * (j-1)):(nu*j)]
- y0m = @views estim.Y0m[(1 + nym * (j-1)):(nym*j)]
- d0 = @views estim.D0[ (1 + nd * (j-1)):(nd*j)]
- ŵ = @views Z̃[(1 + nx̃ + nŵ*(j-1)):(nx̃ + nŵ*j)]
- ĥ!(ŷ0, estim, model, x̂0, d0)
- ŷ0m = @views ŷ0[estim.i_ym]
- V̂[(1 + nym*(j-1)):(nym*j)] .= y0m .- ŷ0m
- x̂0next = @views X̂0[(1 + nx̂ *(j-1)):(nx̂ *j)]
- f̂!(x̂0next, û0, estim, model, x̂0, u0, d0)
- x̂0next .+= ŵ .+ estim.f̂op .- estim.x̂op
- x̂0 = x̂0next
+ if estim.direct
+ ŷ0next = ŷ0
+ d0 = @views estim.D0[1:nd]
+ for j=1:Nk
+ u0 = @views estim.U0[ (1 + nu * (j-1)):(nu*j)]
+ ŵ = @views Z̃[(1 + nx̃ + nŵ*(j-1)):(nx̃ + nŵ*j)]
+ x̂0next = @views X̂0[(1 + nx̂ *(j-1)):(nx̂ *j)]
+ f̂!(x̂0next, û0, estim, model, x̂0, u0, d0)
+ x̂0next .+= ŵ .+ estim.f̂op .- estim.x̂op
+ y0nextm = @views estim.Y0m[(1 + nym * (j-1)):(nym*j)]
+ d0next = @views estim.D0[(1 + nd*j):(nd*(j+1))]
+ ĥ!(ŷ0next, estim, model, x̂0next, d0next)
+ ŷ0nextm = @views ŷ0next[estim.i_ym]
+ V̂[(1 + nym*(j-1)):(nym*j)] .= y0nextm .- ŷ0nextm
+ x̂0, d0 = x̂0next, d0next
+ end
+ else
+ for j=1:Nk
+ y0m = @views estim.Y0m[(1 + nym * (j-1)):(nym*j)]
+ d0 = @views estim.D0[ (1 + nd * (j-1)):(nd*j)]
+ u0 = @views estim.U0[ (1 + nu * (j-1)):(nu*j)]
+ ŵ = @views Z̃[(1 + nx̃ + nŵ*(j-1)):(nx̃ + nŵ*j)]
+ ĥ!(ŷ0, estim, model, x̂0, d0)
+ ŷ0m = @views ŷ0[estim.i_ym]
+ V̂[(1 + nym*(j-1)):(nym*j)] .= y0m .- ŷ0m
+ x̂0next = @views X̂0[(1 + nx̂ *(j-1)):(nx̂ *j)]
+ f̂!(x̂0next, û0, estim, model, x̂0, u0, d0)
+ x̂0next .+= ŵ .+ estim.f̂op .- estim.x̂op
+ x̂0 = x̂0next
+ end
end
return V̂, X̂0
end
@@ -481,6 +572,8 @@ function con_nonlinprog!(g, estim::MovingHorizonEstimator, ::SimModel, X̂0, V̂
return g
end
+"No nonlinear constraints if `model` is a [`LinModel`](@ref), return `g` unchanged."
+con_nonlinprog!(g, ::MovingHorizonEstimator, ::LinModel, _ , _ , _ ) = g
"Update the augmented model, prediction matrices, constrains and data windows for MHE."
function setmodel_estimator!(
@@ -505,10 +598,11 @@ function setmodel_estimator!(
estim.f̂op .= f̂op
estim.x̂0 .-= estim.x̂op # convert x̂ to x̂0 with the new operating point
# --- predictions matrices ---
+ p = estim.direct ? 0 : 1
E, G, J, B, _ , Ex̂, Gx̂, Jx̂, Bx̂ = init_predmat_mhe(
model, He, estim.i_ym,
estim.Â, estim.B̂u, estim.Ĉm, estim.B̂d, estim.D̂dm,
- estim.x̂op, estim.f̂op
+ estim.x̂op, estim.f̂op, p
)
A_X̂min, A_X̂max, Ẽx̂ = relaxX̂(model, nϵ, con.C_x̂min, con.C_x̂max, Ex̂)
A_V̂min, A_V̂max, Ẽ = relaxV̂(model, nϵ, con.C_v̂min, con.C_v̂max, E)
@@ -592,7 +686,4 @@ function setmodel_estimator!(
end
"Called by plots recipes for the estimated states constraints."
-getX̂con(estim::MovingHorizonEstimator, _ ) = estim.con.X̂0min+estim.X̂op, estim.con.X̂0max+estim.X̂op
-
-"No nonlinear constraints if `model` is a [`LinModel`](@ref), return `g` unchanged."
-con_nonlinprog!(g, ::MovingHorizonEstimator, ::LinModel, _ , _ , _ ) = g
\ No newline at end of file
+getX̂con(estim::MovingHorizonEstimator, _ ) = estim.con.X̂0min+estim.X̂op, estim.con.X̂0max+estim.X̂op
\ No newline at end of file
diff --git a/src/sim_model.jl b/src/sim_model.jl
index 4f52353b..39f17f60 100644
--- a/src/sim_model.jl
+++ b/src/sim_model.jl
@@ -189,7 +189,8 @@ detailstr(model::SimModel) = ""
Init `model.x0` with manipulated inputs `u` and measured disturbances `d` steady-state.
-It removes the operating points on `u` and `d` and calls [`steadystate!`](@ref):
+The method tries to initialize the model state ``\mathbf{x}`` at steady-state. It removes
+the operating points on `u` and `d` and calls [`steadystate!`](@ref):
- If `model` is a [`LinModel`](@ref), the method computes the steady-state of current
inputs `u` and measured disturbances `d`.
@@ -310,7 +311,7 @@ ms. Can be used to implement simple soft real-time simulations, see the example
!!! warning
The allocations in Julia are garbage-collected (GC) automatically. This can affect the
- timings. In such cases, you can temporarily stop the GC with `GC.enable(false)`, and
+ timing. In such cases, you can temporarily stop the GC with `GC.enable(false)`, and
restart it at a convenient time, e.g. just before calling `periodsleep`.
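A minimal soft real-time loop following this advice, assuming `periodsleep(model)` waits for the next multiple of `model.Ts` (see the `periodsleep` documentation for the exact behavior):

```julia
using ModelPredictiveControl, ControlSystemsBase

model = LinModel(tf(2, [0.5, 1]), 0.1)     # Ts = 0.1 s
for i in 1:50
    GC.enable(false)               # no GC pause during the time-critical work
    y = model()                    # measurements / estimator / controller here
    updatestate!(model, [1.0])
    GC.enable(true)                # let the GC run during the sleep slack
    periodsleep(model)             # wait for the next sampling instant
end
```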
# Examples
diff --git a/test/runtests.jl b/test/runtests.jl
index 99c8c8a5..9ab60014 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -4,6 +4,7 @@ using ModelPredictiveControl
using ControlSystemsBase
using Documenter
using LinearAlgebra
+using Random: randn
using JuMP, OSQP, Ipopt, DAQP, ForwardDiff
using Plots
using Test
diff --git a/test/test_predictive_control.jl b/test/test_predictive_control.jl
index 5424e92f..993994f7 100644
--- a/test/test_predictive_control.jl
+++ b/test/test_predictive_control.jl
@@ -459,8 +459,8 @@ end
linmodel1 = LinModel(sys,Ts,i_d=[3])
nmpc0 = NonLinMPC(linmodel1, Hp=15)
@test isa(nmpc0.estim, SteadyKalmanFilter)
- f(x,u,d) = linmodel1.A*x + linmodel1.Bu*u + linmodel1.Bd*d
- h(x,d) = linmodel1.C*x + linmodel1.Dd*d
+ f = (x,u,d) -> linmodel1.A*x + linmodel1.Bu*u + linmodel1.Bd*d
+ h = (x,d) -> linmodel1.C*x + linmodel1.Dd*d
nonlinmodel = NonLinModel(f, h, Ts, 2, 4, 2, 1, solver=nothing)
nmpc1 = NonLinMPC(nonlinmodel, Hp=15)
@test isa(nmpc1.estim, UnscentedKalmanFilter)
@@ -528,8 +528,8 @@ end
# ensure that the current estimated output is updated for correct JE values:
@test nmpc.ŷ ≈ ModelPredictiveControl.evalŷ(nmpc.estim, Float64[])
linmodel2 = LinModel([tf(5, [2000, 1]) tf(7, [8000,1])], 3000.0, i_d=[2])
- f(x,u,d) = linmodel2.A*x + linmodel2.Bu*u + linmodel2.Bd*d
- h(x,d) = linmodel2.C*x + linmodel2.Dd*d
+ f = (x,u,d) -> linmodel2.A*x + linmodel2.Bu*u + linmodel2.Bd*d
+ h = (x,d) -> linmodel2.C*x + linmodel2.Dd*d
nonlinmodel = NonLinModel(f, h, 3000.0, 1, 2, 1, 1, solver=nothing)
nmpc2 = NonLinMPC(nonlinmodel, Nwt=[0], Hp=1000, Hc=1)
d = [0.1]
@@ -606,8 +606,8 @@ end
@testset "NonLinMPC other methods" begin
linmodel = setop!(LinModel(sys,Ts,i_u=[1,2]), uop=[10,50], yop=[50,30])
- f(x,u,_) = linmodel.A*x + linmodel.Bu*u
- h(x,_) = linmodel.C*x
+ f = (x,u,_) -> linmodel.A*x + linmodel.Bu*u
+ h = (x,_) -> linmodel.C*x
nonlinmodel = NonLinModel(f, h, Ts, 2, 2, 2, solver=nothing)
nmpc1 = NonLinMPC(nonlinmodel, Hp=15)
@test initstate!(nmpc1, [10, 50], [20, 25]) ≈ zeros(4)
@@ -628,8 +628,8 @@ end
setconstraint!(nmpc_lin, c_ymin=[1.0,1.1], c_ymax=[1.2,1.3])
@test all((-nmpc_lin.con.A_Ymin[:, end], -nmpc_lin.con.A_Ymax[:, end]) .≈ ([1.0,1.1], [1.2,1.3]))
- f(x,u,d) = linmodel1.A*x + linmodel1.Bu*u + linmodel1.Bd*d
- h(x,d) = linmodel1.C*x + linmodel1.Dd*d
+ f = (x,u,d) -> linmodel1.A*x + linmodel1.Bu*u + linmodel1.Bd*d
+ h = (x,d) -> linmodel1.C*x + linmodel1.Dd*d
nonlinmodel = NonLinModel(f, h, Ts, 2, 4, 2, 1, solver=nothing)
nmpc = NonLinMPC(nonlinmodel, Hp=1, Hc=1)
@@ -690,8 +690,8 @@ end
info = getinfo(nmpc_lin)
@test info[:x̂end][1] ≈ 0 atol=1e-1
- f(x,u,_) = linmodel.A*x + linmodel.Bu*u
- h(x,_) = linmodel.C*x
+ f = (x,u,_) -> linmodel.A*x + linmodel.Bu*u
+ h = (x,_) -> linmodel.C*x
nonlinmodel = NonLinModel(f, h, linmodel.Ts, 1, 1, 1, solver=nothing)
nmpc = NonLinMPC(nonlinmodel, Hp=50, Hc=5)
@@ -764,8 +764,8 @@ end
@test mpc.M_Hp ≈ diagm(1:1000)
@test mpc.Ñ_Hc ≈ diagm([0.1;1e6])
@test mpc.L_Hp ≈ diagm(1.1:1000.1)
- f(x,u,d) = estim.model.A*x + estim.model.Bu*u + estim.model.Bd*d
- h(x,d) = estim.model.C*x + estim.model.Du*d
+ f = (x,u,d) -> estim.model.A*x + estim.model.Bu*u + estim.model.Bd*d
+ h = (x,d) -> estim.model.C*x + estim.model.Du*d
nonlinmodel = NonLinModel(f, h, 10.0, 1, 1, 1)
nmpc = NonLinMPC(nonlinmodel, Nwt=[0], Cwt=1e4, Hp=1000, Hc=10)
setmodel!(nmpc, Mwt=[100], Nwt=[200], Lwt=[300])
@@ -779,3 +779,41 @@ end
@test_throws ErrorException setmodel!(nmpc, deepcopy(nonlinmodel))
end
+@testset "LinMPC v.s. NonLinMPC" begin
+ linmodel = setop!(LinModel(sys,Ts,i_d=[3]), uop=[10,50], yop=[50,30], dop=[20])
+ f = (x,u,d) -> linmodel.A*x + linmodel.Bu*u + linmodel.Bd*d
+ h = (x,d) -> linmodel.C*x + linmodel.Dd*d
+ nonlinmodel = NonLinModel(f, h, Ts, 2, 4, 2, 1, solver=nothing)
+ nonlinmodel = setop!(nonlinmodel, uop=[10,50], yop=[50,30], dop=[20])
+ optim = JuMP.Model(optimizer_with_attributes(Ipopt.Optimizer, "sb"=>"yes"))
+ linmpc = LinMPC(KalmanFilter(linmodel), Hp=15, optim=optim)
+ linmpc = setconstraint!(linmpc, ymax=[55,35], Δumax=[5, 5])
+ nonlinmpc1 = NonLinMPC(UnscentedKalmanFilter(nonlinmodel), Hp=15)
+ nonlinmpc1 = setconstraint!(nonlinmpc1, ymax=[55,35], Δumax=[5, 5])
+ nonlinmpc2 = NonLinMPC(KalmanFilter(linmodel), Hp=15)
+ nonlinmpc2 = setconstraint!(nonlinmpc2, ymax=[55,35], Δumax=[5, 5])
+ U_linmpc = zeros(2, 30)
+ U_nonlinmpc1 = zeros(2, 30)
+ U_nonlinmpc2 = zeros(2, 30)
+ for i=1:30
+ r = [55, 35]
+ d = i > 15 ? [0] : [20]
+ y = linmodel(d) + randn(2)
+ preparestate!(linmpc, y, d)
+ preparestate!(nonlinmpc1, y, d)
+ preparestate!(nonlinmpc2, y, d)
+ u1 = moveinput!(linmpc, r, d)
+ u2 = moveinput!(nonlinmpc1, r, d)
+ u3 = moveinput!(nonlinmpc2, r, d)
+ U_linmpc[:, i] = u1
+ U_nonlinmpc1[:, i] = u2
+ U_nonlinmpc2[:, i] = u3
+ updatestate!(linmpc, u1, y, d)
+ updatestate!(nonlinmpc1, u2, y, d)
+ updatestate!(nonlinmpc2, u3, y, d)
+ updatestate!(linmodel, u1, d)
+ end
+ @test U_linmpc ≈ U_nonlinmpc1 rtol=1e-3 atol=1e-3
+ @test U_linmpc ≈ U_nonlinmpc2 rtol=1e-3 atol=1e-3
+ @test U_nonlinmpc1 ≈ U_nonlinmpc2 rtol=1e-3 atol=1e-3
+end
diff --git a/test/test_state_estim.jl b/test/test_state_estim.jl
index 37221537..d3d8da40 100644
--- a/test/test_state_estim.jl
+++ b/test/test_state_estim.jl
@@ -82,23 +82,23 @@ end
@test updatestate!(skalmanfilter2, [10, 3], [0.5, 6+0.1]) ≈ x
setstate!(skalmanfilter1, [1,2,3,4])
@test skalmanfilter1.x̂0 ≈ [1,2,3,4]
- for i in 1:100
+ for i in 1:40
preparestate!(skalmanfilter1, [50, 30])
updatestate!(skalmanfilter1, [11, 52], [50, 30])
end
@test skalmanfilter1() ≈ [50, 30] atol=1e-3
- for i in 1:100
+ for i in 1:40
preparestate!(skalmanfilter1, [51, 32])
updatestate!(skalmanfilter1, [10, 50], [51, 32])
end
@test skalmanfilter1() ≈ [51, 32] atol=1e-3
skalmanfilter2 = SteadyKalmanFilter(linmodel1, nint_u=[1, 1], direct=false)
- for i in 1:100
+ for i in 1:40
preparestate!(skalmanfilter2, [50, 30])
updatestate!(skalmanfilter2, [11, 52], [50, 30])
end
@test skalmanfilter2() ≈ [50, 30] atol=1e-3
- for i in 1:100
+ for i in 1:40
preparestate!(skalmanfilter2, [51, 32])
updatestate!(skalmanfilter2, [10, 50], [51, 32])
end
@@ -196,23 +196,23 @@ end
@test initstate!(kalmanfilter1, [10, 50], [50, 30+1]) ≈ [zeros(3); [1]]
setstate!(kalmanfilter1, [1,2,3,4])
@test kalmanfilter1.x̂0 ≈ [1,2,3,4]
- for i in 1:1000
+ for i in 1:40
preparestate!(kalmanfilter1, [50, 30])
updatestate!(kalmanfilter1, [11, 52], [50, 30])
end
@test kalmanfilter1() ≈ [50, 30] atol=1e-3
- for i in 1:100
+ for i in 1:40
preparestate!(kalmanfilter1, [51, 32])
updatestate!(kalmanfilter1, [10, 50], [51, 32])
end
@test kalmanfilter1() ≈ [51, 32] atol=1e-3
kalmanfilter2 = KalmanFilter(linmodel1, nint_u=[1, 1], direct=false)
- for i in 1:100
+ for i in 1:40
preparestate!(kalmanfilter2, [50, 30])
updatestate!(kalmanfilter2, [11, 52], [50, 30])
end
@test kalmanfilter2() ≈ [50, 30] atol=1e-3
- for i in 1:100
+ for i in 1:40
preparestate!(kalmanfilter2, [51, 32])
updatestate!(kalmanfilter2, [10, 50], [51, 32])
end
@@ -305,23 +305,23 @@ end
@test initstate!(lo1, [10, 50], [50, 30+1]) ≈ [zeros(3); [1]]
setstate!(lo1, [1,2,3,4])
@test lo1.x̂0 ≈ [1,2,3,4]
- for i in 1:100
+ for i in 1:40
preparestate!(lo1, [50, 30])
updatestate!(lo1, [11, 52], [50, 30])
end
@test lo1() ≈ [50, 30] atol=1e-3
- for i in 1:100
+ for i in 1:40
preparestate!(lo1, [51, 32])
updatestate!(lo1, [10, 50], [51, 32])
end
@test lo1() ≈ [51, 32] atol=1e-3
lo2 = Luenberger(linmodel1, nint_u=[1, 1], direct=false)
- for i in 1:100
+ for i in 1:40
preparestate!(lo2, [50, 30])
updatestate!(lo2, [11, 52], [50, 30])
end
@test lo2() ≈ [50, 30] atol=1e-3
- for i in 1:100
+ for i in 1:40
preparestate!(lo2, [51, 32])
updatestate!(lo2, [10, 50], [51, 32])
end
@@ -537,23 +537,23 @@ end
@test initstate!(ukf1, [10, 50], [50, 30+1]) ≈ zeros(4) atol=1e-9
setstate!(ukf1, [1,2,3,4])
@test ukf1.x̂0 ≈ [1,2,3,4]
- for i in 1:100
+ for i in 1:40
preparestate!(ukf1, [50, 30])
updatestate!(ukf1, [11, 52], [50, 30])
end
@test ukf1() ≈ [50, 30] atol=1e-3
- for i in 1:100
+ for i in 1:40
preparestate!(ukf1, [51, 32])
updatestate!(ukf1, [10, 50], [51, 32])
end
@test ukf1() ≈ [51, 32] atol=1e-3
ukf2 = UnscentedKalmanFilter(linmodel1, nint_u=[1, 1], nint_ym=[0, 0], direct=false)
- for i in 1:100
+ for i in 1:40
preparestate!(ukf2, [50, 30])
updatestate!(ukf2, [11, 52], [50, 30])
end
@test ukf2() ≈ [50, 30] atol=1e-3
- for i in 1:100
+ for i in 1:40
preparestate!(ukf2, [51, 32])
updatestate!(ukf2, [10, 50], [51, 32])
end
@@ -669,23 +669,23 @@ end
@test initstate!(ekf1, [10, 50], [50, 30+1]) ≈ zeros(4);
setstate!(ekf1, [1,2,3,4])
@test ekf1.x̂0 ≈ [1,2,3,4]
- for i in 1:100
+ for i in 1:40
preparestate!(ekf1, [50, 30])
updatestate!(ekf1, [11, 52], [50, 30])
end
@test ekf1() ≈ [50, 30] atol=1e-3
- for i in 1:100
+ for i in 1:40
preparestate!(ekf1, [51, 32])
updatestate!(ekf1, [10, 50], [51, 32])
end
@test ekf1() ≈ [51, 32] atol=1e-3
ekf2 = ExtendedKalmanFilter(linmodel1, nint_u=[1, 1], nint_ym=[0, 0], direct=false)
- for i in 1:100
+ for i in 1:40
preparestate!(ekf2, [50, 30])
updatestate!(ekf2, [11, 52], [50, 30])
end
@test ekf2() ≈ [50, 30] atol=1e-3
- for i in 1:100
+ for i in 1:40
preparestate!(ekf2, [51, 32])
updatestate!(ekf2, [10, 50], [51, 32])
end
@@ -773,11 +773,11 @@ end
mhe7 = MovingHorizonEstimator(nonlinmodel, He=10)
@test mhe7.He == 10
- @test length(mhe7.X̂0) == 10*6
- @test length(mhe7.Y0m) == 10*2
- @test length(mhe7.U0) == 10*2
- @test length(mhe7.D0) == 10*1
- @test length(mhe7.Ŵ) == 10*6
+ @test length(mhe7.X̂0) == mhe7.He*6
+ @test length(mhe7.Y0m) == mhe7.He*2
+ @test length(mhe7.U0) == mhe7.He*2
+ @test length(mhe7.D0) == (mhe7.He+mhe7.direct)*1
+ @test length(mhe7.Ŵ) == mhe7.He*6
mhe8 = MovingHorizonEstimator(nonlinmodel, He=5, nint_u=[1, 1], nint_ym=[0, 0])
@test mhe8.nxs == 2
@@ -818,6 +818,7 @@ end
f(x,u,d) = linmodel1.A*x + linmodel1.Bu*u + linmodel1.Bd*d
h(x,d) = linmodel1.C*x + linmodel1.Dd*d
nonlinmodel = setop!(NonLinModel(f, h, Ts, 2, 4, 2, 1, solver=nothing), uop=[10,50], yop=[50,30], dop=[5])
+
mhe1 = MovingHorizonEstimator(nonlinmodel, He=2)
preparestate!(mhe1, [50, 30], [5])
x̂ = updatestate!(mhe1, [10, 50], [50, 30], [5])
@@ -831,18 +832,63 @@ end
@test initstate!(mhe1, [10, 50], [50, 30+1], [5]) ≈ zeros(6) atol=1e-9
setstate!(mhe1, [1,2,3,4,5,6])
@test mhe1.x̂0 ≈ [1,2,3,4,5,6]
- for i in 1:100
+ for i in 1:40
preparestate!(mhe1, [50, 30], [5])
updatestate!(mhe1, [11, 52], [50, 30], [5])
end
@test mhe1([5]) ≈ [50, 30] atol=1e-3
- for i in 1:100
+ for i in 1:40
preparestate!(mhe1, [51, 32], [5])
updatestate!(mhe1, [10, 50], [51, 32], [5])
end
@test mhe1([5]) ≈ [51, 32] atol=1e-3
-
+
+ mhe1 = MovingHorizonEstimator(nonlinmodel, He=2, nint_u=[1, 1], nint_ym=[0, 0], direct=false)
+ preparestate!(mhe1, [50, 30], [5])
+ x̂ = updatestate!(mhe1, [10, 50], [50, 30], [5])
+ @test x̂ ≈ zeros(6) atol=1e-9
+ @test mhe1.x̂0 ≈ zeros(6) atol=1e-9
+ @test evaloutput(mhe1, [5]) ≈ mhe1([5]) ≈ [50, 30]
+ info = getinfo(mhe1)
+ @test info[:x̂] ≈ x̂ atol=1e-9
+ @test info[:Ŷ][end-1:end] ≈ [50, 30] atol=1e-9
+
+ @test initstate!(mhe1, [10, 50], [50, 30+1], [5]) ≈ zeros(6) atol=1e-9
+ setstate!(mhe1, [1,2,3,4,5,6])
+ @test mhe1.x̂0 ≈ [1,2,3,4,5,6]
+ for i in 1:40
+ preparestate!(mhe1, [50, 30], [5])
+ updatestate!(mhe1, [11, 52], [50, 30], [5])
+ end
+ @test mhe1([5]) ≈ [50, 30] atol=1e-3
+ for i in 1:40
+ preparestate!(mhe1, [51, 32], [5])
+ updatestate!(mhe1, [10, 50], [51, 32], [5])
+ end
+ @test mhe1([5]) ≈ [51, 32] atol=1e-3
+
+ mhe2 = MovingHorizonEstimator(linmodel1, He=2)
+ preparestate!(mhe2, [50, 30], [5])
+ x̂ = updatestate!(mhe2, [10, 50], [50, 30], [5])
+ @test x̂ ≈ zeros(6) atol=1e-9
+ @test mhe2.x̂0 ≈ zeros(6) atol=1e-9
+ @test evaloutput(mhe2, [5]) ≈ mhe2([5]) ≈ [50, 30]
+ info = getinfo(mhe2)
+ @test info[:x̂] ≈ x̂ atol=1e-9
+ @test info[:Ŷ][end-1:end] ≈ [50, 30] atol=1e-9
+ for i in 1:40
+ preparestate!(mhe2, [50, 30], [5])
+ updatestate!(mhe2, [11, 52], [50, 30], [5])
+ end
+ @test mhe2([5]) ≈ [50, 30] atol=1e-3
+ for i in 1:40
+ preparestate!(mhe2, [51, 32], [5])
+ updatestate!(mhe2, [10, 50], [51, 32], [5])
+ end
+ @test mhe2([5]) ≈ [51, 32] atol=1e-3
+
mhe2 = MovingHorizonEstimator(linmodel1, He=2, nint_u=[1, 1], nint_ym=[0, 0], direct=false)
+ preparestate!(mhe2, [50, 30], [5])
x̂ = updatestate!(mhe2, [10, 50], [50, 30], [5])
@test x̂ ≈ zeros(6) atol=1e-9
@test mhe2.x̂0 ≈ zeros(6) atol=1e-9
@@ -850,12 +896,12 @@ end
info = getinfo(mhe2)
@test info[:x̂] ≈ x̂ atol=1e-9
@test info[:Ŷ][end-1:end] ≈ [50, 30] atol=1e-9
- for i in 1:100
+ for i in 1:40
preparestate!(mhe2, [50, 30], [5])
updatestate!(mhe2, [11, 52], [50, 30], [5])
end
@test mhe2([5]) ≈ [50, 30] atol=1e-3
- for i in 1:100
+ for i in 1:40
preparestate!(mhe2, [51, 32], [5])
updatestate!(mhe2, [10, 50], [51, 32], [5])
end
@@ -1068,44 +1114,10 @@ end
@test info[:V̂] ≈ [-1,-1] atol=5e-2
end
-@testset "MovingHorizonEstimator v.s. Kalman filters" begin
- linmodel1 = setop!(LinModel(sys,Ts,i_d=[3]), uop=[10,50], yop=[50,30], dop=[20])
- mhe = MovingHorizonEstimator(linmodel1, He=3, nint_ym=0, direct=false)
- kf = KalmanFilter(linmodel1, nint_ym=0, direct=false)
- X̂0_mhe = zeros(4, 6)
- X̂0_kf = zeros(4, 6)
- for i in 1:6
- X̂0_mhe[:,i] = mhe.x̂0
- X̂0_kf[:,i] = kf.x̂0
- preparestate!(mhe, [50, 31], [25])
- updatestate!(mhe, [11, 50], [50, 31], [25])
- preparestate!(kf, [50, 31], [25])
- updatestate!(kf, [11, 50], [50, 31], [25])
- end
- @test X̂0_mhe ≈ X̂0_kf atol=1e-3
- f = (x,u,d) -> linmodel1.A*x + linmodel1.Bu*u + linmodel1.Bd*d
- h = (x,d) -> linmodel1.C*x + linmodel1.Dd*d
- nonlinmodel = NonLinModel(f, h, Ts, 2, 4, 2, 1, solver=nothing)
- nonlinmodel = setop!(nonlinmodel, uop=[10,50], yop=[50,30], dop=[20])
- mhe = MovingHorizonEstimator(nonlinmodel, He=5, nint_ym=0, direct=false)
- ukf = UnscentedKalmanFilter(nonlinmodel, nint_ym=0, direct=false)
- X̂0_mhe = zeros(4, 6)
- X̂0_ukf = zeros(4, 6)
- for i in 1:6
- X̂0_mhe[:,i] = mhe.x̂0
- X̂0_ukf[:,i] = ukf.x̂0
- preparestate!(mhe, [50, 31], [25])
- updatestate!(mhe, [11, 50], [50, 31], [25])
- preparestate!(ukf, [50, 31], [25])
- updatestate!(ukf, [11, 50], [50, 31], [25])
- end
- @test X̂0_mhe ≈ X̂0_ukf atol=1e-3
-end
-
@testset "MovingHorizonEstimator set model" begin
linmodel = LinModel(ss(0.5, 0.3, 1.0, 0, 10.0))
linmodel = setop!(linmodel, uop=[2.0], yop=[50.0], xop=[3.0], fop=[3.0])
- mhe = MovingHorizonEstimator(linmodel, He=1, nint_ym=0)
+ mhe = MovingHorizonEstimator(linmodel, He=1, nint_ym=0, direct=false)
setconstraint!(mhe, x̂min=[-1000], x̂max=[1000])
@test mhe.Â ≈ [0.5]
@test evaloutput(mhe) ≈ [50.0]
@@ -1144,4 +1156,129 @@ end
@test mhe2.Q̂ ≈ [1e-3]
@test mhe2.R̂ ≈ [1e-6]
@test_throws ErrorException setmodel!(mhe2, deepcopy(nonlinmodel))
-end
\ No newline at end of file
+end
+
+@testset "MovingHorizonEstimator v.s. Kalman filters" begin
+ linmodel1 = setop!(LinModel(sys,Ts,i_d=[3]), uop=[10,50], yop=[50,30], dop=[20])
+ mhe = MovingHorizonEstimator(linmodel1, He=3, nint_ym=0, direct=false)
+ kf = KalmanFilter(linmodel1, nint_ym=0, direct=false)
+ X̂_mhe = zeros(4, 6)
+ X̂_kf = zeros(4, 6)
+ for i in 1:6
+ y = [50,31] + randn(2)
+ x̂_mhe = preparestate!(mhe, y, [25])
+ x̂_kf = preparestate!(kf, y, [25])
+ X̂_mhe[:,i] = x̂_mhe
+ X̂_kf[:,i] = x̂_kf
+ updatestate!(mhe, [11, 50], y, [25])
+ updatestate!(kf, [11, 50], y, [25])
+ end
+ @test X̂_mhe ≈ X̂_kf atol=1e-3 rtol=1e-3
+ mhe = MovingHorizonEstimator(linmodel1, He=3, nint_ym=0, direct=true)
+ kf = KalmanFilter(linmodel1, nint_ym=0, direct=true)
+ X̂_mhe = zeros(4, 6)
+ X̂_kf = zeros(4, 6)
+ for i in 1:6
+ y = [50,31] + randn(2)
+ x̂_mhe = preparestate!(mhe, y, [25])
+ x̂_kf = preparestate!(kf, y, [25])
+ X̂_mhe[:,i] = x̂_mhe
+ X̂_kf[:,i] = x̂_kf
+ updatestate!(mhe, [11, 50], y, [25])
+ updatestate!(kf, [11, 50], y, [25])
+ end
+ @test X̂_mhe ≈ X̂_kf atol=1e-3 rtol=1e-3
+ f = (x,u,d) -> linmodel1.A*x + linmodel1.Bu*u + linmodel1.Bd*d
+ h = (x,d) -> linmodel1.C*x + linmodel1.Dd*d
+ nonlinmodel = NonLinModel(f, h, Ts, 2, 4, 2, 1, solver=nothing)
+ nonlinmodel = setop!(nonlinmodel, uop=[10,50], yop=[50,30], dop=[20])
+ mhe = MovingHorizonEstimator(nonlinmodel, He=5, nint_ym=0, direct=false)
+ ukf = UnscentedKalmanFilter(nonlinmodel, nint_ym=0, direct=false)
+ ekf = ExtendedKalmanFilter(nonlinmodel, nint_ym=0, direct=false)
+ X̂_mhe = zeros(4, 6)
+ X̂_ukf = zeros(4, 6)
+ X̂_ekf = zeros(4, 6)
+ for i in 1:6
+ y = [50,31] + randn(2)
+ x̂_mhe = preparestate!(mhe, y, [25])
+ x̂_ukf = preparestate!(ukf, y, [25])
+ x̂_ekf = preparestate!(ekf, y, [25])
+ X̂_mhe[:,i] = x̂_mhe
+ X̂_ukf[:,i] = x̂_ukf
+ X̂_ekf[:,i] = x̂_ekf
+ updatestate!(mhe, [11, 50], y, [25])
+ updatestate!(ukf, [11, 50], y, [25])
+ updatestate!(ekf, [11, 50], y, [25])
+ end
+ @test X̂_mhe ≈ X̂_ukf atol=1e-3 rtol=1e-3
+ @test X̂_mhe ≈ X̂_ekf atol=1e-3 rtol=1e-3
+ mhe = MovingHorizonEstimator(nonlinmodel, He=5, nint_ym=0, direct=true)
+ ukf = UnscentedKalmanFilter(nonlinmodel, nint_ym=0, direct=true)
+ ekf = ExtendedKalmanFilter(nonlinmodel, nint_ym=0, direct=true)
+ X̂_mhe = zeros(4, 6)
+ X̂_ukf = zeros(4, 6)
+ X̂_ekf = zeros(4, 6)
+ for i in 1:6
+ y = [50,31] + randn(2)
+ x̂_mhe = preparestate!(mhe, y, [25])
+ x̂_ukf = preparestate!(ukf, y, [25])
+ x̂_ekf = preparestate!(ekf, y, [25])
+ X̂_mhe[:,i] = x̂_mhe
+ X̂_ukf[:,i] = x̂_ukf
+ X̂_ekf[:,i] = x̂_ekf
+ updatestate!(mhe, [11, 50], y, [25])
+ updatestate!(ukf, [11, 50], y, [25])
+ updatestate!(ekf, [11, 50], y, [25])
+ end
+ @test X̂_mhe ≈ X̂_ukf atol=1e-3 rtol=1e-3
+ @test X̂_mhe ≈ X̂_ekf atol=1e-3 rtol=1e-3
+end
+
+@testset "MovingHorizonEstimator LinModel v.s. NonLinModel" begin
+ linmodel = setop!(LinModel(sys,Ts,i_d=[3]), uop=[10,50], yop=[50,30], dop=[20])
+ f = (x,u,d) -> linmodel.A*x + linmodel.Bu*u + linmodel.Bd*d
+ h = (x,d) -> linmodel.C*x + linmodel.Dd*d
+ nonlinmodel = NonLinModel(f, h, Ts, 2, 4, 2, 1, solver=nothing)
+ nonlinmodel = setop!(nonlinmodel, uop=[10,50], yop=[50,30], dop=[20])
+ optim = JuMP.Model(optimizer_with_attributes(Ipopt.Optimizer, "sb" => "yes"))
+ mhe_lin = MovingHorizonEstimator(linmodel, He=5, nint_ym=0, direct=true, optim=optim)
+ mhe_lin = setconstraint!(
+ mhe_lin, x̂min=[-100, -100, -100, -100], ŵmin=[10,10,10,10]
+ )
+ mhe_nonlin = MovingHorizonEstimator(nonlinmodel, He=5, nint_ym=0, direct=true)
+ mhe_nonlin = setconstraint!(
+ mhe_nonlin, x̂min=[-100, -100, -100, -100], ŵmin=[10,10,10,10]
+ )
+ X̂_lin = zeros(4, 6)
+ X̂_nonlin = zeros(4, 6)
+ for i in 1:6
+ y = [50,31] + randn(2)
+ x̂_lin = preparestate!(mhe_lin, y, [25])
+ x̂_nonlin = preparestate!(mhe_nonlin, y, [25])
+ X̂_lin[:,i] = x̂_lin
+ X̂_nonlin[:,i] = x̂_nonlin
+ updatestate!(mhe_lin, [11, 50], y, [25])
+ updatestate!(mhe_nonlin, [11, 50], y, [25])
+ end
+ @test X̂_lin ≈ X̂_nonlin atol=1e-3 rtol=1e-3
+ mhe2_lin = MovingHorizonEstimator(linmodel, He=5, nint_ym=0, direct=false, optim=optim)
+ mhe2_lin = setconstraint!(
+ mhe2_lin, x̂min=[-100, -100, -100, -100], ŵmin=[10,10,10,10]
+ )
+ mhe2_nonlin = MovingHorizonEstimator(nonlinmodel, He=5, nint_ym=0, direct=false)
+ mhe2_nonlin = setconstraint!(
+ mhe2_nonlin, x̂min=[-100, -100, -100, -100], ŵmin=[10,10,10,10]
+ )
+ X̂_lin = zeros(4, 6)
+ X̂_nonlin = zeros(4, 6)
+ for i in 1:6
+ y = [50,31] + randn(2)
+ x̂_lin = preparestate!(mhe2_lin, y, [25])
+ x̂_nonlin = preparestate!(mhe2_nonlin, y, [25])
+ X̂_lin[:,i] = x̂_lin
+ X̂_nonlin[:,i] = x̂_nonlin
+ updatestate!(mhe2_lin, [11, 50], y, [25])
+ updatestate!(mhe2_nonlin, [11, 50], y, [25])
+ end
+ @test X̂_lin ≈ X̂_nonlin atol=1e-3 rtol=1e-3
+end