
Commit f6eb800

added: MovingHorizonEstimator now uses value_and_jacobian! of DI.jl
1 parent 50960cd commit f6eb800

2 files changed: +42 -41 lines changed

src/controller/nonlinmpc.jl

Lines changed: 2 additions & 2 deletions
@@ -566,7 +566,7 @@ This method is really intricate and I'm not proud of it. That's because of 3 ele
 
 - These functions are used inside the nonlinear optimization, so they must be type-stable
   and as efficient as possible. All the function outputs and derivatives are cached and
-  updated in-place if required to use the efficient [`value_and_jacobian!`](@extref DifferentiationInterface DifferentiationInterface.value_and_jacobian!)`.
+  updated in-place if required to use the efficient [`value_and_jacobian!`](@extref DifferentiationInterface DifferentiationInterface.value_and_jacobian!).
 - The `JuMP` NLP syntax forces splatting for the decision variable, which implies use
   of `Vararg{T,N}` (see the [performance tip](@extref Julia Be-aware-of-when-Julia-avoids-specializing)
   ) and memoization to avoid redundant computations. This is already complex, but it's even
@@ -577,7 +577,7 @@ This method is really intricate and I'm not proud of it. That's because of 3 ele
 Inspired from: [User-defined operators with vector outputs](@extref JuMP User-defined-operators-with-vector-outputs)
 """
 function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT<:Real
-    # ----- common cache for Jfunc, gfuncs, geqfuncs called with floats -------------------
+    # ----------- common cache for Jfunc, gfuncs and geqfuncs ----------------------------
     model = mpc.estim.model
     grad, jac = mpc.gradient, mpc.jacobian
     nu, ny, nx̂, nϵ, nk = model.nu, model.ny, mpc.estim.nx̂, mpc.nϵ, model.nk
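For context, the DifferentiationInterface.jl calls that this commit switches to follow a prepare-once, evaluate-many pattern, with `Cache` contexts holding the work buffers that are mutated during each evaluation. A minimal, self-contained sketch of that pattern (the function `f!`, its buffer, the sizes and the `AutoForwardDiff` backend here are illustrative choices, not code from the package):

    using DifferentiationInterface
    import ForwardDiff

    # in-place function: fills `y` from `x`, using a scratch buffer passed as a context
    f!(y, x, buffer) = (buffer .= 2 .* x; y .= buffer .^ 2; nothing)

    backend = AutoForwardDiff()
    x, y, buffer = ones(3), zeros(3), zeros(3)
    context = (Cache(buffer),)                           # DI manages typed copies of caches
    prep = prepare_jacobian(f!, y, backend, x, context...)   # one-time preparation
    J = zeros(length(y), length(x))
    # compute the output and the Jacobian in a single pass, reusing all buffers:
    value_and_jacobian!(f!, y, J, prep, backend, x, context...)

The single `value_and_jacobian!` (or `value_and_gradient!`) call replaces separate value and derivative evaluations, which is what allows the new memoization below to serve both the JuMP value operator and its derivative operator from one differentiation pass.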

src/estimator/mhe/construct.jl

Lines changed: 40 additions & 39 deletions
@@ -1314,97 +1314,98 @@ Also return vectors with the nonlinear inequality constraint functions `gfuncs`,
 This method is really intricate and I'm not proud of it. That's because of 3 elements:
 
 - These functions are used inside the nonlinear optimization, so they must be type-stable
-  and as efficient as possible.
+  and as efficient as possible. All the function outputs and derivatives are cached and
+  updated in-place if required to use the efficient [`value_and_jacobian!`](@extref DifferentiationInterface DifferentiationInterface.value_and_jacobian!).
 - The `JuMP` NLP syntax forces splatting for the decision variable, which implies use
   of `Vararg{T,N}` (see the [performance tip](@extref Julia Be-aware-of-when-Julia-avoids-specializing))
   and memoization to avoid redundant computations. This is already complex, but it's even
   worse knowing that most automatic differentiation tools do not support splatting.
-- The signature of gradient and hessian functions is not the same for univariate (`nZ̃ == 1`)
-  and multivariate (`nZ̃ > 1`) operators in `JuMP`. Both must be defined.
 
 Inspired from: [User-defined operators with vector outputs](@extref JuMP User-defined-operators-with-vector-outputs)
 """
 function get_optim_functions(
     estim::MovingHorizonEstimator, ::JuMP.GenericModel{JNT},
 ) where {JNT <: Real}
-    # ---------- common cache for Jfunc, gfuncs called with floats ------------------------
+    # ----------- common cache for Jfunc and gfuncs --------------------------------------
     model, con = estim.model, estim.con
+    grad, jac = estim.gradient, estim.jacobian
     nx̂, nym, nŷ, nu, nϵ, nk = estim.nx̂, estim.nym, model.ny, model.nu, estim.nϵ, model.nk
     He = estim.He
     nV̂, nX̂, ng, nZ̃ = He*nym, He*nx̂, length(con.i_g), length(estim.Z̃)
-    myNaN = convert(JNT, NaN) # NaN to force update_simulations! at first call
     strict = Val(true)
-    Z̃::Vector{JNT} = fill(myNaN, nZ̃)
+    myNaN = convert(JNT, NaN)
+    J::Vector{JNT} = zeros(JNT, 1)
     V̂::Vector{JNT}, X̂0::Vector{JNT} = zeros(JNT, nV̂), zeros(JNT, nX̂)
-    k0::Vector{JNT} = zeros(JNT, nk)
+    k0::Vector{JNT} = zeros(JNT, nk)
     û0::Vector{JNT}, ŷ0::Vector{JNT} = zeros(JNT, nu), zeros(JNT, nŷ)
     g::Vector{JNT} = zeros(JNT, ng)
     x̄::Vector{JNT} = zeros(JNT, nx̂)
     # --------------------- objective functions -------------------------------------------
-    function Jfunc(Z̃arg::Vararg{T, N}) where {N, T<:Real}
-        if isdifferent(Z̃arg, Z̃)
-            Z̃ .= Z̃arg
-            update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim, Z̃)
-        end
-        return obj_nonlinprog!(x̄, estim, model, V̂, Z̃)::T
-    end
     function Jfunc!(Z̃, V̂, X̂0, û0, k0, ŷ0, g, x̄)
         update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim, Z̃)
         return obj_nonlinprog!(x̄, estim, model, V̂, Z̃)
     end
-    Z̃_∇J = fill(myNaN, nZ̃)
+    Z̃_∇J = fill(myNaN, nZ̃) # NaN to force update_predictions! at first call
     ∇J_context = (
         Cache(V̂), Cache(X̂0), Cache(û0), Cache(k0), Cache(ŷ0),
         Cache(g),
         Cache(x̄),
     )
     # temporarily "fill" the estimation window for the preparation of the gradient:
     estim.Nk[] = He
-    ∇J_prep = prepare_gradient(Jfunc!, estim.gradient, Z̃_∇J, ∇J_context...; strict)
+    ∇J_prep = prepare_gradient(Jfunc!, grad, Z̃_∇J, ∇J_context...; strict)
     estim.Nk[] = 0
     ∇J = Vector{JNT}(undef, nZ̃)
-    ∇Jfunc! = function (∇J::AbstractVector{T}, Z̃arg::Vararg{T, N}) where {N, T<:Real}
+    function update_objective!(J, ∇J, Z̃, Z̃arg)
+        if isdifferent(Z̃arg, Z̃)
+            Z̃ .= Z̃arg
+            J[], _ = value_and_gradient!(Jfunc!, ∇J, ∇J_prep, grad, Z̃_∇J, ∇J_context...)
+        end
+    end
+    function Jfunc(Z̃arg::Vararg{T, N}) where {N, T<:Real}
+        update_objective!(J, ∇J, Z̃_∇J, Z̃arg)
+        return J[]::T
+    end
+    ∇Jfunc! = function (∇Jarg::AbstractVector{T}, Z̃arg::Vararg{T, N}) where {N, T<:Real}
         # only the multivariate syntax of JuMP.@operator, univariate is impossible for MHE
         # since Z̃ comprises the arrival state estimate AND the estimated process noise
-        Z̃_∇J .= Z̃arg
-        gradient!(Jfunc!, ∇J, ∇J_prep, estim.gradient, Z̃_∇J, ∇J_context...)
-        return ∇J
+        update_objective!(J, ∇J, Z̃_∇J, Z̃arg)
+        return ∇Jarg .= ∇J
     end
-
     # --------------------- inequality constraint functions -------------------------------
-    gfuncs = Vector{Function}(undef, ng)
-    for i in eachindex(gfuncs)
-        gfunc_i = function (Z̃arg::Vararg{T, N}) where {N, T<:Real}
-            if isdifferent(Z̃arg, Z̃)
-                Z̃ .= Z̃arg
-                update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim, Z̃)
-            end
-            return g[i]::T
-        end
-        gfuncs[i] = gfunc_i
-    end
     function gfunc!(g, Z̃, V̂, X̂0, û0, k0, ŷ0)
         return update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim, Z̃)
     end
-    Z̃_∇g = fill(myNaN, nZ̃)
+    Z̃_∇g = fill(myNaN, nZ̃) # NaN to force update_predictions! at first call
     ∇g_context = (
         Cache(V̂), Cache(X̂0), Cache(û0), Cache(k0), Cache(ŷ0),
     )
     # temporarily enable all the inequality constraints for sparsity detection:
     estim.con.i_g .= true
     estim.Nk[] = He
-    ∇g_prep = prepare_jacobian(gfunc!, g, estim.jacobian, Z̃_∇g, ∇g_context...; strict)
+    ∇g_prep = prepare_jacobian(gfunc!, g, jac, Z̃_∇g, ∇g_context...; strict)
     estim.con.i_g .= false
     estim.Nk[] = 0
-    ∇g = init_diffmat(JNT, estim.jacobian, ∇g_prep, nZ̃, ng)
+    ∇g = init_diffmat(JNT, jac, ∇g_prep, nZ̃, ng)
+    function update_con!(g, ∇g, Z̃, Z̃arg)
+        if isdifferent(Z̃arg, Z̃)
+            Z̃ .= Z̃arg
+            value_and_jacobian!(gfunc!, g, ∇g, ∇g_prep, jac, Z̃, ∇g_context...)
+        end
+    end
+    gfuncs = Vector{Function}(undef, ng)
+    for i in eachindex(gfuncs)
+        gfunc_i = function (Z̃arg::Vararg{T, N}) where {N, T<:Real}
+            update_con!(g, ∇g, Z̃_∇g, Z̃arg)
+            return g[i]::T
+        end
+        gfuncs[i] = gfunc_i
+    end
     ∇gfuncs! = Vector{Function}(undef, ng)
     for i in eachindex(∇gfuncs!)
         ∇gfuncs![i] = function (∇g_i, Z̃arg::Vararg{T, N}) where {N, T<:Real}
             # only the multivariate syntax of JuMP.@operator, see above for the explanation
-            if isdifferent(Z̃arg, Z̃_∇g)
-                Z̃_∇g .= Z̃arg
-                jacobian!(gfunc!, g, ∇g, ∇g_prep, estim.jacobian, Z̃_∇g, ∇g_context...)
-            end
+            update_con!(g, ∇g, Z̃_∇g, Z̃arg)
             return ∇g_i .= @views ∇g[i, :]
         end
     end
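The shared-cache memoization that the new `update_objective!` and `update_con!` helpers implement can be seen in isolation in the sketch below: the value operator and the derivative operator both funnel through one update function, so a value call followed by a derivative call at the same splatted point triggers a single differentiation pass. The toy objective `f`, the sizes, the backend and the local `isdifferent` stand-in are made up for illustration and are not the MHE code:

    using DifferentiationInterface
    import ForwardDiff

    f(x) = sum(abs2, x)                    # toy objective over a vector argument
    n = 3
    backend = AutoForwardDiff()
    xcache = fill(NaN, n)                  # NaN forces a recomputation at the first call
    J  = zeros(1)                          # cached objective value
    ∇J = zeros(n)                          # cached gradient
    prep = prepare_gradient(f, backend, xcache)

    # stand-in for the package's own `isdifferent` helper:
    isdifferent(xarg, x) = any(xarg[i] != x[i] for i in eachindex(x))

    function update!(xarg)                 # recompute value AND gradient only when needed
        if isdifferent(xarg, xcache)
            xcache .= xarg
            J[1], _ = value_and_gradient!(f, ∇J, prep, backend, xcache)
        end
    end

    # the two operator callbacks (JuMP-style splatted arguments) share the same cache:
    Jfunc(xarg::Vararg{T,N}) where {N,T<:Real}      = (update!(xarg); J[1]::T)
    ∇Jfunc!(g, xarg::Vararg{T,N}) where {N,T<:Real} = (update!(xarg); g .= ∇J)

    Jfunc(1.0, 2.0, 3.0)                   # computes value and gradient once
    ∇Jfunc!(zeros(n), 1.0, 2.0, 3.0)       # same point: served from the cache

The constraint side follows the same idea with `value_and_jacobian!`, except that one update fills the whole constraint vector `g` and Jacobian `∇g`, and each `gfuncs[i]`/`∇gfuncs![i]` closure only reads out its row.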
