
[New algorithm] Runge-Kutta in the interaction picture (RKIP) #2736


Open · wants to merge 10 commits into base: master
24 changes: 24 additions & 0 deletions lib/OrdinaryDiffEqRKIP/LICENSE.md
@@ -0,0 +1,24 @@
The OrdinaryDiffEq.jl package is licensed under the MIT "Expat" License:

> Copyright (c) 2016-2020: ChrisRackauckas, Yingbo Ma, Julia Computing Inc, and
> other contributors:
>
> https://github.com/SciML/OrdinaryDiffEq.jl/graphs/contributors
>
> Permission is hereby granted, free of charge, to any person obtaining a copy
> of this software and associated documentation files (the "Software"), to deal
> in the Software without restriction, including without limitation the rights
> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
> copies of the Software, and to permit persons to whom the Software is
> furnished to do so, subject to the following conditions:
>
> The above copyright notice and this permission notice shall be included in all
> copies or substantial portions of the Software.
>
> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
> SOFTWARE.
44 changes: 44 additions & 0 deletions lib/OrdinaryDiffEqRKIP/Project.toml
@@ -0,0 +1,44 @@
name = "OrdinaryDiffEqRKIP"
uuid = "a4daff8c-1d43-4ff3-8eff-f78720aeecdc"
authors = ["Azercoco <[email protected]>"]
version = "1.0.0"


[sources]
OrdinaryDiffEqCore = {path = "../OrdinaryDiffEqCore"}

[deps]
DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e"
DiffEqDevTools = "f3b72e0c-5b89-59e1-b016-84e28bfd966d"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MaybeInplace = "bb5d69b7-63fc-4a16-80bd-7e42200c7bdb"
OrdinaryDiffEqCore = "bbf590c4-e513-4bbe-9b18-05decba2e5d8"
SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462"
SciMLOperators = "c0aeaf25-5076-4817-a8d5-81caf7dfa961"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
UnPack = "3a884ed6-31ef-47d7-9d2a-63182c4928ed"

[compat]
DiffEqBase = "6.175"
DiffEqDevTools = "2.48"
MaybeInplace = "0.1.4"
OrdinaryDiffEqCore = "1.26"
SciMLBase = "2.99"
SciMLOperators = "1.3.1"
StaticArrays = "1.9.13"
UnPack = "1.0.2"
LinearAlgebra = "1.11"
CUDA = "5.5.2"
FFTW = "1.8.0"
SafeTestsets = "0.1.0"

julia = "1.11"

[extras]
FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341"
SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"

[targets]
test = ["FFTW", "Test", "SafeTestsets", "CUDA"]
26 changes: 26 additions & 0 deletions lib/OrdinaryDiffEqRKIP/src/OrdinaryDiffEqRKIP.jl
@@ -0,0 +1,26 @@
module OrdinaryDiffEqRKIP

using LinearAlgebra: ldiv!, exp, axpy!, norm, mul!
using SciMLOperators: AbstractSciMLOperator
using UnPack: @pack!, @unpack
using MaybeInplace: @bb
using SciMLBase: isinplace
using DiffEqBase: ExplicitRKTableau
using DiffEqDevTools: constructVerner6

import OrdinaryDiffEqCore: OrdinaryDiffEqAdaptiveExponentialAlgorithm, alg_adaptive_order,
alg_order, alg_cache, @cache, SplitFunction, get_fsalfirstlast,
initialize!, perform_step!,
has_dtnew_modification, calculate_residuals,
calculate_residuals!, increment_nf!,
OrdinaryDiffEqAdaptiveAlgorithm, OrdinaryDiffEqMutableCache,
dtnew_modification, generic_solver_docstring

include("rkip_cache.jl")
include("algorithms.jl")
include("rkip_utils.jl")
include("rkip_perform_step.jl")

export RKIP

end
176 changes: 176 additions & 0 deletions lib/OrdinaryDiffEqRKIP/src/algorithms.jl
@@ -0,0 +1,176 @@
using OrdinaryDiffEqCore: OrdinaryDiffEqCore

METHOD_DESCRIPTION = """
Runge-Kutta in the interaction picture.

This is suited for solving semilinear problems of the form:

```math
\frac{du}{dt} = Au + f(u,p,t)
```

where A is a possibly stiff, time-independent linear operator whose scaled exponential exp(A*h) can be computed efficiently for any h.
The problem is first transformed into a non-stiff variant (the interaction picture):

```math
\begin{aligned}
u_I(t) &= \exp(-At) u(t) \\
\frac{du_I}{dt} &= f_I(u_I,p,t) \\
f_I(u_I,p,t) &= \exp(-At) f(\exp(At) u_I, p, t) \\
\end{aligned}
```
and is then solved with an explicit (adaptive) Runge-Kutta method.
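Writing out the derivative of u_I (using that A is time-independent, so it commutes with its own exponential) shows how the stiff linear term cancels:

```math
\begin{aligned}
\frac{du_I}{dt} &= -A\exp(-At)\,u(t) + \exp(-At)\,\frac{du}{dt} \\
                &= -A\exp(-At)\,u(t) + \exp(-At)\,\bigl(Au(t) + f(u,p,t)\bigr) \\
                &= \exp(-At)\, f(\exp(At)\,u_I, p, t) = f_I(u_I,p,t)
\end{aligned}
```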

This solver is only implemented for semilinear problems: a `SplitODEProblem` whose first function `f1` is an `AbstractSciMLOperator` A implementing:

```julia
LinearAlgebra.exp(A, t) # = exp(A*t)
```

Comment on lines +24 to +27 (Member):

This should instead be using ExponentialUtilities.jl's algorithm choice for `exponential!`.

@Azercoco (Author), Jun 18, 2025:

As the implementation of the exponential is very operator dependent, are you sure it should not use the AbstractSciMLOperator interface instead?

cf: https://github.com/SciML/SciMLOperators.jl/blob/7ba386430a229776b41f481ff352eacd7c9f09d4/src/interface.jl#L382C1-L382C60

I checked the code of ExponentialUtilities and it seems to be useful when the exponential matrix output is dense. But in that case, caching the matrix as done here does not make a lot of sense, and it would be better to use an ETD method with a Krylov-based expmv.

Reply (Member):

> As the implementation of the exponential is very operator dependent, are you sure it should not use the AbstractSciMLOperator interface instead?

ExponentialUtilities.jl will specialize on claimed properties like symmetric.

> I checked the code of ExponentialUtilities and it seems to be useful when the exponential matrix output is dense. But in that case, caching the matrix as done here does not make a lot of sense, and it would be better to use an ETD method with a Krylov-based expmv.

You mean sparse? expv is for the sparse case. But exponential! is for the dense case, and it will be faster for standard dense matrices.
Both `A` and the return value of `exp(A, t)` must also implement the `AbstractSciMLOperator` call interface:
```julia
A(du, u, v, p, t) # for in-place problem
A(u, v, p, t) # for out-of-place problem
```
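
To make the expected setup concrete, here is a minimal, hypothetical sketch (not taken from this PR) of a conforming semilinear problem; it assumes `MatrixOperator` from SciMLOperators and defines the two-argument `exp` by hand in case it is not already provided for that operator type:

```julia
# Hypothetical sketch: a small stiff-diagonal semilinear ODE du/dt = A*u + f(u,p,t)
# set up in the SplitODEProblem form that RKIP expects.
using LinearAlgebra, SciMLBase, SciMLOperators
using OrdinaryDiffEqRKIP

A = MatrixOperator(Diagonal([-1.0, -10.0, -100.0]))   # stiff, time-independent linear part

# The two-argument exponential required above; the return value is again an operator
# implementing the call interface. Accessing the wrapped matrix via `L.A` is an
# assumption about MatrixOperator internals.
LinearAlgebra.exp(L::MatrixOperator, t) = MatrixOperator(exp(Matrix(L.A) * t))

f2(u, p, t) = 0.1 .* sin.(u)                          # non-stiff nonlinearity
prob = SplitODEProblem(A, f2, ones(3), (0.0, 1.0))

sol = solve(prob, RKIP())
```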

For performance, the algorithm will cache and reuse the computed operator-exponential for a fixed set of time steps.

# Arguments
- `dtmin::T`: the smallest step `dt` for which `exp(A*dt)` will be cached. Default is `1e-3`
- `dtmax::T`: the largest step `dt` for which `exp(A*dt)` will be cached. Default is `1.0`

The fixed steps follow a geometric progression.
Time stepping can still happen outside these bounds (e.g. for the final step), but no caching occurs in that case (`exp(A*dt)` is recomputed at each such step), degrading performance.
The time step can be forcibly clamped within the cached range through the keywords `clamp_lower_dt` and `clamp_higher_dt`.
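
With the defaults, the cached step sizes are simply a log-spaced grid (illustration only; `logrange` is in Julia 1.11 Base and is also what the constructor below uses):

```julia
# The 100 cached step sizes between dtmin = 1e-3 and dtmax = 1.0:
dts = logrange(1e-3, 1.0, 100)   # geometric progression from 1e-3 to 1.0
# Proposed steps inside this range are snapped to one of these values
# (see `dtnew_modification` below), so the matching cached exp(A*dt) can be reused.
```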

The cached operator exponentials are also directly stored in the algorithm such that:

```julia
rkip = RKIP()
solve(ode_prob_1, rkip, t1)
solve(ode_prob_2, rkip, t2)
```

will reuse the operator exponentials cached during the first `solve` call.
This can be useful when successively solving several problems that share a common `A`.
Comment on lines +49 to +54 (Member):

t1 and t2 aren't defined here. And this is interface breaking: it should just use `init` and `reinit!`/`solve!`, and it will do this without the extra work anyway.
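
A sketch of the `init`/`reinit!`/`solve!` pattern the reviewer refers to (hypothetical `prob`, `u0_2`, and `tspan_2`; illustration only):

```julia
# Reuse one integrator (and therefore its internal caches) across successive solves,
# per the standard DiffEq integrator interface, instead of storing the cache in the algorithm.
using SciMLBase
using OrdinaryDiffEqRKIP

integrator = init(prob, RKIP())       # first solve
sol1 = solve!(integrator)

# Reinitialize with a new initial condition and time span; caches are kept.
reinit!(integrator, u0_2; t0 = first(tspan_2), tf = last(tspan_2))
sol2 = solve!(integrator)
```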


"""
REFERENCE = """Zhongxi Zhang, Liang Chen, and Xiaoyi Bao, "A fourth-order Runge-Kutta in the interaction picture method for numerically solving the coupled nonlinear Schrödinger equation," Opt. Express 18, 8261-8276 (2010)"""

KEYWORD_DESCRIPTION = """
- `nb_of_cache_step::Integer`: the number of cached step sizes (the length of the geometric progression between `dtmin` and `dtmax`). Default is `100`.
- `tableau::ExplicitRKTableau`: the Runge-Kutta Tableau to use. Default is `constructVerner6()`.
- `clamp_lower_dt::Bool`: whether to clamp the proposed step to the smallest cached step size in order to force the use of cached exponentials, improving performance.
This may prevent reaching the desired tolerance. Default is `false`.
- `clamp_higher_dt::Bool`: whether to clamp the proposed step to the largest cached step size in order to force the use of cached exponentials, improving performance.
This can cause performance degradation if `integrator.dtmax` is too small. Default is `true`.
- `use_ldiv::Bool`: whether to use `ldiv!(exp(A, t), v)` instead of caching `exp(A, -t)` and multiplying by it. Reduces memory usage but is slightly less efficient. `ldiv!` must be implemented for the exponential operator. Only works for in-place problems. Default is `false`.
"""

@doc generic_solver_docstring(
METHOD_DESCRIPTION, "RKIP", "Adaptative Exponential Runge-Kutta",
REFERENCE, KEYWORD_DESCRIPTION, "")
mutable struct RKIP{
tableauType <: ExplicitRKTableau, elType, dtType <: AbstractVector{elType}} <:
OrdinaryDiffEqAdaptiveAlgorithm
tableau::tableauType
dt_for_expÂ_caching::dtType
clamp_lower_dt::Bool
clamp_higher_dt::Bool
use_ldiv::Bool
cache::Union{Nothing, RKIPCache}
end

function RKIP(dtmin::T = 1e-3, dtmax::T = 1.0; nb_of_cache_step::Int = 100,
tableau = constructVerner6(T), clamp_lower_dt::Bool = false,
clamp_higher_dt::Bool = true, use_ldiv = false) where {T}
RKIP{
typeof(tableau), T, Vector{T}}(
tableau,
logrange(dtmin, dtmax, nb_of_cache_step),
clamp_lower_dt,
clamp_higher_dt,
use_ldiv,
nothing
)
end
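
For example, a solver instance with non-default caching bounds (usage sketch):

```julia
# Cache 200 exponentials for step sizes between 1e-4 and 0.5, and clamp proposed
# steps into that range from both sides so that only cached exponentials are used.
alg = RKIP(1e-4, 0.5; nb_of_cache_step = 200,
    clamp_lower_dt = true, clamp_higher_dt = true)
```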

alg_order(alg::RKIP) = alg.tableau.order
alg_adaptive_order(alg::RKIP) = alg.tableau.adaptiveorder

has_dtnew_modification(alg::RKIP) = true

function dtnew_modification(alg::RKIP{tableauType, elType, dtType},
dtnew) where {tableauType, elType, dtType}
@unpack dt_for_expÂ_caching = alg
if first(alg.dt_for_expÂ_caching) > dtnew && alg.clamp_lower_dt
dtnew = first(alg.dt_for_expÂ_caching)
elseif last(alg.dt_for_expÂ_caching) < dtnew && alg.clamp_higher_dt
dtnew = last(alg.dt_for_expÂ_caching)
else
dtnew = alg.dt_for_expÂ_caching[searchsortedfirst(alg.dt_for_expÂ_caching, dtnew)]
end
return dtnew
end

dtnew_modification(_, alg::RKIP, dtnew) = dtnew_modification(alg, dtnew)

function alg_cache(
alg::RKIP, u::uType, rate_prototype, uEltypeNoUnits, uBottomEltypeNoUnits,
tTypeNoUnits, uprev, uprev2, f, t, dt, reltol, p, calck, iip) where {uType}
tmp = zero(u)
utilde = zero(u)
kk = [zero(u) for _ in 1:(alg.tableau.stages)]

 = isa(f, SplitFunction) ? f.f1.f :
throw(ArgumentError("RKIP is only implemented for semilinear problems"))
opType = typeof(Â)
expOpType = typeof(exp(Â, 1.0))

if isnothing(alg.cache)
is_cached = Vector{Bool}(undef, length(alg.dt_for_expÂ_caching))
fill!(is_cached, false)

c_extended = vcat(alg.tableau.c, 1.0) # all the c values of the Runge-Kutta tableau, plus 1, which is needed for the RKIP step
c_unique = unique(c_extended) # some tableaus contain duplicates: keep only the unique values to save caching time and memory
c_index = [findfirst(==(c), c_unique) for c in c_extended] # index mapping

exp_cache = ExpCache{expOpType}(
Array{expOpType, 2}(undef, length(alg.dt_for_expÂ_caching), length(c_unique)),
Vector{expOpType}(undef, length(c_unique)))

if !alg.use_ldiv
exp_cache = ExpCacheNoLdiv(exp_cache,
ExpCache{expOpType}(
Array{expOpType, 2}(
undef, length(alg.dt_for_expÂ_caching), length(c_unique)),
Vector{expOpType}(undef, length(c_unique))))
expCacheType = ExpCacheNoLdiv{expOpType}
else
expCacheType = ExpCache{expOpType}
end

alg.cache = RKIPCache{expOpType, expCacheType, tTypeNoUnits, opType, uType, iip}(
exp_cache,
zero(tTypeNoUnits),
is_cached,
tmp,
utilde,
kk,
c_unique,
c_index
)
else # cache recycling
alg.cache = RKIPCache{
expOpType, typeof(alg.cache.exp_cache), tTypeNoUnits, opType, uType, iip}(
alg.cache.exp_cache,
alg.cache.last_step,
alg.cache.cached,
tmp,
utilde,
kk,
alg.cache.c_unique,
alg.cache.c_mapping
)
end
return alg.cache
end
92 changes: 92 additions & 0 deletions lib/OrdinaryDiffEqRKIP/src/rkip_cache.jl
@@ -0,0 +1,92 @@
abstract type AbstractExpCache{expOpType <: AbstractSciMLOperator} end

struct ExpCache{expOpType} <: AbstractExpCache{expOpType}
expÂ_cached::Array{expOpType, 2}
expÂ_for_this_step::Vector{expOpType}
end
struct ExpCacheNoLdiv{expOpType} <: AbstractExpCache{expOpType}
exp_cache::ExpCache{expOpType}
exp_cache_inv::ExpCache{expOpType}
end

function get_op_for_this_step(cache::ExpCache{expOpType}, index::Int) where {expOpType}
cache.expÂ_for_this_step[index]
end
function get_op_for_this_step(cache_no_ldiv::ExpCacheNoLdiv{expOpType},
positive::Bool, index::Int) where {expOpType}
positive ? cache_no_ldiv.exp_cache.expÂ_for_this_step[index] :
cache_no_ldiv.exp_cache_inv.expÂ_for_this_step[index]
end

mutable struct RKIPCache{
expOpType <: AbstractSciMLOperator, cacheType <: AbstractExpCache{expOpType},
tType <: Number, opType <: AbstractSciMLOperator, uType, iip} <:
OrdinaryDiffEqMutableCache
exp_cache::cacheType
last_step::tType
cached::Vector{Bool}
tmp::uType
utilde::uType
kk::Vector{uType}
c_unique::Vector{tType}
c_mapping::Vector{Integer}
end

get_fsalfirstlast(cache::RKIPCache, u) = (zero(cache.tmp), zero(cache.tmp))

@inline function cache_exp!(cache::ExpCache{expOpType},
A::opType,
h::T,
action::Symbol,
step_index::Int,
unique_stage_index::Int) where {
expOpType <: AbstractSciMLOperator, opType <: AbstractSciMLOperator, T <: Number}
@unpack expÂ_for_this_step, expÂ_cached = cache
expÂ_for_this_step[unique_stage_index] = (action == :use_cached) ?
expÂ_cached[step_index, unique_stage_index] :
exp(A, h) # fetching or generating exp(Â*c_i*dt)
if action == :cache
expÂ_cached[step_index, unique_stage_index] = expÂ_for_this_step[unique_stage_index] # storing exp(Â*c_i*dt)
end
end

@inline function cache_exp!(cache::ExpCacheNoLdiv{expOpType},
Â::opType,
h::T,
action::Symbol,
step_index::Int,
unique_stage_index::Int) where {
expOpType <: AbstractSciMLOperator, opType <: AbstractSciMLOperator, T <: Number}
cache_exp!(cache.exp_cache, Â, h, action, step_index, unique_stage_index)
cache_exp!(cache.exp_cache_inv, Â, -h, action, step_index, unique_stage_index)
end

"""
Prepare/generate all the needed exp(± A * dt * c[i]) for a step size dt
"""
@inline function cache_exp_op_for_this_step!(
cache::RKIPCache{expOpType, cacheType, tType, opType, uType, iip},
Â::opType, dt::tType,
alg::algType) where {expOpType, cacheType, tType, opType, uType, algType, iip}
@unpack dt_for_expÂ_caching = alg

if !iszero(dt) && !(dt ≈ cache.last_step) # check whether new exp(A*dt) operators are needed
dt_abs = abs(dt) # only the positive dt is used for indexing
action = :single_use # by default, exp(A*dt) is only computed for this step

step_index = clamp(searchsortedlast(dt_for_expÂ_caching, dt_abs),
1, lastindex(dt_for_expÂ_caching)) # fetching the index corresponding to the step size

if dt_for_expÂ_caching[step_index] ≈ dt_abs # if dt corresponds to a caching step
action = (cache.cached[step_index] ? :use_cached : :cache) # if already cached, reuse it; otherwise generate and cache it
end

for (unique_stage_index, c) in enumerate(cache.c_unique) # iterating over all unique c_i of the RK tableau
cache_exp!(
cache.exp_cache, Â, abs(dt * c), action, step_index, unique_stage_index) # generating and caching
end
cache.cached[step_index] |= (action == :cache) # set the flag that we have already cached exp(Â*c_i*dt) for this dt
end

cache.last_step = dt
end