Solver ecosystem revamp changes #7

Open · wants to merge 1 commit into main
2 changes: 2 additions & 0 deletions .github/workflows/ci.yml
@@ -46,6 +46,8 @@ jobs:
           ${{ runner.os }}-test-${{ env.cache-name }}-
           ${{ runner.os }}-test-
           ${{ runner.os }}-
+    - name: Clone SolverCore/revamp and OptSolver
+      run: julia -e 'using Pkg; pkg"activate ."; pkg"add SolverCore#revamp"; pkg"add https://github.com/JuliaSmoothOptimizers/OptSolver.jl#main"'
     - uses: julia-actions/julia-buildpkg@v1
     - uses: julia-actions/julia-runtest@v1
    - uses: julia-actions/julia-processcoverage@v1
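The same pinning can be reproduced locally before running the test suite. A minimal sketch using the Pkg API directly (assumptions: the same `revamp` branch and OptSolver URL as the workflow step above):

```julia
using Pkg

# Activate the project and pin the unregistered revamp dependencies,
# mirroring the CI step above.
Pkg.activate(".")
Pkg.add(PackageSpec(name = "SolverCore", rev = "revamp"))
Pkg.add(PackageSpec(url = "https://github.com/JuliaSmoothOptimizers/OptSolver.jl", rev = "main"))
```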
2 changes: 2 additions & 0 deletions Project.toml
@@ -8,12 +8,14 @@ ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 Logging = "56ddb016-857b-54e1-b83d-db4d58db5568"
 NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
+SolverCore = "ff4d7338-4cf1-434d-91df-b86cb86fb843"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

 [compat]
 ADNLPModels = "0.1"
 NLPModels = "0.14"
+SolverCore = "0.2"
 SolverTools = "0.4"
 julia = "1.3"
2 changes: 1 addition & 1 deletion src/SolverTest.jl
@@ -3,7 +3,7 @@ module SolverTest
 # stdlib
 using LinearAlgebra, Logging, SparseArrays, Test
 # JSO
-using ADNLPModels, NLPModels
+using ADNLPModels, NLPModels, OptSolver, SolverCore

 include("nlp/unconstrained.jl")
 include("nlp/bound-constrained.jl")
5 changes: 3 additions & 2 deletions src/nlp/bound-constrained.jl
@@ -56,10 +56,11 @@ end
 Test the `solver` on bound-constrained problems.
 If `rtol` is non-zero, the relative error uses the gradient at the initial guess.
 """
-function bound_constrained_nlp(solver; problem_set = bound_constrained_nlp_set(), atol = 1e-6, rtol = 1e-6)
+function bound_constrained_nlp(Solver; problem_set = bound_constrained_nlp_set(), atol = 1e-6, rtol = 1e-6)
   @testset "Problem $(nlp.meta.name)" for nlp in problem_set
+    solver = Solver(nlp.meta)
     stats = with_logger(NullLogger()) do
-      solver(nlp)
+      SolverCore.solve!(solver, nlp)
     end
     ng0 = rtol != 0 ? norm(grad(nlp, nlp.meta.x0)) : 0
     @test isapprox(stats.solution, ones(nlp.meta.nvar), atol = atol + rtol * ng0)
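This is the crux of the revamp: the helpers no longer receive a callable `solver` but a solver type, constructed from the problem meta and then run with `solve!`. A minimal sketch of the new calling sequence, using the `DummySolver` defined in `test/dummy-solver.jl` below (the model is illustrative):

```julia
using ADNLPModels, SolverCore

# Construct the solver from the problem's meta, then solve in place.
nlp = ADNLPModel(x -> (x[1] - 1)^2 + 4 * (x[2] - x[1]^2)^2, [-1.2; 1.0])
solver = DummySolver(nlp.meta)          # replaces the old callable `dummy`
stats = SolverCore.solve!(solver, nlp)  # returns an OptSolverOutput
println(stats.objective)
```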
39 changes: 20 additions & 19 deletions src/nlp/equality-constrained.jl
@@ -3,33 +3,33 @@ export equality_constrained_nlp
 function equality_constrained_nlp_set()
   n = 30
   return [
     ADNLPModel(x -> 2x[1]^2 + x[1] * x[2] + x[2]^2 - 9x[1] - 9x[2] + 14,
                [1.0; 2.0],
                x -> [4x[1] + 6x[2] - 10],
                zeros(1), zeros(1),
                name = "Simple quadratic problem"),
     ADNLPModel(x -> (x[1] - 1)^2,
                [-1.2; 1.0],
                x -> [10 * (x[2] - x[1]^2)],
                zeros(1), zeros(1),
                name = "HS6"),
     ADNLPModel(x -> (x[1] - 1)^2 + 100 * (x[2] - x[1]^2)^2,
                [-1.2; 1.0],
                x -> [(x[1] - 2)^2 + (x[2] - 2)^2 - 2],
                zeros(1), zeros(1),
                name = "Rosenbrock with (x₁-2)²+(x₂-2)²=2"),
     ADNLPModel(x -> -x[1] + 1,
                [0.5; 1/3],
                x -> [16x[1]^2 + 9x[2]^2 - 25;
                      4x[1] * 3x[2] - 12],
                zeros(2), zeros(2),
                name = "scaled HS8"),
     ADNLPModel(x -> dot(x, x) - n,
                zeros(n),
                x -> [sum(x) - n],
                zeros(1), zeros(1),
                name = "‖x‖² s.t. ∑x = n"),
     ADNLPModel(x -> (x[1] - 1.0)^2 + 100 * (x[2] - x[1]^2)^2,
                [-1.2; 1.0],
                x -> [sum(x) - 2], [0.0], [0.0],
                name = "Rosenbrock with ∑x = 2"),
@@ -42,10 +42,11 @@ end
 Test the `solver` on equality-constrained problems.
 If `rtol` is non-zero, the relative error uses the gradient at the initial guess.
 """
-function equality_constrained_nlp(solver; problem_set = equality_constrained_nlp_set(), atol = 1e-6, rtol = 1e-6)
+function equality_constrained_nlp(Solver; problem_set = equality_constrained_nlp_set(), atol = 1e-6, rtol = 1e-6)
   @testset "Problem $(nlp.meta.name)" for nlp in problem_set
+    solver = Solver(nlp.meta)
     stats = with_logger(NullLogger()) do
-      solver(nlp)
+      SolverCore.solve!(solver, nlp)
     end
     ng0 = rtol != 0 ? norm(grad(nlp, nlp.meta.x0)) : 0
     @test isapprox(stats.solution, ones(nlp.meta.nvar), atol = atol + rtol * ng0)
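Worth noting for anyone extending the set: these helpers assume every problem has solution `ones(nvar)`, since `stats.solution` is tested against it. A quick sanity check for the HS6 variant above (a sketch; `obj` and `cons` come from NLPModels):

```julia
using ADNLPModels, NLPModels

# Every test problem is built so that x* = ones(nvar).
nlp = ADNLPModel(x -> (x[1] - 1)^2, [-1.2; 1.0],
                 x -> [10 * (x[2] - x[1]^2)], zeros(1), zeros(1),
                 name = "HS6")
x = ones(2)
@assert obj(nlp, x) == 0.0     # objective vanishes at x*
@assert cons(nlp, x) == [0.0]  # constraint 10(x₂ - x₁²) = 0 holds at x*
```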
5 changes: 3 additions & 2 deletions src/nlp/multiprecision.jl
@@ -12,7 +12,7 @@ The `problem_type` can be
 - :eqnbnd
 - :gen
 """
-function multiprecision_nlp(solver, ptype)
+function multiprecision_nlp(Solver, ptype)
   f(x) = (x[1] - 1)^2 + 4 * (x[2] - x[1]^2)^2
   c(x) = [x[1]^2 + x[2]^2]
   c2(x) = [c(x); x[2] - x[1]^2 / 10]
@@ -38,8 +38,9 @@ function multiprecision_nlp(solver, ptype)

       ng0 = norm(grad(nlp, nlp.meta.x0))

+      solver = Solver(nlp.meta)
       stats = with_logger(NullLogger()) do
-        solver(nlp, atol=ϵ, rtol=ϵ)
+        solve!(solver, nlp, atol=ϵ, rtol=ϵ)
       end
       @test eltype(stats.solution) == T
       @test stats.objective isa T
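A sketch of the property this helper enforces, written as a standalone loop (assumptions: the `DummySolver` from the test suite and a `√eps(T)` tolerance; the helper's actual ϵ may differ):

```julia
using ADNLPModels, SolverCore, Test

# Solve the same problem at several precisions and check that the solver
# propagates the input element type T through the output stats.
for T in (Float32, Float64)
  nlp = ADNLPModel(x -> (x[1] - 1)^2 + 4 * (x[2] - x[1]^2)^2, T[-1.2; 1.0])
  ϵ = √eps(T)
  solver = DummySolver(nlp.meta)
  stats = SolverCore.solve!(solver, nlp, atol = ϵ, rtol = ϵ)
  @test eltype(stats.solution) == T
  @test stats.objective isa T
end
```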
5 changes: 3 additions & 2 deletions src/nlp/unconstrained.jl
@@ -39,10 +39,11 @@ end
 Test the `solver` on unconstrained problems.
 If `rtol` is non-zero, the relative error uses the gradient at the initial guess.
 """
-function unconstrained_nlp(solver; problem_set = unconstrained_nlp_set(), atol = 1e-6, rtol = 1e-6)
+function unconstrained_nlp(Solver; problem_set = unconstrained_nlp_set(), atol = 1e-6, rtol = 1e-6)
   @testset "Problem $(nlp.meta.name)" for nlp in problem_set
+    solver = Solver(nlp.meta)
     stats = with_logger(NullLogger()) do
-      solver(nlp)
+      SolverCore.solve!(solver, nlp)
     end
     ng0 = rtol != 0 ? norm(grad(nlp, nlp.meta.x0)) : 0
     @test isapprox(stats.solution, ones(nlp.meta.nvar), atol = atol + rtol * ng0)
5 changes: 3 additions & 2 deletions src/nls/bound-constrained.jl
@@ -70,10 +70,11 @@ end
 Test the `solver` on bound-constrained nonlinear least-squares problems.
 If `rtol` is non-zero, the relative error uses the gradient at the initial guess.
 """
-function bound_constrained_nls(solver; problem_set = bound_constrained_nls_set(), atol = 1e-6, rtol = 1e-6)
+function bound_constrained_nls(Solver; problem_set = bound_constrained_nls_set(), atol = 1e-6, rtol = 1e-6)
   @testset "Problem $(nls.meta.name)" for nls in problem_set
+    solver = Solver(nls.meta)
     stats = with_logger(NullLogger()) do
-      solver(nls)
+      SolverCore.solve!(solver, nls)
     end
     ng0 = rtol != 0 ? norm(grad(nls, nls.meta.x0)) : 0
     @test isapprox(stats.solution, ones(nls.meta.nvar), atol = atol + rtol * ng0)
15 changes: 8 additions & 7 deletions src/nls/equality-constrained.jl
@@ -3,14 +3,14 @@ export equality_constrained_nls
 function equality_constrained_nls_set()
   n = 10
   return [
     ADNLSModel(x -> [x[1] - 1],
                [-1.2; 1.0], 1,
                x -> [10 * (x[2] - x[1]^2)],
                zeros(1), zeros(1),
                name = "HS6"),
     ADNLSModel(x -> [x[1] - 1 ; 10 * (x[2] - x[1]^2)],
                [-1.2; 1.0], 2,
                x -> [(x[1] - 2)^2 + (x[2] - 2)^2 - 2],
                zeros(1), zeros(1),
                name = "Rosenbrock with (x₁-2)²+(x₂-2)²=2"),
     ADNLSModel(x -> [x[1] - 1 ; 10 * (x[2] - x[1]^2)],
@@ -49,10 +49,11 @@ end
 Test the `solver` on equality-constrained problems.
 If `rtol` is non-zero, the relative error uses the gradient at the initial guess.
 """
-function equality_constrained_nls(solver; problem_set = equality_constrained_nls_set(), atol = 1e-6, rtol = 1e-6)
+function equality_constrained_nls(Solver; problem_set = equality_constrained_nls_set(), atol = 1e-6, rtol = 1e-6)
   @testset "Problem $(nls.meta.name)" for nls in problem_set
+    solver = Solver(nls.meta)
     stats = with_logger(NullLogger()) do
-      solver(nls)
+      SolverCore.solve!(solver, nls)
     end
     ng0 = rtol != 0 ? norm(grad(nls, nls.meta.x0)) : 0
     @test isapprox(stats.solution, ones(nls.meta.nvar), atol = atol + rtol * ng0)
5 changes: 3 additions & 2 deletions src/nls/multiprecision.jl
@@ -12,7 +12,7 @@ The `problem_type` can be
 - :eqnbnd
 - :gen
 """
-function multiprecision_nls(solver, ptype)
+function multiprecision_nls(Solver, ptype)
   F(x) = [x[1] - 1; 2 * (x[2] - x[1]^2)]
   c(x) = [x[1]^2 + x[2]^2]
   c2(x) = [c(x); x[2] - x[1]^2 / 10]
@@ -38,8 +38,9 @@ function multiprecision_nls(solver, ptype)

       ng0 = norm(grad(nls, nls.meta.x0))

+      solver = Solver(nls.meta)
       stats = with_logger(NullLogger()) do
-        solver(nls, atol=ϵ, rtol=ϵ)
+        solve!(solver, nls, atol=ϵ, rtol=ϵ)
       end
       @test eltype(stats.solution) == T
       @test stats.objective isa T
7 changes: 4 additions & 3 deletions src/nls/unconstrained.jl
@@ -50,11 +50,12 @@ end
 Test the `solver` on unconstrained nonlinear least-squares problems.
 If `rtol` is non-zero, the relative error uses the gradient at the initial guess.
 """
-function unconstrained_nls(solver; problem_set = unconstrained_nls_set(), atol = 1e-6, rtol = 1e-6)
+function unconstrained_nls(Solver; problem_set = unconstrained_nls_set(), atol = 1e-6, rtol = 1e-6)

   @testset "Problem $(nls.meta.name)" for nls in problem_set
+    solver = Solver(nls.meta)
     stats = with_logger(NullLogger()) do
-      solver(nls)
+      SolverCore.solve!(solver, nls)
     end
     ng0 = rtol != 0 ? norm(grad(nls, nls.meta.x0)) : 0
     @test isapprox(stats.solution, ones(nls.meta.nvar), atol = atol + rtol * ng0)
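The same two-step pattern applies to least-squares models. A brief sketch (the residual is the one used by the NLS multiprecision helper above; `DummySolver` is from the test suite):

```julia
using ADNLPModels, SolverCore

# ADNLSModel takes the residual F, the starting point, and the number of
# residual equations; solving then follows the construct-then-solve! pattern.
F(x) = [x[1] - 1; 2 * (x[2] - x[1]^2)]
nls = ADNLSModel(F, [-1.2; 1.0], 2)
solver = DummySolver(nls.meta)
stats = SolverCore.solve!(solver, nls)
```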
44 changes: 36 additions & 8 deletions test/dummy-solver.jl
@@ -1,16 +1,44 @@
-function dummy(
+mutable struct DummySolver{T, S} <: AbstractOptSolver{T, S}
+  initialized::Bool
+  params::Dict
+  workspace
+end
+
+function DummySolver(
+  meta::AbstractNLPModelMeta;
+  x0::S = meta.x0,
+  kwargs...,
+) where {S}
+  T = eltype(x0)
+  nvar, ncon = meta.nvar, meta.ncon
+  solver = DummySolver{T, S}(
+    true,
+    Dict{Symbol,Any}(),
+    ( # workspace
+      x = S(undef, nvar),
+    ),
+  )
+  for (k, v) in kwargs
+    solver.params[k] = v
+  end
+  solver
+end
+
+function SolverCore.solve!(
+  solver :: DummySolver{T, S},
   nlp :: AbstractNLPModel;
-  x = copy(nlp.meta.x0),
+  x0::S = nlp.meta.x0,
   atol = 1e-6,
   rtol = 1e-6,
   max_time = 30.0,
   max_eval = 10000,
-  max_iter = 1000
-)
+  max_iter = 1000,
+  kwargs...
+) where {T, S}

-  T = eltype(x)
   status = :unknown
   ℓ, u = T.(nlp.meta.lvar), T.(nlp.meta.uvar)
+  x = solver.workspace.x .= x0
   x .= clamp.(x, ℓ, u)
   ℓidx, uidx = findall(ℓ .> -Inf), findall(u .< Inf)
   n, m = nlp.meta.nvar, nlp.meta.ncon
@@ -87,14 +115,14 @@ function dummy(
     end
   end

-  return GenericExecutionStats(
+  return OptSolverOutput(
     status,
+    x,
     nlp,
-    solution = x,
     objective = obj(nlp, x),
     dual_feas = dual,
     primal_feas = primal,
     elapsed_time = Δt,
-    iter = iter
+    iter = iter,
   )
 end
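The point of the struct/`solve!` split is that allocations move into the constructor: the workspace vector is created once and reused, so repeated solves on the same problem do not reallocate. A hedged usage sketch (names from this file; the warm start uses the `x0` keyword of `solve!` above):

```julia
using ADNLPModels, SolverCore

nlp = ADNLPModel(x -> sum((x .- 1) .^ 2), zeros(5))
solver = DummySolver(nlp.meta)                              # allocates workspace once
stats = SolverCore.solve!(solver, nlp)                      # first solve
stats = SolverCore.solve!(solver, nlp, x0 = 0.5 * ones(5))  # re-solve, warm start
```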
9 changes: 5 additions & 4 deletions test/runtests.jl
@@ -1,7 +1,7 @@
 # stdlib
 using LinearAlgebra, Test
 # JSO
-using NLPModels, SolverTest, SolverTools
+using NLPModels, OptSolver, SolverCore, SolverTest, SolverTools

 include("dummy-solver.jl")

@@ -14,16 +14,17 @@ include("dummy-solver.jl")
     bound_constrained_nls,
     equality_constrained_nls,
   ]
-    foo(dummy)
+    foo(DummySolver)
   end


   @testset "Multiprecision tests" begin
     for ptype in [:unc, :bnd, :equ, :ineq, :eqnbnd, :gen]
-      multiprecision_nlp(dummy, ptype)
+      multiprecision_nlp(DummySolver, ptype)
     end

     for ptype in [:unc, :bnd, :equ, :ineq, :eqnbnd, :gen]
-      multiprecision_nls(dummy, ptype)
+      multiprecision_nls(DummySolver, ptype)
     end
   end
 end