diff --git a/.travis.yml b/.travis.yml
index 29353cc..07869c4 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,7 +4,7 @@ os:
   - linux
   - osx
 julia:
-  - 0.6
+  - 0.7
   - nightly
 notifications:
   email: false
diff --git a/REQUIRE b/REQUIRE
index 053f9fc..139b9ef 100644
--- a/REQUIRE
+++ b/REQUIRE
@@ -1,5 +1,4 @@
-julia 0.6
-Compat
+julia 0.7-alpha
 DiffEqDiffTools
 ForwardDiff
 Calculus
diff --git a/appveyor.yml b/appveyor.yml
index 0dba1b9..b663e0a 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -1,12 +1,11 @@
 environment:
   matrix:
-  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x86/0.6/julia-0.6-latest-win32.exe"
-  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x64/0.6/julia-0.6-latest-win64.exe"
+  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x86/0.7/julia-0.7-latest-win32.exe"
+  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x64/0.7/julia-0.7-latest-win64.exe"
 matrix:
   allow_failures:
   - JULIA_URL: "https://julialangnightlies-s3.julialang.org/bin/winnt/x86/julia-latest-win32.exe"
   - JULIA_URL: "https://julialangnightlies-s3.julialang.org/bin/winnt/x64/julia-latest-win64.exe"
-
 branches:
   only:
     - master
diff --git a/src/NLSolversBase.jl b/src/NLSolversBase.jl
index 8a38ea5..32b3746 100644
--- a/src/NLSolversBase.jl
+++ b/src/NLSolversBase.jl
@@ -2,10 +2,8 @@ __precompile__(true)
 module NLSolversBase
 
-using Compat
 using DiffEqDiffTools, ForwardDiff, Calculus
-import Compat.Distributed: clear!
-import Compat.LinearAlgebra: gradient
+import Distributed: clear!
 
 export AbstractObjective,
        NonDifferentiable,
        OnceDifferentiable,
diff --git a/src/objective_types/constraints.jl b/src/objective_types/constraints.jl
index 26ac2d8..042770d 100644
--- a/src/objective_types/constraints.jl
+++ b/src/objective_types/constraints.jl
@@ -71,8 +71,8 @@ function nconstraints_x(cb::ConstraintBounds)
     me = isempty(cb.eqx) ? 0 : maximum(cb.eqx)
     nmax = max(mi, me)
     hasconstraint = falses(nmax)
-    hasconstraint[cb.ineqx] = true
-    hasconstraint[cb.eqx] = true
+    hasconstraint[cb.ineqx] .= true
+    hasconstraint[cb.eqx] .= true
     sum(hasconstraint)
 end
diff --git a/src/objective_types/incomplete.jl b/src/objective_types/incomplete.jl
index fe04654..6b2bec2 100644
--- a/src/objective_types/incomplete.jl
+++ b/src/objective_types/incomplete.jl
@@ -73,7 +73,7 @@ function NonDifferentiable(t::Union{InplaceObjective, NotInplaceObjective}, x::A
 end
 
-function TwiceDifferentiable(t::InplaceObjective{<: Void, <: Void, TH}, x::AbstractArray, F::Real = real(zero(eltype(x))), G::AbstractArray = similar(x), H = alloc_H(x)) where {TH}
+function TwiceDifferentiable(t::InplaceObjective{<: Nothing, <: Nothing, TH}, x::AbstractArray, F::Real = real(zero(eltype(x))), G::AbstractArray = similar(x), H = alloc_H(x)) where {TH}
     f   = x -> t.fgh(F, nothing, nothing, x)
     df  = (G, x) -> t.fgh(nothing, G, nothing, x)
     fdf = (G, x) -> t.fgh(F, G, nothing, x)
@@ -81,7 +81,7 @@ function TwiceDifferentiable(t::InplaceObjective{<: Void, <: Void, TH}, x::Abstr
     TwiceDifferentiable(f, df, fdf, h, x, F, G, H)
 end
 
-function TwiceDifferentiable(t::InplaceObjective{<: Void, <: Void, TH}, x::AbstractVector, F::Real = real(zero(eltype(x))), G::AbstractVector = similar(x)) where {TH}
+function TwiceDifferentiable(t::InplaceObjective{<: Nothing, <: Nothing, TH}, x::AbstractVector, F::Real = real(zero(eltype(x))), G::AbstractVector = similar(x)) where {TH}
     H = alloc_H(x)
 
     f = x -> t.fgh(F, nothing, nothing, x)
diff --git a/src/objective_types/twicedifferentiablehv.jl b/src/objective_types/twicedifferentiablehv.jl
index a73befc..2f83a25 100644
--- a/src/objective_types/twicedifferentiablehv.jl
+++ b/src/objective_types/twicedifferentiablehv.jl
@@ -30,7 +30,7 @@ end
 
 function gradient!!(obj::TwiceDifferentiableHV, x)
     obj.df_calls .+= 1
-    copy!(obj.x_df, x)
+    copyto!(obj.x_df, x)
     obj.fdf(obj.DF, x)
 end
diff --git a/test/REQUIRE b/test/REQUIRE
index 11c6425..f1d2ead 100644
--- a/test/REQUIRE
+++ b/test/REQUIRE
@@ -1,2 +1,2 @@
-OptimTestProblems 1.2
+OptimTestProblems 2.0
 RecursiveArrayTools
diff --git a/test/autodiff.jl b/test/autodiff.jl
index a7b300c..bb96ece 100644
--- a/test/autodiff.jl
+++ b/test/autodiff.jl
@@ -1,4 +1,6 @@
 @testset "autodiff" begin
+    srand(0)
+
     # Should throw, as :wah is not a proper autodiff choice
     @test_throws ErrorException OnceDifferentiable(x->x, rand(10); autodiff=:wah)
     @test_throws ErrorException OnceDifferentiable(x->x, rand(10), 0.0; autodiff=:wah)
@@ -46,9 +48,9 @@
         x = rand(nx)
         f(x) = sum(x.^3)
         fx = f(x)
-        g(G, x) = copyto!(G, 3.*x.^2)
+        g(G, x) = copyto!(G, 3 .* x.^2)
         gx = g(NLSolversBase.alloc_DF(x, 0.0), x)
-        h(H, x) = copyto!(H, Diagonal(6.*x))
+        h(H, x) = copyto!(H, Diagonal(6 .* x))
         hx = h(fill(0.0, nx, nx), x)
         for dtype in (OnceDifferentiable, TwiceDifferentiable)
             for autodiff in (:finite, :forward)
@@ -77,7 +79,7 @@
         end
     end
     for autodiff in (:finite, :forward)
-        td = TwiceDifferentiable(x->sum(x), (G, x)->copyto!(G, ones(x)), copy(x); autodiff = autodiff)
+        td = TwiceDifferentiable(x->sum(x), (G, x)->copyto!(G, fill!(copy(x),1)), copy(x); autodiff = autodiff)
         value(td)
         value!(td, x)
         value_gradient!(td, x)
@@ -93,7 +95,7 @@
         gradient!(td, x)
         hessian!(td, x)
     end
-    for od = (OnceDifferentiable(x->sum(x), (G, x)->copyto!(G, ones(x)), copy(x)), OnceDifferentiable(x->sum(x), (G, x)->copyto!(G, ones(x)), copy(x), 0.0))
+    for od = (OnceDifferentiable(x->sum(x), (G, x)->copyto!(G, fill!(copy(x),1)), copy(x)), OnceDifferentiable(x->sum(x), (G, x)->copyto!(G, fill!(copy(x),1)), copy(x), 0.0))
         td = TwiceDifferentiable(od; autodiff = autodiff)
         value(td)
         value!(td, x)
diff --git a/test/incomplete.jl b/test/incomplete.jl
index f509ecb..686c825 100644
--- a/test/incomplete.jl
+++ b/test/incomplete.jl
@@ -85,7 +85,6 @@ end
 
 @testset "incomplete objectives vectors" begin
-    import Compat: copyto!
     function tf(x)
         x.^2
     end
diff --git a/test/interface.jl b/test/interface.jl
index 3009a4a..52f5f66 100644
--- a/test/interface.jl
+++ b/test/interface.jl
@@ -153,7 +153,7 @@
         if prob.istwicedifferentiable
             hv!(storage::Vector, x::Vector, v::Vector) = begin
                 n = length(x)
-                H = Matrix{Float64}(n, n)
+                H = Matrix{Float64}(undef, n, n)
                 MVP.hessian(prob)(H, x)
                 storage .= H * v
             end
diff --git a/test/objective_types.jl b/test/objective_types.jl
index 879cdbe..ac076eb 100644
--- a/test/objective_types.jl
+++ b/test/objective_types.jl
@@ -17,7 +17,7 @@
         @test od.df_calls == [0]
         od.x_df .= x_seed
         gold = copy(od.DF)
-        xnew = rand(size(x_seed))
+        xnew = rand(eltype(x_seed), size(x_seed))
         gnew = gradient(od, xnew)
         @test od.x_df == x_seed
         @test od.DF == gold
diff --git a/test/runtests.jl b/test/runtests.jl
index 8cc3599..0d3f2f3 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,11 +1,13 @@
-using NLSolversBase
-using Compat, Compat.Test, Compat.Random, Compat.LinearAlgebra
+using NLSolversBase, Test
+using Random
+using LinearAlgebra: Diagonal, I
+using SparseArrays
 using OptimTestProblems
 using RecursiveArrayTools
 MVP = OptimTestProblems.MultivariateProblems
 
-# TODO: Use OptimTestProblems
-# TODO: MultivariateProblems.UnconstrainedProblems.exampples["Exponential"]
+# TODO: Use OptimTestProblems (but it does not have exponential_gradient_hessian etc.)
+# TODO: MultivariateProblems.UnconstrainedProblems.examples["Exponential"]
 
 # Test example
 function exponential(x)
diff --git a/test/sparse.jl b/test/sparse.jl
index b4f7001..40df7e4 100644
--- a/test/sparse.jl
+++ b/test/sparse.jl
@@ -1,13 +1,13 @@
 @testset "sparse" begin
     @testset "𝐑ⁿ → 𝐑" begin
         f(x) = sum(x->x.^2, x)
-        g(G, x) = copy!(G, 2.*x)
-        h(H, x) = H .= speye(size(H)...).*2
+        g(G, x) = copy!(G, 2 .* x)
+        h(H, x) = H .= sparse(2.0I, size(H)...)
 
         obj_dense = TwiceDifferentiable(f, g, h, rand(40))
         @test !issparse(obj_dense.H)
 
-        obj_sparse = TwiceDifferentiable(f, g, h, rand(40), 0.0, rand(40), speye(40))
+        obj_sparse = TwiceDifferentiable(f, g, h, rand(40), 0.0, rand(40), sparse(1.0I, 40, 40))
         @test typeof(obj_sparse.H) <: SparseMatrixCSC
 
         function fgh!(F, G, H, x)
@@ -16,20 +16,20 @@
             end
             if !(G == nothing)
-                copy!(G, 2.*x)
+                copy!(G, 2 .* x)
             end
             if !(H == nothing)
-                H .= speye(size(H)...).*2
+                H .= sparse(2.0I, size(H)...)
             end
             return fx
         end
 
-        obj_fgh = TwiceDifferentiable(NLSolversBase.only_fgh!(fgh!), rand(40), 0.0, rand(40), speye(40))
+        obj_fgh = TwiceDifferentiable(NLSolversBase.only_fgh!(fgh!), rand(40), 0.0, rand(40), sparse(1.0I, 40, 40))
         @test typeof(obj_fgh.H) <: SparseMatrixCSC
     end
 
     @testset "𝐑ⁿ → 𝐑ⁿ" begin
-        f(F, x) = copy!(F, 2.*x)
-        j(J, x) = J .= speye(size(J)...).*2
+        f(F, x) = copy!(F, 2 .* x)
+        j(J, x) = J .= sparse(2.0I, size(J)...)
 
         # Test that with no spec on the Jacobian cache it is dense
         obj_dense = OnceDifferentiable(f, j, rand(40), rand(40))
@@ -39,20 +39,20 @@
 
         @test !issparse(obj_dense.DF)
 
-        obj_sparse = OnceDifferentiable(f, j, rand(40), rand(40), speye(40))
+        obj_sparse = OnceDifferentiable(f, j, rand(40), rand(40), sparse(1.0I, 40, 40))
         @test typeof(obj_sparse.DF) <: SparseMatrixCSC
 
         function fj!(F, J, x)
             if !(F == nothing)
-                copy!(G, 2.*x)
+                copy!(F, 2 .* x)
             end
             if !(J == nothing)
-                J .= speye(size(H)...).*2
+                J .= sparse(2.0I, size(J)...)
             end
             return fx
         end
 
-        obj_fj = OnceDifferentiable(NLSolversBase.only_fj!(fj!), rand(40), rand(40), speye(40))
+        obj_fj = OnceDifferentiable(NLSolversBase.only_fj!(fj!), rand(40), rand(40), sparse(1.0I, 40, 40))
         @test typeof(obj_fj.DF) <: SparseMatrixCSC
     end
 end
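
Migration note (not part of the patch): the changes above are mechanical applications of the Julia 0.6 → 0.7 deprecations. A minimal, self-contained sketch of the idioms this diff adopts, with illustrative names only:

```julia
# Illustrative scratch file, not part of this patch.
using LinearAlgebra: I
using SparseArrays: sparse, issparse

x = rand(5)

copyto!(similar(x), x)            # copy!(dest, src) on arrays is now copyto!
g = 2 .* x                        # scalar-times-array needs the spaced dot: 2 .* x, not 2.*x
o = fill!(similar(x), 1)          # replaces ones(x) ("an array of ones shaped like x")
H = Matrix{Float64}(undef, 5, 5)  # uninitialized arrays take an explicit undef flag
S = sparse(1.0I, 5, 5)            # replaces speye(5); built from UniformScaling
@assert issparse(S)

mask = falses(5)
mask[[1, 3]] .= true              # assigning a scalar into an indexed slice now broadcasts with .=

val = nothing                     # the type of `nothing` is Nothing (formerly Void)
@assert val === nothing
```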
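Usage note on the sparse changes: as test/sparse.jl checks, seeding the Hessian cache with a sparse matrix keeps `obj.H` sparse. A sketch using only the constructor signature already exercised in that test (the objective is the test's `sum` of squares, written here with `abs2`):

```julia
using NLSolversBase, SparseArrays
using LinearAlgebra: I

f(x)     = sum(abs2, x)                      # F(x) = Σ xᵢ²
g!(G, x) = copyto!(G, 2 .* x)                # in-place gradient
h!(H, x) = H .= sparse(2.0I, size(H)...)     # in-place (sparse) Hessian

n = 40
# The seventh argument seeds the Hessian cache; a sparse seed keeps it sparse.
obj = TwiceDifferentiable(f, g!, h!, rand(n), 0.0, rand(n), sparse(1.0I, n, n))
hessian!(obj, rand(n))
@assert obj.H isa SparseMatrixCSC
```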
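The `only_fgh!` path migrated above fuses value, gradient, and Hessian into a single callback that receives `nothing` for any slot the caller did not request. A hedged sketch of that contract (the convention of returning the objective value when `F` is requested is inferred from the `fgh!` test above):

```julia
using NLSolversBase, SparseArrays
using LinearAlgebra: I

# One callback serves all three requests; unrequested slots arrive as `nothing`.
function fgh!(F, G, H, x)
    if G !== nothing
        copyto!(G, 2 .* x)               # fill the gradient when asked
    end
    if H !== nothing
        H .= sparse(2.0I, size(H)...)    # fill the Hessian when asked
    end
    if F !== nothing
        return sum(abs2, x)              # return the value when asked
    end
    nothing
end

obj = TwiceDifferentiable(NLSolversBase.only_fgh!(fgh!), rand(40), 0.0, rand(40), sparse(1.0I, 40, 40))
value_gradient!(obj, rand(40))           # one fgh! call fills both caches
```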