Move to Julia 0.7 (#75)
* Update REQUIRE and update code for Julia 0.7
* Remove Compat
* Set seed in autodiff tests
anriseth authored Jul 14, 2018
1 parent 248631c commit 40f448c
Showing 14 changed files with 37 additions and 38 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
@@ -4,7 +4,7 @@ os:
   - linux
   - osx
 julia:
-  - 0.6
+  - 0.7
   - nightly
 notifications:
   email: false
3 changes: 1 addition & 2 deletions REQUIRE
@@ -1,5 +1,4 @@
-julia 0.6
-Compat
+julia 0.7-alpha
 DiffEqDiffTools
 ForwardDiff
 Calculus
5 changes: 2 additions & 3 deletions appveyor.yml
@@ -1,12 +1,11 @@
 environment:
   matrix:
-  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x86/0.6/julia-0.6-latest-win32.exe"
-  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x64/0.6/julia-0.6-latest-win64.exe"
+  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x86/0.7/julia-0.7-latest-win32.exe"
+  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x64/0.7/julia-0.7-latest-win64.exe"
 matrix:
   allow_failures:
   - JULIA_URL: "https://julialangnightlies-s3.julialang.org/bin/winnt/x86/julia-latest-win32.exe"
   - JULIA_URL: "https://julialangnightlies-s3.julialang.org/bin/winnt/x64/julia-latest-win64.exe"

 branches:
   only:
   - master
4 changes: 1 addition & 3 deletions src/NLSolversBase.jl
@@ -2,10 +2,8 @@ __precompile__(true)

 module NLSolversBase

-using Compat
 using DiffEqDiffTools, ForwardDiff, Calculus
-import Compat.Distributed: clear!
-import Compat.LinearAlgebra: gradient
+import Distributed: clear!
 export AbstractObjective,
        NonDifferentiable,
        OnceDifferentiable,
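The hunk above drops Compat in favour of Julia 0.7's split-out standard libraries. A minimal sketch of the pattern, assuming only that the stdlib modules are available (the `gradient` import is simply deleted by the commit, so the LinearAlgebra line below is an illustrative stand-in):

    # Julia 0.7 moves Base functionality into stdlib packages that must be
    # loaded explicitly; the Compat shims become unnecessary.
    import Distributed: clear!   # was `import Compat.Distributed: clear!`
    using LinearAlgebra          # was `import Compat.LinearAlgebra: gradient`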
4 changes: 2 additions & 2 deletions src/objective_types/constraints.jl
@@ -71,8 +71,8 @@ function nconstraints_x(cb::ConstraintBounds)
     me = isempty(cb.eqx) ? 0 : maximum(cb.eqx)
     nmax = max(mi, me)
     hasconstraint = falses(nmax)
-    hasconstraint[cb.ineqx] = true
-    hasconstraint[cb.eqx] = true
+    hasconstraint[cb.ineqx] .= true
+    hasconstraint[cb.eqx] .= true
     sum(hasconstraint)
 end

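The `.=` change reflects the 0.7 rule that writing a scalar into an indexed slice must broadcast. A minimal sketch, with illustrative names:

    mask = falses(5)      # BitVector of falses
    idx  = [1, 3]
    mask[idx] .= true     # `mask[idx] = true` is deprecated on 0.7
    sum(mask)             # == 2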
4 changes: 2 additions & 2 deletions src/objective_types/incomplete.jl
@@ -73,15 +73,15 @@ function NonDifferentiable(t::Union{InplaceObjective, NotInplaceObjective}, x::A
 end


-function TwiceDifferentiable(t::InplaceObjective{<: Void, <: Void, TH}, x::AbstractArray, F::Real = real(zero(eltype(x))), G::AbstractArray = similar(x), H = alloc_H(x)) where {TH}
+function TwiceDifferentiable(t::InplaceObjective{<: Nothing, <: Nothing, TH}, x::AbstractArray, F::Real = real(zero(eltype(x))), G::AbstractArray = similar(x), H = alloc_H(x)) where {TH}
     f = x -> t.fgh(F, nothing, nothing, x)
     df = (G, x) -> t.fgh(nothing, G, nothing, x)
     fdf = (G, x) -> t.fgh(F, G, nothing, x)
     h = (H, x) -> t.fgh(F, nothing, H, x)
     TwiceDifferentiable(f, df, fdf, h, x, F, G, H)
 end

-function TwiceDifferentiable(t::InplaceObjective{<: Void, <: Void, TH}, x::AbstractVector, F::Real = real(zero(eltype(x))), G::AbstractVector = similar(x)) where {TH}
+function TwiceDifferentiable(t::InplaceObjective{<: Nothing, <: Nothing, TH}, x::AbstractVector, F::Real = real(zero(eltype(x))), G::AbstractVector = similar(x)) where {TH}

     H = alloc_H(x)
     f = x -> t.fgh(F, nothing, nothing, x)
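The only change in these signatures is the 0.7 rename of `Void` to `Nothing`. A hedged sketch of the rename, using an illustrative function rather than one from the package:

    describe(::Nothing) = "no value"   # was `describe(::Void)` on 0.6
    describe(nothing)                  # `nothing` is the unique instance of Nothing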
2 changes: 1 addition & 1 deletion src/objective_types/twicedifferentiablehv.jl
@@ -30,7 +30,7 @@ end

 function gradient!!(obj::TwiceDifferentiableHV, x)
     obj.df_calls .+= 1
-    copy!(obj.x_df, x)
+    copyto!(obj.x_df, x)
     obj.fdf(obj.DF, x)
 end

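`copy!` on arrays is renamed `copyto!` in 0.7; the behaviour is unchanged. For example:

    x   = rand(3)
    buf = similar(x)
    copyto!(buf, x)   # was `copy!(buf, x)` on 0.6
    buf == x          # true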
2 changes: 1 addition & 1 deletion test/REQUIRE
@@ -1,2 +1,2 @@
-OptimTestProblems 1.2
+OptimTestProblems 2.0
 RecursiveArrayTools
10 changes: 6 additions & 4 deletions test/autodiff.jl
@@ -1,4 +1,6 @@
 @testset "autodiff" begin
+    srand(0)
+
     # Should throw, as :wah is not a proper autodiff choice
     @test_throws ErrorException OnceDifferentiable(x->x, rand(10); autodiff=:wah)
     @test_throws ErrorException OnceDifferentiable(x->x, rand(10), 0.0; autodiff=:wah)
@@ -46,9 +48,9 @@
     x = rand(nx)
     f(x) = sum(x.^3)
     fx = f(x)
-    g(G, x) = copyto!(G, 3.*x.^2)
+    g(G, x) = copyto!(G, 3 .* x.^2)
     gx = g(NLSolversBase.alloc_DF(x, 0.0), x)
-    h(H, x) = copyto!(H, Diagonal(6.*x))
+    h(H, x) = copyto!(H, Diagonal(6 .* x))
     hx = h(fill(0.0, nx, nx), x)
     for dtype in (OnceDifferentiable, TwiceDifferentiable)
         for autodiff in (:finite, :forward)
@@ -77,7 +79,7 @@
         end
     end
     for autodiff in (:finite, :forward)
-        td = TwiceDifferentiable(x->sum(x), (G, x)->copyto!(G, ones(x)), copy(x); autodiff = autodiff)
+        td = TwiceDifferentiable(x->sum(x), (G, x)->copyto!(G, fill!(copy(x),1)), copy(x); autodiff = autodiff)
         value(td)
         value!(td, x)
         value_gradient!(td, x)
@@ -93,7 +95,7 @@
         gradient!(td, x)
         hessian!(td, x)
     end
-    for od = (OnceDifferentiable(x->sum(x), (G, x)->copyto!(G, ones(x)), copy(x)), OnceDifferentiable(x->sum(x), (G, x)->copyto!(G, ones(x)), copy(x), 0.0))
+    for od = (OnceDifferentiable(x->sum(x), (G, x)->copyto!(G, fill!(copy(x),1)), copy(x)), OnceDifferentiable(x->sum(x), (G, x)->copyto!(G, fill!(copy(x),1)), copy(x), 0.0))
         td = TwiceDifferentiable(od; autodiff = autodiff)
         value(td)
         value!(td, x)
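Three 0.7 changes drive this file: tests now seed the RNG, broadcasting with numeric literals needs explicit spaces, and `ones(x::AbstractArray)` is deprecated. A minimal sketch of all three, assuming `using Random`:

    using Random
    srand(0)               # seed for reproducibility; renamed Random.seed!(0) in 1.0
    x = rand(3)
    g = 3 .* x.^2          # `3.*x.^2` no longer parses: `3.` reads as a float literal
    e = fill!(copy(x), 1)  # replaces the deprecated `ones(x)`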
1 change: 0 additions & 1 deletion test/incomplete.jl
@@ -85,7 +85,6 @@

 end
 @testset "incomplete objectives vectors" begin
-    import Compat: copyto!
     function tf(x)
         x.^2
     end
2 changes: 1 addition & 1 deletion test/interface.jl
@@ -153,7 +153,7 @@
     if prob.istwicedifferentiable
         hv!(storage::Vector, x::Vector, v::Vector) = begin
             n = length(x)
-            H = Matrix{Float64}(n, n)
+            H = Matrix{Float64}(undef, n, n)
             MVP.hessian(prob)(H, x)
             storage .= H * v
         end
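Array constructors in 0.7 require an explicit `undef` to allocate uninitialized storage. For example:

    n = 4
    H = Matrix{Float64}(undef, n, n)   # `Matrix{Float64}(n, n)` no longer exists
    # entries are uninitialized until written, as hv! does on the next line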
2 changes: 1 addition & 1 deletion test/objective_types.jl
@@ -17,7 +17,7 @@
     @test od.df_calls == [0]
     od.x_df .= x_seed
     gold = copy(od.DF)
-    xnew = rand(size(x_seed))
+    xnew = rand(eltype(x_seed), size(x_seed))
     gnew = gradient(od, xnew)
     @test od.x_df == x_seed
     @test od.DF == gold
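Passing a bare dims tuple to `rand` is ambiguous on newer Julia (a tuple can also serve as a collection to sample from), so the commit spells out the element type and dims. A minimal sketch with an illustrative seed vector:

    x_seed = zeros(3)
    xnew = rand(eltype(x_seed), size(x_seed))   # explicit eltype and dims
    size(xnew) == size(x_seed)                  # true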
10 changes: 6 additions & 4 deletions test/runtests.jl
@@ -1,11 +1,13 @@
-using NLSolversBase
-using Compat, Compat.Test, Compat.Random, Compat.LinearAlgebra
+using NLSolversBase, Test
+using Random
+using LinearAlgebra: Diagonal, I
+using SparseArrays
 using OptimTestProblems
 using RecursiveArrayTools
 MVP = OptimTestProblems.MultivariateProblems

-# TODO: Use OptimTestProblems
-# TODO: MultivariateProblems.UnconstrainedProblems.exampples["Exponential"]
+# TODO: Use OptimTestProblems (but it does not have exponential_gradient_hession etc.)
+# TODO: MultivariateProblems.UnconstrainedProblems.examples["Exponential"]

 # Test example
 function exponential(x)
24 changes: 12 additions & 12 deletions test/sparse.jl
@@ -1,13 +1,13 @@
 @testset "sparse" begin
     @testset "𝐑ⁿ → 𝐑" begin
         f(x) = sum(x->x.^2, x)
-        g(G, x) = copy!(G, 2.*x)
-        h(H, x) = H .= speye(size(H)...).*2
+        g(G, x) = copy!(G, 2 .* x)
+        h(H, x) = H .= sparse(2.0I, size(H)...)

         obj_dense = TwiceDifferentiable(f, g, h, rand(40))
         @test !issparse(obj_dense.H)

-        obj_sparse = TwiceDifferentiable(f, g, h, rand(40), 0.0, rand(40), speye(40))
+        obj_sparse = TwiceDifferentiable(f, g, h, rand(40), 0.0, rand(40), sparse(1.0I, 40, 40))
         @test typeof(obj_sparse.H) <: SparseMatrixCSC

         function fgh!(F, G, H, x)
@@ -16,20 +16,20 @@
             end

             if !(G == nothing)
-                copy!(G, 2.*x)
+                copy!(G, 2 .* x)
             end
             if !(H == nothing)
-                H .= speye(size(H)...).*2
+                H .= sparse(2.0I, size(H)...)
             end
             return fx
         end

-        obj_fgh = TwiceDifferentiable(NLSolversBase.only_fgh!(fgh!), rand(40), 0.0, rand(40), speye(40))
+        obj_fgh = TwiceDifferentiable(NLSolversBase.only_fgh!(fgh!), rand(40), 0.0, rand(40), sparse(1.0I, 40, 40))
         @test typeof(obj_fgh.H) <: SparseMatrixCSC
     end
     @testset "𝐑ⁿ → 𝐑ⁿ" begin
-        f(F, x) = copy!(F, 2.*x)
-        j(J, x) = J .= speye(size(J)...).*2
+        f(F, x) = copy!(F, 2 .* x)
+        j(J, x) = J .= sparse(2.0I, size(J)...)

         # Test that with no spec on the Jacobian cache it is dense
         obj_dense = OnceDifferentiable(f, j, rand(40), rand(40))
@@ -39,20 +39,20 @@
         @test !issparse(obj_dense.DF)


-        obj_sparse = OnceDifferentiable(f, j, rand(40), rand(40), speye(40))
+        obj_sparse = OnceDifferentiable(f, j, rand(40), rand(40), sparse(1.0I, 40, 40))
         @test typeof(obj_sparse.DF) <: SparseMatrixCSC

         function fj!(F, J, x)
             if !(F == nothing)
-                copy!(G, 2.*x)
+                copy!(G, 2 .* x)
             end
             if !(J == nothing)
-                J .= speye(size(H)...).*2
+                J .= sparse(2.0I, size(J)...)
             end
             return fx
         end

-        obj_fj = OnceDifferentiable(NLSolversBase.only_fj!(fj!), rand(40), rand(40), speye(40))
+        obj_fj = OnceDifferentiable(NLSolversBase.only_fj!(fj!), rand(40), rand(40), sparse(1.0I, 40, 40))
         @test typeof(obj_fj.DF) <: SparseMatrixCSC
     end
 end
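`speye` is removed in 0.7; sparse identity matrices are built from the `UniformScaling` object `I` instead. A minimal sketch of the replacement, assuming the stdlib packages are loaded:

    using SparseArrays, LinearAlgebra
    S = sparse(1.0I, 40, 40)   # replaces speye(40)
    issparse(S)                # true
    T = sparse(2.0I, 4, 4)     # scaled sparse identity, cf. the Hessian updates above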
