Update sparspak #253

Merged
merged 4 commits on Jan 10, 2023
6 changes: 4 additions & 2 deletions Project.toml
@@ -39,15 +39,17 @@ Reexport = "1"
SciMLBase = "1.68"
Setfield = "0.7, 0.8, 1"
SnoopPrecompile = "1"
Sparspak = "0.3"
Sparspak = "0.3.4"
UnPack = "1"
julia = "1.6"

[extras]
MultiFloats = "bdf0d083-296b-4888-a5b6-7498122e68a5"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[targets]
test = ["Test", "Pkg", "Random", "SafeTestsets"]
test = ["Test", "Pkg", "Random", "SafeTestsets", "MultiFloats", "ForwardDiff"]
20 changes: 20 additions & 0 deletions docs/src/solvers/solvers.md
@@ -21,6 +21,12 @@ For sparse LU-factorizations, `KLUFactorization` if there is less structure
to the sparsity pattern and `UMFPACKFactorization` if there is more structure.
Pardiso.jl's methods are also known to be very efficient sparse linear solvers.

While these sparse factorizations are based on implementations in other languages,
and are therefore constrained to standard number types (`Float64`, `Float32`, and
their complex counterparts), `SparspakFactorization` is able to handle general
number types, e.g. those defined by `ForwardDiff.jl`, `MultiFloats.jl`,
or `IntervalArithmetic.jl`.
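
For instance, a sparse system with `MultiFloats.Float64x2` entries can be solved like any
other `LinearProblem`. This is a minimal sketch; the small tridiagonal matrix is purely
illustrative:

```julia
using LinearSolve, SparseArrays, MultiFloats

# Illustrative 3×3 sparse system with extended-precision (Float64x2) entries
A = sparse([4.0 1.0 0.0; 1.0 4.0 1.0; 0.0 1.0 4.0]) .|> Float64x2
b = ones(3) .|> Float64x2

prob = LinearProblem(A, b)
sol = solve(prob, SparspakFactorization())
sol.u  # solution vector with Float64x2 entries
```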

As sparse matrices get larger, iterative solvers tend to become more efficient than
factorization methods when only a looser solution tolerance is required.

@@ -147,6 +153,20 @@ Base.@kwdef struct PardisoJL <: SciMLLinearSolveAlgorithm
end
```

### Sparspak.jl

Sparspak.jl is a Julia translation of the well-known sparse matrix software Sparspak
(Waterloo Sparse Matrix Package) for solving large sparse systems of linear algebraic
equations. Sparspak is composed of the subroutines from the book "Computer Solution of
Large Sparse Positive Definite Systems" by Alan George and Joseph Liu. Originally written
in Fortran 77 and later rewritten in Fortran 90, the software has now been translated into
Julia. The Julia rewrite is released under the MIT license with express permission from
the authors of the Fortran package. The package uses multiple dispatch to route around
standard BLAS routines in the case of, e.g., arbitrary-precision floating-point numbers
or `ForwardDiff.Dual` numbers. This allows, for example, automatic differentiation (AD)
of a sparse-matrix solve.
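
As a rough illustration of that capability (the parameterized matrix below is
hypothetical), forward-mode AD can be propagated straight through the sparse solve:

```julia
using LinearSolve, SparseArrays, ForwardDiff

# Sum of the solution of A(p) * x = b as a function of the scalar parameter p.
# The 3×3 tridiagonal matrix here is purely illustrative.
function solve_sum(p)
    vals = [4 + p, one(p), one(p), 4 + p, one(p), one(p), 4 + p]
    A = sparse([1, 1, 2, 2, 2, 3, 3], [1, 2, 1, 2, 3, 2, 3], vals)
    b = ones(eltype(vals), 3)
    sol = solve(LinearProblem(A, b), SparspakFactorization())
    return sum(sol.u)
end

# The matrix entries become ForwardDiff.Dual numbers inside the call,
# so the derivative is propagated through the Sparspak factorization.
ForwardDiff.derivative(solve_sum, 0.1)
```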


### CUDA.jl

Note that `CuArrays` are supported by `GenericFactorization` in the “normal” way.
1 change: 1 addition & 0 deletions src/LinearSolve.jl
@@ -78,6 +78,7 @@ end
sol = solve(prob)
sol = solve(prob, KLUFactorization())
sol = solve(prob, UMFPACKFactorization())
sol = solve(prob, SparspakFactorization())
end
end

26 changes: 12 additions & 14 deletions src/factorization.jl
@@ -503,30 +503,28 @@ end

## SparspakFactorization is here since it's MIT licensed, not GPL

struct SparspakFactorization <: AbstractFactorization end
Base.@kwdef struct SparspakFactorization <: AbstractFactorization
reuse_symbolic::Bool = true
end

function init_cacheval(::SparspakFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol,
reltol,
verbose::Bool, assumptions::OperatorAssumptions)
A = convert(AbstractMatrix, A)
p = Sparspak.Problem.Problem(size(A)...)
Sparspak.Problem.insparse!(p, A)
Sparspak.Problem.infullrhs!(p, b)
s = Sparspak.SparseSolver.SparseSolver(p)
return s
return sparspaklu(A, factorize=false)
end

function SciMLBase.solve(cache::LinearCache, alg::SparspakFactorization; kwargs...)
A = cache.A
A = convert(AbstractMatrix, A)
if cache.isfresh
p = Sparspak.Problem.Problem(size(A)...)
Sparspak.Problem.insparse!(p, A)
Sparspak.Problem.infullrhs!(p, cache.b)
s = Sparspak.SparseSolver.SparseSolver(p)
cache = set_cacheval(cache, s)
if cache.cacheval !== nothing && alg.reuse_symbolic
fact = sparspaklu!(cache.cacheval, A)
else
fact = sparspaklu(A)
end
cache = set_cacheval(cache, fact)
end
Sparspak.SparseSolver.solve!(cache.cacheval)
copyto!(cache.u, cache.cacheval.p.x)
SciMLBase.build_linear_solution(alg, cache.u, nothing, cache)
y = ldiv!(cache.u, cache.cacheval, cache.b)
SciMLBase.build_linear_solution(alg, y, nothing, cache)
end
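
For context, a brief sketch of the Sparspak.jl calls the new implementation relies on
(the matrices are illustrative): `sparspaklu` performs the symbolic ordering together
with the numeric factorization, while `sparspaklu!` refactorizes an existing object for
a new matrix with the same sparsity pattern, which is what `reuse_symbolic = true` takes
advantage of between solves.

```julia
using Sparspak, SparseArrays
using LinearAlgebra: ldiv!

A1 = sparse([4.0 1.0 0.0; 1.0 4.0 1.0; 0.0 1.0 4.0])
b  = ones(3)
x  = similar(b)

# First factorization: symbolic ordering + numeric factorization
fact = sparspaklu(A1)
ldiv!(x, fact, b)

# New values, same sparsity pattern: refactorize in place,
# reusing the symbolic analysis from the first call
A2 = 2 .* A1
sparspaklu!(fact, A2)
ldiv!(x, fact, b)
```
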
46 changes: 44 additions & 2 deletions test/basictests.jl
@@ -1,7 +1,9 @@
using LinearSolve, LinearAlgebra, SparseArrays
using LinearSolve, LinearAlgebra, SparseArrays, MultiFloats, ForwardDiff
using Test
import Random

const Dual64=ForwardDiff.Dual{Nothing,Float64,1}

n = 8
A = Matrix(I, n, n)
b = ones(n)
@@ -124,7 +126,7 @@ end
@test X * solve(cache) ≈ b1
end

@testset "Sparspak Factorization" begin
@testset "Sparspak Factorization (Float64)" begin
A1 = sparse(A / 1)
b1 = rand(n)
x1 = zero(b)
@@ -137,6 +139,46 @@ end
test_interface(SparspakFactorization(), prob1, prob2)
end

@testset "Sparspak Factorization (Float64x1)" begin
A1 = sparse(A / 1) .|> Float64x1
b1 = rand(n) .|> Float64x1
x1 = zero(b) .|> Float64x1
A2 = sparse(A / 2) .|> Float64x1
b2 = rand(n) .|> Float64x1
x2 = zero(b) .|> Float64x1

prob1 = LinearProblem(A1, b1; u0 = x1)
prob2 = LinearProblem(A2, b2; u0 = x2)
test_interface(SparspakFactorization(), prob1, prob2)
end

@testset "Sparspak Factorization (Float64x2)" begin
A1 = sparse(A / 1) .|> Float64x2
b1 = rand(n) .|> Float64x2
x1 = zero(b) .|> Float64x2
A2 = sparse(A / 2) .|> Float64x2
b2 = rand(n) .|> Float64x2
x2 = zero(b) .|> Float64x2

prob1 = LinearProblem(A1, b1; u0 = x1)
prob2 = LinearProblem(A2, b2; u0 = x2)
test_interface(SparspakFactorization(), prob1, prob2)
end

@testset "Sparspak Factorization (Dual64)" begin
A1 = sparse(A / 1) .|> Dual64
b1 = rand(n) .|> Dual64
x1 = zero(b) .|> Dual64
A2 = sparse(A / 2) .|> Dual64
b2 = rand(n) .|> Dual64
x2 = zero(b) .|> Dual64

prob1 = LinearProblem(A1, b1; u0 = x1)
prob2 = LinearProblem(A2, b2; u0 = x2)
test_interface(SparspakFactorization(), prob1, prob2)
end


@testset "FastLAPACK Factorizations" begin
A1 = A / 1
b1 = rand(n)