From f365d034acd809d0ddd25c73154ed97c726e4d4e Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Fri, 18 May 2018 11:31:07 -0700 Subject: [PATCH 01/21] Deprecate lufact to lu. --- NEWS.md | 5 ++ stdlib/LinearAlgebra/docs/src/index.md | 1 - stdlib/LinearAlgebra/src/LinearAlgebra.jl | 1 - stdlib/LinearAlgebra/src/dense.jl | 12 ++-- stdlib/LinearAlgebra/src/deprecated.jl | 8 +++ stdlib/LinearAlgebra/src/factorization.jl | 2 +- stdlib/LinearAlgebra/src/generic.jl | 6 +- stdlib/LinearAlgebra/src/lu.jl | 76 ++++++++++------------- stdlib/LinearAlgebra/src/symmetric.jl | 6 +- stdlib/LinearAlgebra/test/generic.jl | 4 +- stdlib/LinearAlgebra/test/lapack.jl | 2 +- stdlib/LinearAlgebra/test/lu.jl | 32 +++++----- stdlib/LinearAlgebra/test/symmetric.jl | 2 +- stdlib/LinearAlgebra/test/triangular.jl | 14 ++--- stdlib/SparseArrays/src/linalg.jl | 7 +-- stdlib/SparseArrays/test/sparse.jl | 3 +- stdlib/SuiteSparse/src/cholmod.jl | 6 +- stdlib/SuiteSparse/src/deprecated.jl | 9 +++ stdlib/SuiteSparse/src/umfpack.jl | 26 ++++---- stdlib/SuiteSparse/test/umfpack.jl | 26 ++++---- 20 files changed, 129 insertions(+), 119 deletions(-) diff --git a/NEWS.md b/NEWS.md index 16a93a529b1a9..b5a14785da79b 100644 --- a/NEWS.md +++ b/NEWS.md @@ -227,6 +227,9 @@ This section lists changes that do not have deprecation warnings. * `readuntil` now does *not* include the delimiter in its result, matching the behavior of `readline`. Pass `keep=true` to get the old behavior ([#25633]). + * `lu` methods now return decomposition objects such as `LU` rather than + tuples of arrays or tuples of numbers ([#27159]). + * `countlines` now always counts the last non-empty line even if it does not end with EOL, matching the behavior of `eachline` and `readlines` ([#25845]). @@ -675,6 +678,8 @@ Deprecated or removed * The keyword `immutable` is fully deprecated to `struct`, and `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). + * `lufact` has been deprecated to `lu` ([#27159]). + * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. Instead, reshape the array or add trailing indices so the dimensionality and number of indices diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 8ccffe52ee8de..ed7bbef38db51 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -312,7 +312,6 @@ LinearAlgebra.LowerTriangular LinearAlgebra.UpperTriangular LinearAlgebra.UniformScaling LinearAlgebra.lu -LinearAlgebra.lufact LinearAlgebra.lufact! LinearAlgebra.chol LinearAlgebra.cholfact diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index eeef2b912be53..b11c223c87db2 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -112,7 +112,6 @@ export lowrankupdate, lowrankupdate!, lu, - lufact, lufact!, lyap, mul!, diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 101b6c7230e63..8d84cc2e4ea9e 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -721,7 +721,7 @@ function inv(A::StridedMatrix{T}) where T elseif istril(AA) Ai = tril!(parent(inv(LowerTriangular(AA)))) else - Ai = inv!(lufact(AA)) + Ai = inv!(lu(AA)) Ai = convert(typeof(parent(Ai)), Ai) end return Ai @@ -1118,9 +1118,9 @@ systems. For example: `A=factorize(A); x=A\\b; y=A\\C`. 
| Triangular | Triangular | | Diagonal | Diagonal | | Bidiagonal | Bidiagonal | -| Tridiagonal | LU (see [`lufact`](@ref)) | +| Tridiagonal | LU (see [`lu`](@ref)) | | Symmetric real tridiagonal | LDLt (see [`ldltfact`](@ref)) | -| General square | LU (see [`lufact`](@ref)) | +| General square | LU (see [`lu`](@ref)) | | General non-square | QR (see [`qrfact`](@ref)) | If `factorize` is called on a Hermitian positive-definite matrix, for instance, then `factorize` @@ -1201,7 +1201,7 @@ function factorize(A::StridedMatrix{T}) where T return ldltfact!(SymTridiagonal(diag(A), diag(A, -1))) end end - return lufact(Tridiagonal(diag(A, -1), diag(A), diag(A, 1))) + return lu(Tridiagonal(diag(A, -1), diag(A), diag(A, 1))) end end if utri @@ -1218,7 +1218,7 @@ function factorize(A::StridedMatrix{T}) where T if sym return factorize(Symmetric(A)) end - return lufact(A) + return lu(A) end qrfact(A, Val(true)) end @@ -1367,7 +1367,7 @@ function cond(A::AbstractMatrix, p::Real=2) end throw(ArgumentError("p-norm must be 1, 2 or Inf, got $p")) end -_cond1Inf(A::StridedMatrix{<:BlasFloat}, p::Real) = _cond1Inf(lufact(A), p, norm(A, p)) +_cond1Inf(A::StridedMatrix{<:BlasFloat}, p::Real) = _cond1Inf(lu(A), p, norm(A, p)) _cond1Inf(A::AbstractMatrix, p::Real) = norm(A, p)*norm(inv(A), p) ## Lyapunov and Sylvester equation diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index f7cb520809415..7a9472daf400e 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1260,3 +1260,11 @@ end @deprecate scale!(C::AbstractMatrix, a::AbstractVector, B::AbstractMatrix) mul!(C, Diagonal(a), B) Base.@deprecate_binding trace tr + +# deprecate lufact to lu +export lufact +@deprecate(lufact(S::LU), lu(S)) +@deprecate(lufact(x::Number), lu(x)) +@deprecate(lufact(A::AbstractMatrix{T}) where T, lu(A)) +@deprecate(lufact(A::AbstractMatrix{T}, pivot::Union{Val{false}, Val{true}}) where T, lu(A, pivot)) +@deprecate(lufact(A::Union{AbstractMatrix{T}, AbstractMatrix{Complex{T}}}, pivot::Union{Val{false}, Val{true}} = Val(true)) where {T<:AbstractFloat}, lu(A, pivot)) diff --git a/stdlib/LinearAlgebra/src/factorization.jl b/stdlib/LinearAlgebra/src/factorization.jl index d08a55e4d11b5..b0674ae64f683 100644 --- a/stdlib/LinearAlgebra/src/factorization.jl +++ b/stdlib/LinearAlgebra/src/factorization.jl @@ -27,7 +27,7 @@ julia> F = cholfact([1 0; 0 1]); julia> LinearAlgebra.issuccess(F) true -julia> F = lufact([1 0; 0 0]); +julia> F = lu([1 0; 0 0]); julia> LinearAlgebra.issuccess(F) false diff --git a/stdlib/LinearAlgebra/src/generic.jl b/stdlib/LinearAlgebra/src/generic.jl index 2ecd4220095b4..43ab58bbd4522 100644 --- a/stdlib/LinearAlgebra/src/generic.jl +++ b/stdlib/LinearAlgebra/src/generic.jl @@ -862,7 +862,7 @@ function (\)(A::AbstractMatrix, B::AbstractVecOrMat) if istriu(A) return UpperTriangular(A) \ B end - return lufact(A) \ B + return lu(A) \ B end return qrfact(A,Val(true)) \ B end @@ -1270,7 +1270,7 @@ function det(A::AbstractMatrix{T}) where T S = typeof((one(T)*zero(T) + zero(T))/one(T)) return convert(S, det(UpperTriangular(A))) end - return det(lufact(A)) + return det(lu(A)) end det(x::Number) = x @@ -1305,7 +1305,7 @@ julia> logabsdet(B) (0.6931471805599453, 1.0) ``` """ -logabsdet(A::AbstractMatrix) = logabsdet(lufact(A)) +logabsdet(A::AbstractMatrix) = logabsdet(lu(A)) """ logdet(M) diff --git a/stdlib/LinearAlgebra/src/lu.jl b/stdlib/LinearAlgebra/src/lu.jl index 34b6efe660940..cfdbee76376a4 100644 --- 
a/stdlib/LinearAlgebra/src/lu.jl +++ b/stdlib/LinearAlgebra/src/lu.jl @@ -11,6 +11,20 @@ struct LU{T,S<:AbstractMatrix} <: Factorization{T} end LU(factors::AbstractMatrix{T}, ipiv::Vector{BlasInt}, info::BlasInt) where {T} = LU{T,typeof(factors)}(factors, ipiv, info) +# iteration for destructuring into components +Base.iterate(S::LU) = (S.L, Val(:U)) +Base.iterate(S::LU, ::Val{:U}) = (S.U, Val(:p)) +Base.iterate(S::LU, ::Val{:p}) = (S.p, Val(:done)) +Base.iterate(S::LU, ::Val{:done}) = nothing + +# indexing for destructuring into components +@inline function Base.getindex(S::LU, i::Integer) + i == 1 ? (return S.L) : + i == 2 ? (return S.U) : + i == 3 ? (return S.p) : + throw(BoundsError(S, i)) +end + adjoint(F::LU) = Adjoint(F) transpose(F::LU) = Transpose(F) @@ -30,7 +44,7 @@ end """ lufact!(A, pivot=Val(true)) -> LU -`lufact!` is the same as [`lufact`](@ref), but saves space by overwriting the +`lufact!` is the same as [`lu`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. An [`InexactError`](@ref) exception is thrown if the factorization produces a number not representable by the element type of `A`, e.g. for integer types. @@ -114,13 +128,14 @@ function generic_lufact!(A::StridedMatrix{T}, ::Val{Pivot} = Val(true)) where {T end # floating point types doesn't have to be promoted for LU, but should default to pivoting -lufact(A::Union{AbstractMatrix{T}, AbstractMatrix{Complex{T}}}, - pivot::Union{Val{false}, Val{true}} = Val(true)) where {T<:AbstractFloat} = - lufact!(copy(A), pivot) +function lu(A::Union{AbstractMatrix{T}, AbstractMatrix{Complex{T}}}, + pivot::Union{Val{false}, Val{true}} = Val(true)) where {T<:AbstractFloat} + lufact!(copy(A), pivot) +end # for all other types we must promote to a type which is stable under division """ - lufact(A, pivot=Val(true)) -> F::LU + lu(A, pivot=Val(true)) -> F::LU Compute the LU factorization of `A`. @@ -129,7 +144,7 @@ type `T` supporting `+`, `-`, `*` and `/`, the return type is `LU{T,S{T}}`. If pivoting is chosen (default) the element type should also support `abs` and `<`. -The individual components of the factorization `F` can be accessed by indexing: +The individual components of the factorization `F` can be accessed via `getproperty`: | Component | Description | |:----------|:------------------------------------| @@ -138,6 +153,8 @@ The individual components of the factorization `F` can be accessed by indexing: | `F.p` | (right) permutation `Vector` | | `F.P` | (right) permutation `Matrix` | +Iterating the factorization produces the components `F.L`, `F.U`, and `F.p`. 
+ The relationship between `F` and `A` is `F.L*F.U == A[F.p, :]` @@ -161,7 +178,7 @@ julia> A = [4 3; 6 3] 4 3 6 3 -julia> F = lufact(A) +julia> F = lu(A) LU{Float64,Array{Float64,2}} L factor: 2×2 Array{Float64,2}: @@ -174,16 +191,21 @@ U factor: julia> F.L * F.U == A[F.p, :] true + +julia> l, u, p = lu(A); # destructuring via iteration + +julia> l == F.L && u == F.U && p == F.p +true ``` """ -function lufact(A::AbstractMatrix{T}, pivot::Union{Val{false}, Val{true}}) where T +function lu(A::AbstractMatrix{T}, pivot::Union{Val{false}, Val{true}}) where T S = typeof(zero(T)/one(T)) AA = similar(A, S) copyto!(AA, A) lufact!(AA, pivot) end # We can't assume an ordered field so we first try without pivoting -function lufact(A::AbstractMatrix{T}) where T +function lu(A::AbstractMatrix{T}) where T S = typeof(zero(T)/one(T)) AA = similar(A, S) copyto!(AA, A) @@ -197,38 +219,8 @@ function lufact(A::AbstractMatrix{T}) where T end end -lufact(x::Number) = LU(fill(x, 1, 1), BlasInt[1], x == 0 ? one(BlasInt) : zero(BlasInt)) -lufact(F::LU) = F - -lu(x::Number) = (one(x), x, 1) - -""" - lu(A, pivot=Val(true)) -> L, U, p - -Compute the LU factorization of `A`, such that `A[p,:] = L*U`. -By default, pivoting is used. This can be overridden by passing -`Val(false)` for the second argument. - -See also [`lufact`](@ref). - -# Examples -```jldoctest -julia> A = [4. 3.; 6. 3.] -2×2 Array{Float64,2}: - 4.0 3.0 - 6.0 3.0 - -julia> L, U, p = lu(A) -([1.0 0.0; 0.666667 1.0], [6.0 3.0; 0.0 1.0], [2, 1]) - -julia> A[p, :] == L * U -true -``` -""" -function lu(A::AbstractMatrix, pivot::Union{Val{false}, Val{true}} = Val(true)) - F = lufact(A, pivot) - F.L, F.U, F.p -end +lu(S::LU) = S +lu(x::Number) = LU(fill(x, 1, 1), BlasInt[1], x == 0 ? one(BlasInt) : zero(BlasInt)) function LU{T}(F::LU) where T M = convert(AbstractMatrix{T}, F.factors) @@ -459,7 +451,7 @@ function lufact!(A::Tridiagonal{T,V}, pivot::Union{Val{false}, Val{true}} = Val( LU{T,Tridiagonal{T,V}}(B, ipiv, convert(BlasInt, info)) end -factorize(A::Tridiagonal) = lufact(A) +factorize(A::Tridiagonal) = lu(A) function getproperty(F::LU{T,Tridiagonal{T,V}}, d::Symbol) where {T,V} m, n = size(F) diff --git a/stdlib/LinearAlgebra/src/symmetric.jl b/stdlib/LinearAlgebra/src/symmetric.jl index 4c2f5e44ff507..b4c69c85ef47d 100644 --- a/stdlib/LinearAlgebra/src/symmetric.jl +++ b/stdlib/LinearAlgebra/src/symmetric.jl @@ -442,7 +442,7 @@ function factorize(A::HermOrSym{T}) where T if TT <: BlasFloat return bkfact(A) else # fallback - return lufact(A) + return lu(A) end end @@ -453,11 +453,11 @@ det(A::Symmetric) = det(factorize(A)) \(A::HermOrSym{<:Any,<:StridedMatrix}, B::AbstractVector) = \(factorize(A), B) # Bunch-Kaufman solves can not utilize BLAS-3 for multiple right hand sides # so using LU is faster for AbstractMatrix right hand side -\(A::HermOrSym{<:Any,<:StridedMatrix}, B::AbstractMatrix) = \(lufact(A), B) +\(A::HermOrSym{<:Any,<:StridedMatrix}, B::AbstractMatrix) = \(lu(A), B) function _inv(A::HermOrSym) n = checksquare(A) - B = inv!(lufact(A)) + B = inv!(lu(A)) conjugate = isa(A, Hermitian) # symmetrize if A.uplo == 'U' # add to upper triangle diff --git a/stdlib/LinearAlgebra/test/generic.jl b/stdlib/LinearAlgebra/test/generic.jl index 05118849e9a8a..6108a563bbb29 100644 --- a/stdlib/LinearAlgebra/test/generic.jl +++ b/stdlib/LinearAlgebra/test/generic.jl @@ -348,13 +348,13 @@ LinearAlgebra.Transpose(a::ModInt{n}) where {n} = transpose(a) A = [ModInt{2}(1) ModInt{2}(0); ModInt{2}(1) ModInt{2}(1)] b = [ModInt{2}(1), ModInt{2}(0)] - @test A*(lufact(A, 
Val(false))\b) == b + @test A*(lu(A, Val(false))\b) == b # Needed for pivoting: Base.abs(a::ModInt{n}) where {n} = a Base.:<(a::ModInt{n}, b::ModInt{n}) where {n} = a.k < b.k - @test A*(lufact(A, Val(true))\b) == b + @test A*(lu(A, Val(true))\b) == b end @testset "fallback throws properly for AbstractArrays with dimension > 2" begin diff --git a/stdlib/LinearAlgebra/test/lapack.jl b/stdlib/LinearAlgebra/test/lapack.jl index 2eefa52b69acc..642827da3082a 100644 --- a/stdlib/LinearAlgebra/test/lapack.jl +++ b/stdlib/LinearAlgebra/test/lapack.jl @@ -266,7 +266,7 @@ end @test_throws DimensionMismatch LAPACK.gttrs!('N', x11, d, du, x9, y10, b) @test_throws DimensionMismatch LAPACK.gttrs!('N', dl, d, x11, x9, y10, b) @test_throws DimensionMismatch LAPACK.gttrs!('N', dl, d, du, x9, y10, x11) - A = lufact(Tridiagonal(dl,d,du)) + A = lu(Tridiagonal(dl,d,du)) b = rand(elty,10,5) c = copy(b) dl,d,du,du2,ipiv = LAPACK.gttrf!(dl,d,du) diff --git a/stdlib/LinearAlgebra/test/lu.jl b/stdlib/LinearAlgebra/test/lu.jl index bf6de1cb38127..96eab7b13fc88 100644 --- a/stdlib/LinearAlgebra/test/lu.jl +++ b/stdlib/LinearAlgebra/test/lu.jl @@ -42,8 +42,8 @@ dimg = randn(n)/2 if eltya <: BlasFloat @testset "LU factorization for Number" begin num = rand(eltya) - @test lu(num) == (one(eltya),num,1) - @test convert(Array, lufact(num)) ≈ eltya[num] + @test (lu(num)...,) == (hcat(one(eltya)), hcat(num), [1]) + @test convert(Array, lu(num)) ≈ eltya[num] end @testset "Balancing in eigenvector calculations" begin A = convert(Matrix{eltya}, [ 3.0 -2.0 -0.9 2*eps(real(one(eltya))); @@ -58,7 +58,7 @@ dimg = randn(n)/2 end end @testset "Singular LU" begin - lua = lufact(zeros(eltya, 3, 3)) + lua = lu(zeros(eltya, 3, 3)) @test !LinearAlgebra.issuccess(lua) @test sprint((t, s) -> show(t, "text/plain", s), lua) == "Failed factorization of type $(typeof(lua))" end @@ -85,9 +85,9 @@ dimg = randn(n)/2 end κd = cond(Array(d),1) @testset "Tridiagonal LU" begin - lud = lufact(d) + lud = lu(d) @test LinearAlgebra.issuccess(lud) - @test lufact(lud) == lud + @test lu(lud) == lud @test_throws ErrorException lud.Z @test lud.L*lud.U ≈ lud.P*Array(d) @test lud.L*lud.U ≈ Array(d)[lud.p,:] @@ -173,22 +173,22 @@ dimg = randn(n)/2 du[1] = zero(eltya) dl[1] = zero(eltya) zT = Tridiagonal(dl,dd,du) - @test !LinearAlgebra.issuccess(lufact(zT)) + @test !LinearAlgebra.issuccess(lu(zT)) end end @testset "Thin LU" begin - lua = @inferred lufact(a[:,1:n1]) + lua = @inferred lu(a[:,1:n1]) @test lua.L*lua.U ≈ lua.P*a[:,1:n1] end @testset "Fat LU" begin - lua = lufact(a[1:n1,:]) + lua = lu(a[1:n1,:]) @test lua.L*lua.U ≈ lua.P*a[1:n1,:] end end @testset "LU of Symmetric/Hermitian" begin for HS in (Hermitian(a'a), Symmetric(a'a)) - luhs = lufact(HS) + luhs = lu(HS) @test luhs.L*luhs.U ≈ luhs.P*Matrix(HS) end end @@ -198,8 +198,8 @@ end srand(3) a = Tridiagonal(rand(9),rand(10),rand(9)) fa = Array(a) - falu = lufact(fa) - alu = lufact(a) + falu = lu(fa) + alu = lu(a) falu = convert(typeof(falu),alu) @test AbstractArray(alu) == fa end @@ -208,7 +208,7 @@ end ## Integrate in general tests when more linear algebra is implemented in julia a = convert(Matrix{Rational{BigInt}}, rand(1:10//1,n,n))/n b = rand(1:10,n,2) - @inferred lufact(a) + @inferred lu(a) lua = factorize(a) l,u,p = lua.L, lua.U, lua.p @test l*u ≈ a[p,:] @@ -242,12 +242,12 @@ end end @testset "Issue 21453" begin - @test_throws ArgumentError LinearAlgebra._cond1Inf(lufact(randn(5,5)), 2, 2.0) + @test_throws ArgumentError LinearAlgebra._cond1Inf(lu(randn(5,5)), 2, 2.0) end @testset "REPL printing" 
begin bf = IOBuffer() - show(bf, "text/plain", lufact(Matrix(I, 4, 4))) + show(bf, "text/plain", lu(Matrix(I, 4, 4))) seekstart(bf) @test String(take!(bf)) == """ LinearAlgebra.LU{Float64,Array{Float64,2}} @@ -266,9 +266,9 @@ U factor: end @testset "propertynames" begin - names = sort!(collect(string.(Base.propertynames(lufact(rand(3,3)))))) + names = sort!(collect(string.(Base.propertynames(lu(rand(3,3)))))) @test names == ["L", "P", "U", "p"] - allnames = sort!(collect(string.(Base.propertynames(lufact(rand(3,3)), true)))) + allnames = sort!(collect(string.(Base.propertynames(lu(rand(3,3)), true)))) @test allnames == ["L", "P", "U", "factors", "info", "ipiv", "p"] end diff --git a/stdlib/LinearAlgebra/test/symmetric.jl b/stdlib/LinearAlgebra/test/symmetric.jl index 2a6a279a397a0..5ff60262510a6 100644 --- a/stdlib/LinearAlgebra/test/symmetric.jl +++ b/stdlib/LinearAlgebra/test/symmetric.jl @@ -196,7 +196,7 @@ end end if eltya <: LinearAlgebra.BlasComplex @testset "inverse edge case with complex Hermitian" begin - # Hermitian matrix, where inv(lufact(A)) generates non-real diagonal elements + # Hermitian matrix, where inv(lu(A)) generates non-real diagonal elements for T in (ComplexF32, ComplexF64) A = T[0.650488+0.0im 0.826686+0.667447im; 0.826686-0.667447im 1.81707+0.0im] H = Hermitian(A) diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index 50b94cc6f7764..884d94418d1c0 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -225,19 +225,19 @@ for elty1 in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFlo @test 0.5\A1 == 0.5\Matrix(A1) # inversion - @test inv(A1) ≈ inv(lufact(Matrix(A1))) + @test inv(A1) ≈ inv(lu(Matrix(A1))) inv(Matrix(A1)) # issue #11298 @test isa(inv(A1), t1) # make sure the call to LAPACK works right if elty1 <: BlasFloat - @test LinearAlgebra.inv!(copy(A1)) ≈ inv(lufact(Matrix(A1))) + @test LinearAlgebra.inv!(copy(A1)) ≈ inv(lu(Matrix(A1))) end # Determinant - @test det(A1) ≈ det(lufact(Matrix(A1))) atol=sqrt(eps(real(float(one(elty1)))))*n*n - @test logdet(A1) ≈ logdet(lufact(Matrix(A1))) atol=sqrt(eps(real(float(one(elty1)))))*n*n + @test det(A1) ≈ det(lu(Matrix(A1))) atol=sqrt(eps(real(float(one(elty1)))))*n*n + @test logdet(A1) ≈ logdet(lu(Matrix(A1))) atol=sqrt(eps(real(float(one(elty1)))))*n*n lada, ladb = logabsdet(A1) - flada, fladb = logabsdet(lufact(Matrix(A1))) + flada, fladb = logabsdet(lu(Matrix(A1))) @test lada ≈ flada atol=sqrt(eps(real(float(one(elty1)))))*n*n @test ladb ≈ fladb atol=sqrt(eps(real(float(one(elty1)))))*n*n @@ -425,7 +425,7 @@ for eltya in (Float32, Float64, ComplexF32, ComplexF64, BigFloat, Int) debug && println("\ntype of A: ", eltya, " type of b: ", eltyb, "\n") debug && println("Solve upper triangular system") - Atri = UpperTriangular(lufact(A).U) |> t -> eltya <: Complex && eltyb <: Real ? real(t) : t # Here the triangular matrix can't be too badly conditioned + Atri = UpperTriangular(lu(A).U) |> t -> eltya <: Complex && eltyb <: Real ? real(t) : t # Here the triangular matrix can't be too badly conditioned b = convert(Matrix{eltyb}, Matrix(Atri)*fill(1., n, 2)) x = Matrix(Atri) \ b @@ -453,7 +453,7 @@ for eltya in (Float32, Float64, ComplexF32, ComplexF64, BigFloat, Int) end debug && println("Solve lower triangular system") - Atri = UpperTriangular(lufact(A).U) |> t -> eltya <: Complex && eltyb <: Real ? 
real(t) : t # Here the triangular matrix can't be too badly conditioned + Atri = UpperTriangular(lu(A).U) |> t -> eltya <: Complex && eltyb <: Real ? real(t) : t # Here the triangular matrix can't be too badly conditioned b = convert(Matrix{eltyb}, Matrix(Atri)*fill(1., n, 2)) x = Matrix(Atri)\b diff --git a/stdlib/SparseArrays/src/linalg.jl b/stdlib/SparseArrays/src/linalg.jl index c304d3c3821fa..7306c9af4216f 100644 --- a/stdlib/SparseArrays/src/linalg.jl +++ b/stdlib/SparseArrays/src/linalg.jl @@ -937,7 +937,7 @@ function \(A::SparseMatrixCSC, B::AbstractVecOrMat) if ishermitian(A) return \(Hermitian(A), B) end - return \(lufact(A), B) + return \(lu(A), B) else return \(qrfact(A), B) end @@ -960,7 +960,7 @@ for (xformtype, xformop) in ((:Adjoint, :adjoint), (:Transpose, :transpose)) if ishermitian(A) return \($xformop(Hermitian(A)), B) end - return \($xformop(lufact(A)), B) + return \($xformop(lu(A)), B) else return \($xformop(qrfact(A)), B) end @@ -983,7 +983,7 @@ function factorize(A::SparseMatrixCSC) if ishermitian(A) return factorize(Hermitian(A)) end - return lufact(A) + return lu(A) else return qrfact(A) end @@ -1009,7 +1009,6 @@ function factorize(A::LinearAlgebra.RealHermSymComplexHerm{Float64,<:SparseMatri end chol(A::SparseMatrixCSC) = error("Use cholfact() instead of chol() for sparse matrices.") -lu(A::SparseMatrixCSC) = error("Use lufact() instead of lu() for sparse matrices.") eig(A::SparseMatrixCSC) = error("Use IterativeEigensolvers.eigs() instead of eig() for sparse matrices.") function Base.cov(X::SparseMatrixCSC; dims::Int=1, corrected::Bool=true) diff --git a/stdlib/SparseArrays/test/sparse.jl b/stdlib/SparseArrays/test/sparse.jl index 9decccac8a18e..2107faae2b23e 100644 --- a/stdlib/SparseArrays/test/sparse.jl +++ b/stdlib/SparseArrays/test/sparse.jl @@ -1320,7 +1320,7 @@ end @testset "explicit zeros" begin if Base.USE_GPL_LIBS a = SparseMatrixCSC(2, 2, [1, 3, 5], [1, 2, 1, 2], [1.0, 0.0, 0.0, 1.0]) - @test lufact(a)\[2.0, 3.0] ≈ [2.0, 3.0] + @test lu(a)\[2.0, 3.0] ≈ [2.0, 3.0] @test cholfact(a)\[2.0, 3.0] ≈ [2.0, 3.0] end end @@ -1780,7 +1780,6 @@ end C, b = A[:, 1:4], fill(1., size(A, 1)) @test !Base.USE_GPL_LIBS || factorize(C)\b ≈ Array(C)\b @test_throws ErrorException chol(A) - @test_throws ErrorException lu(A) @test_throws ErrorException eig(A) @test_throws ErrorException inv(A) end diff --git a/stdlib/SuiteSparse/src/cholmod.jl b/stdlib/SuiteSparse/src/cholmod.jl index 6f7963a209690..1255f420b8add 100644 --- a/stdlib/SuiteSparse/src/cholmod.jl +++ b/stdlib/SuiteSparse/src/cholmod.jl @@ -774,7 +774,7 @@ function solve(sys::Integer, F::Factor{Tv}, B::Dense{Tv}) where Tv<:VTypes if s.is_ll == 1 throw(LinearAlgebra.PosDefException(s.minor)) else - throw(ArgumentError("factorized matrix has one or more zero pivots. Try using lufact instead.")) + throw(ArgumentError("factorized matrix has one or more zero pivots. 
Try using `lu` instead.")) end end Dense(ccall((@cholmod_name("solve", SuiteSparse_long),:libcholmod), Ptr{C_Dense{Tv}}, @@ -1720,7 +1720,7 @@ function \(A::RealHermSymComplexHermF64SSL, B::StridedVecOrMat) if issuccess(F) return \(F, B) else - return \(lufact(SparseMatrixCSC{eltype(A), SuiteSparse_long}(A)), B) + return \(lu(SparseMatrixCSC{eltype(A), SuiteSparse_long}(A)), B) end end end @@ -1734,7 +1734,7 @@ function \(adjA::Adjoint{<:Any,<:RealHermSymComplexHermF64SSL}, B::StridedVecOrM if issuccess(F) return \(adjoint(F), B) else - return \(adjoint(lufact(SparseMatrixCSC{eltype(A), SuiteSparse_long}(A))), B) + return \(adjoint(lu(SparseMatrixCSC{eltype(A), SuiteSparse_long}(A))), B) end end end diff --git a/stdlib/SuiteSparse/src/deprecated.jl b/stdlib/SuiteSparse/src/deprecated.jl index 14bdd94e7c2ac..f5e5e5e157b7c 100644 --- a/stdlib/SuiteSparse/src/deprecated.jl +++ b/stdlib/SuiteSparse/src/deprecated.jl @@ -46,3 +46,12 @@ end LinearAlgebra.A_mul_B!(A::StridedMatrix, Q::QRSparseQ) = LinearAlgebra.mul!(A, Q) LinearAlgebra.A_mul_B!(Q::QRSparseQ, A::StridedVecOrMat) = LinearAlgebra.mul!(Q, A) end + +# deprecate lufact to lu +@eval SuiteSparse.UMFPACK begin + @deprecate(lufact(A::SparseMatrixCSC), lu(A)) + @deprecate(lufact(S::SparseMatrixCSC{<:UMFVTypes,<:UMFITypes}), lu(S)) + @deprecate(lufact(A::SparseMatrixCSC{<:Union{Float16,Float32},Ti}) where {Ti<:UMFITypes}, lu(A)) + @deprecate(lufact(A::SparseMatrixCSC{<:Union{ComplexF16,ComplexF32},Ti}) where {Ti<:UMFITypes}, lu(A)) + @deprecate(lufact(A::Union{SparseMatrixCSC{T},SparseMatrixCSC{Complex{T}}}) where {T<:AbstractFloat}, lu(A)) +end diff --git a/stdlib/SuiteSparse/src/umfpack.jl b/stdlib/SuiteSparse/src/umfpack.jl index 8b4f82a60b3e2..587de41e14fdd 100644 --- a/stdlib/SuiteSparse/src/umfpack.jl +++ b/stdlib/SuiteSparse/src/umfpack.jl @@ -6,7 +6,7 @@ export UmfpackLU import Base: (\), getproperty, show, size using LinearAlgebra -import LinearAlgebra: Factorization, det, lufact, ldiv! +import LinearAlgebra: Factorization, det, lu, ldiv! using SparseArrays import SparseArrays: nnz @@ -108,7 +108,7 @@ Base.adjoint(F::UmfpackLU) = Adjoint(F) Base.transpose(F::UmfpackLU) = Transpose(F) """ - lufact(A::SparseMatrixCSC) -> F::UmfpackLU + lu(A::SparseMatrixCSC) -> F::UmfpackLU Compute the LU factorization of a sparse matrix `A`. @@ -138,12 +138,12 @@ The relation between `F` and `A` is - [`det`](@ref) !!! note - `lufact(A::SparseMatrixCSC)` uses the UMFPACK library that is part of + `lu(A::SparseMatrixCSC)` uses the UMFPACK library that is part of SuiteSparse. As this library only supports sparse matrices with [`Float64`](@ref) or - `ComplexF64` elements, `lufact` converts `A` into a copy that is of type + `ComplexF64` elements, `lu` converts `A` into a copy that is of type `SparseMatrixCSC{Float64}` or `SparseMatrixCSC{ComplexF64}` as appropriate. """ -function lufact(S::SparseMatrixCSC{<:UMFVTypes,<:UMFITypes}) +function lu(S::SparseMatrixCSC{<:UMFVTypes,<:UMFITypes}) zerobased = S.colptr[1] == 0 res = UmfpackLU(C_NULL, C_NULL, S.m, S.n, zerobased ? 
copy(S.colptr) : decrement(S.colptr), @@ -152,16 +152,16 @@ function lufact(S::SparseMatrixCSC{<:UMFVTypes,<:UMFITypes}) finalizer(umfpack_free_symbolic, res) umfpack_numeric!(res) end -lufact(A::SparseMatrixCSC{<:Union{Float16,Float32},Ti}) where {Ti<:UMFITypes} = - lufact(convert(SparseMatrixCSC{Float64,Ti}, A)) -lufact(A::SparseMatrixCSC{<:Union{ComplexF16,ComplexF32},Ti}) where {Ti<:UMFITypes} = - lufact(convert(SparseMatrixCSC{ComplexF64,Ti}, A)) -lufact(A::Union{SparseMatrixCSC{T},SparseMatrixCSC{Complex{T}}}) where {T<:AbstractFloat} = +lu(A::SparseMatrixCSC{<:Union{Float16,Float32},Ti}) where {Ti<:UMFITypes} = + lu(convert(SparseMatrixCSC{Float64,Ti}, A)) +lu(A::SparseMatrixCSC{<:Union{ComplexF16,ComplexF32},Ti}) where {Ti<:UMFITypes} = + lu(convert(SparseMatrixCSC{ComplexF64,Ti}, A)) +lu(A::Union{SparseMatrixCSC{T},SparseMatrixCSC{Complex{T}}}) where {T<:AbstractFloat} = throw(ArgumentError(string("matrix type ", typeof(A), "not supported. ", - "Try lufact(convert(SparseMatrixCSC{Float64/ComplexF64,Int}, A)) for ", - "sparse floating point LU using UMFPACK or lufact(Array(A)) for generic ", + "Try lu(convert(SparseMatrixCSC{Float64/ComplexF64,Int}, A)) for ", + "sparse floating point LU using UMFPACK or lu(Array(A)) for generic ", "dense LU."))) -lufact(A::SparseMatrixCSC) = lufact(float(A)) +lu(A::SparseMatrixCSC) = lu(float(A)) size(F::UmfpackLU) = (F.m, F.n) diff --git a/stdlib/SuiteSparse/test/umfpack.jl b/stdlib/SuiteSparse/test/umfpack.jl index a1be43d13dfc2..dcb74fc281629 100644 --- a/stdlib/SuiteSparse/test/umfpack.jl +++ b/stdlib/SuiteSparse/test/umfpack.jl @@ -18,7 +18,7 @@ # We might be able to support two index sizes one day for Ti in Base.uniontypes(SuiteSparse.UMFPACK.UMFITypes) A = convert(SparseMatrixCSC{Tv,Ti}, A0) - lua = lufact(A) + lua = lu(A) @test nnz(lua) == 18 @test_throws ErrorException lua.Z L,U,p,q,Rs = lua.:(:) @@ -77,7 +77,7 @@ for Ti in Base.uniontypes(SuiteSparse.UMFPACK.UMFITypes) Ac = convert(SparseMatrixCSC{ComplexF64,Ti}, Ac0) x = fill(1.0 + im, size(Ac,1)) - lua = lufact(Ac) + lua = lu(Ac) L,U,p,q,Rs = lua.:(:) @test (Diagonal(Rs) * Ac)[p,q] ≈ L * U b = Ac*x @@ -92,7 +92,7 @@ @testset "Rectangular cases" for elty in (Float64, ComplexF64) for (m, n) in ((10,5), (5, 10)) A = sparse([1:min(m,n); rand(1:m, 10)], [1:min(m,n); rand(1:n, 10)], elty == Float64 ? 
randn(min(m, n) + 10) : complex.(randn(min(m, n) + 10), randn(min(m, n) + 10))) - F = lufact(A) + F = lu(A) L, U, p, q, Rs = F.:(:) @test (Diagonal(Rs) * A)[p,q] ≈ L * U end @@ -100,13 +100,13 @@ @testset "Issue #4523 - complex sparse \\" begin A, b = sparse((1.0 + im)I, 2, 2), fill(1., 2) - @test A * (lufact(A)\b) ≈ b + @test A * (lu(A)\b) ≈ b @test det(sparse([1,3,3,1], [1,1,3,3], [1,1,1,1])) == 0 end @testset "UMFPACK_ERROR_n_nonpositive" begin - @test_throws ArgumentError lufact(sparse(Int[], Int[], Float64[], 5, 0)) + @test_throws ArgumentError lu(sparse(Int[], Int[], Float64[], 5, 0)) end @testset "Issue #15099" for (Tin, Tout) in ( @@ -119,7 +119,7 @@ (Int, Float64), ) - F = lufact(sparse(fill(Tin(1), 1, 1))) + F = lu(sparse(fill(Tin(1), 1, 1))) L = sparse(fill(Tout(1), 1, 1)) @test F.p == F.q == [1] @test F.Rs == [1.0] @@ -128,12 +128,12 @@ end @testset "BigFloat not supported" for T in (BigFloat, Complex{BigFloat}) - @test_throws ArgumentError lufact(sparse(fill(T(1), 1, 1))) + @test_throws ArgumentError lu(sparse(fill(T(1), 1, 1))) end @testset "size(::UmfpackLU)" begin m = n = 1 - F = lufact(sparse(fill(1., m, n))) + F = lu(sparse(fill(1., m, n))) @test size(F) == (m, n) @test size(F, 1) == m @test size(F, 2) == n @@ -143,15 +143,15 @@ @testset "Test aliasing" begin a = rand(5) - @test_throws ArgumentError SuiteSparse.UMFPACK.solve!(a, lufact(sparse(1.0I, 5, 5)), a, SuiteSparse.UMFPACK.UMFPACK_A) + @test_throws ArgumentError SuiteSparse.UMFPACK.solve!(a, lu(sparse(1.0I, 5, 5)), a, SuiteSparse.UMFPACK.UMFPACK_A) aa = complex(a) - @test_throws ArgumentError SuiteSparse.UMFPACK.solve!(aa, lufact(sparse((1.0im)I, 5, 5)), aa, SuiteSparse.UMFPACK.UMFPACK_A) + @test_throws ArgumentError SuiteSparse.UMFPACK.solve!(aa, lu(sparse((1.0im)I, 5, 5)), aa, SuiteSparse.UMFPACK.UMFPACK_A) end - @testset "Issues #18246,18244 - lufact sparse pivot" begin + @testset "Issues #18246,18244 - lu sparse pivot" begin A = sparse(1.0I, 4, 4) A[1:2,1:2] = [-.01 -200; 200 .001] - F = lufact(A) + F = lu(A) @test F.p == [3 ; 4 ; 2 ; 1] end @@ -161,7 +161,7 @@ A = N*I + sprand(N, N, p) X = zeros(Complex{Float64}, N, N) B = complex.(rand(N, N), rand(N, N)) - luA, lufA = lufact(A), lufact(Array(A)) + luA, lufA = lu(A), lu(Array(A)) @test LinearAlgebra.ldiv!(copy(X), luA, B) ≈ LinearAlgebra.ldiv!(copy(X), lufA, B) @test LinearAlgebra.ldiv!(copy(X), adjoint(luA), B) ≈ LinearAlgebra.ldiv!(copy(X), adjoint(lufA), B) @test LinearAlgebra.ldiv!(copy(X), transpose(luA), B) ≈ LinearAlgebra.ldiv!(copy(X), transpose(lufA), B) From 0968a871e1a095a8a9fc32b799b8baa4a412e3dd Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Fri, 18 May 2018 13:15:38 -0700 Subject: [PATCH 02/21] Deprecate eigfact to eig. 
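
As with `lufact`/`lu` in the previous patch, `eig` now returns the factorization
object directly (an `Eigen` or `GeneralizedEigen`), and that object can be
destructured by iteration. A minimal sketch of the intended usage, assuming a
build with this change applied (illustrative only, not part of the diff below):

```julia
# Sketch only: assumes a Julia build with this deprecation in place.
using LinearAlgebra

A = [1.0 0.0; 0.0 3.0]

F = eig(A)            # returns an Eigen object, as eigfact did previously
F.values              # eigenvalues
F.vectors             # eigenvectors, one per column

vals, vecs = eig(A)   # destructuring via iteration
vals == F.values && vecs == F.vectors   # true
```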
--- NEWS.md | 5 +- .../src/IterativeEigensolvers.jl | 2 +- stdlib/LinearAlgebra/docs/src/index.md | 1 - stdlib/LinearAlgebra/src/LinearAlgebra.jl | 1 - stdlib/LinearAlgebra/src/bidiag.jl | 2 +- stdlib/LinearAlgebra/src/dense.jl | 18 +-- stdlib/LinearAlgebra/src/deprecated.jl | 20 +++ stdlib/LinearAlgebra/src/diagonal.jl | 2 +- stdlib/LinearAlgebra/src/eigen.jl | 117 ++++++------------ stdlib/LinearAlgebra/src/symmetric.jl | 40 +++--- stdlib/LinearAlgebra/src/triangular.jl | 2 +- stdlib/LinearAlgebra/src/tridiag.jl | 8 +- stdlib/LinearAlgebra/test/dense.jl | 8 +- stdlib/LinearAlgebra/test/diagonal.jl | 2 +- stdlib/LinearAlgebra/test/eigen.jl | 14 +-- stdlib/LinearAlgebra/test/lapack.jl | 4 +- stdlib/LinearAlgebra/test/lu.jl | 4 +- stdlib/LinearAlgebra/test/symmetric.jl | 16 +-- stdlib/LinearAlgebra/test/tridiag.jl | 8 +- 19 files changed, 130 insertions(+), 144 deletions(-) diff --git a/NEWS.md b/NEWS.md index b5a14785da79b..f7b44425ee90d 100644 --- a/NEWS.md +++ b/NEWS.md @@ -230,6 +230,9 @@ This section lists changes that do not have deprecation warnings. * `lu` methods now return decomposition objects such as `LU` rather than tuples of arrays or tuples of numbers ([#27159]). + * `eig` methods now return decomposition objects such as `Eigen` and + `GeneralizedEigen` rather than tuples of arrays or tuples of numbers ([#27159]). + * `countlines` now always counts the last non-empty line even if it does not end with EOL, matching the behavior of `eachline` and `readlines` ([#25845]). @@ -678,7 +681,7 @@ Deprecated or removed * The keyword `immutable` is fully deprecated to `struct`, and `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). - * `lufact` has been deprecated to `lu` ([#27159]). + * `lufact` and `eigfact` have respectively been deprecated to `lu` and `eig` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl b/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl index d21bab552441a..547708776dae6 100644 --- a/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl +++ b/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl @@ -78,7 +78,7 @@ function _eigs(A, B; sym = !iscmplx && issymmetric(A) && issymmetric(B) nevmax = sym ? n-1 : n-2 if nevmax <= 0 - throw(ArgumentError("input matrix A is too small. Use eigfact instead.")) + throw(ArgumentError("input matrix A is too small. Use eig instead.")) end if nev > nevmax @warn "Adjusting nev from $nev to $nevmax" diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index ed7bbef38db51..19e2a6c800089 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -340,7 +340,6 @@ LinearAlgebra.eigvals! LinearAlgebra.eigmax LinearAlgebra.eigmin LinearAlgebra.eigvecs -LinearAlgebra.eigfact LinearAlgebra.eigfact! LinearAlgebra.hessfact LinearAlgebra.hessfact! 
diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index b11c223c87db2..17450b0c1e6ac 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -81,7 +81,6 @@ export diagm, dot, eig, - eigfact, eigfact!, eigmax, eigmin, diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index 9d2d617b78023..76efa4356eafd 100644 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -625,4 +625,4 @@ function eigvecs(M::Bidiagonal{T}) where T end Q #Actually Triangular end -eigfact(M::Bidiagonal) = Eigen(eigvals(M), eigvecs(M)) +eig(M::Bidiagonal) = Eigen(eigvals(M), eigvecs(M)) diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 8d84cc2e4ea9e..4b064b987c874 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -478,7 +478,7 @@ Compute the matrix exponential of `A`, defined by e^A = \\sum_{n=0}^{\\infty} \\frac{A^n}{n!}. ``` -For symmetric or Hermitian `A`, an eigendecomposition ([`eigfact`](@ref)) is +For symmetric or Hermitian `A`, an eigendecomposition ([`eig`](@ref)) is used, otherwise the scaling and squaring algorithm (see [^H05]) is chosen. [^H05]: Nicholas J. Higham, "The squaring and scaling method for the matrix exponential revisited", SIAM Journal on Matrix Analysis and Applications, 26(4), 2005, 1179-1193. [doi:10.1137/090768539](https://doi.org/10.1137/090768539) @@ -602,7 +602,7 @@ the unique matrix ``X`` such that ``e^X = A`` and ``-\\pi < Im(\\lambda) < \\pi` the eigenvalues ``\\lambda`` of ``X``. If `A` has nonpositive eigenvalues, a nonprincipal matrix function is returned whenever possible. -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigfact`](@ref)) is +If `A` is symmetric or Hermitian, its eigendecomposition ([`eig`](@ref)) is used, if `A` is triangular an improved version of the inverse scaling and squaring method is employed (see [^AH12] and [^AHR13]). For general matrices, the complex Schur form ([`schur`](@ref)) is computed and the triangular algorithm is used on the @@ -660,7 +660,7 @@ If `A` has no negative real eigenvalues, compute the principal matrix square roo that is the unique matrix ``X`` with eigenvalues having positive real part such that ``X^2 = A``. Otherwise, a nonprincipal square root is returned. -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigfact`](@ref)) is +If `A` is symmetric or Hermitian, its eigendecomposition ([`eig`](@ref)) is used to compute the square root. Otherwise, the square root is determined by means of the Björck-Hammarling method [^BH83], which computes the complex Schur form ([`schur`](@ref)) and then the complex square root of the triangular factor. @@ -732,7 +732,7 @@ end Compute the matrix cosine of a square matrix `A`. -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigfact`](@ref)) is used to +If `A` is symmetric or Hermitian, its eigendecomposition ([`eig`](@ref)) is used to compute the cosine. Otherwise, the cosine is determined by calling [`exp`](@ref). # Examples @@ -765,7 +765,7 @@ end Compute the matrix sine of a square matrix `A`. -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigfact`](@ref)) is used to +If `A` is symmetric or Hermitian, its eigendecomposition ([`eig`](@ref)) is used to compute the sine. Otherwise, the sine is determined by calling [`exp`](@ref). # Examples @@ -851,7 +851,7 @@ end Compute the matrix tangent of a square matrix `A`. 
-If `A` is symmetric or Hermitian, its eigendecomposition ([`eigfact`](@ref)) is used to +If `A` is symmetric or Hermitian, its eigendecomposition ([`eig`](@ref)) is used to compute the tangent. Otherwise, the tangent is determined by calling [`exp`](@ref). # Examples @@ -924,7 +924,7 @@ end Compute the inverse matrix cosine of a square matrix `A`. -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigfact`](@ref)) is used to +If `A` is symmetric or Hermitian, its eigendecomposition ([`eig`](@ref)) is used to compute the inverse cosine. Otherwise, the inverse cosine is determined by using [`log`](@ref) and [`sqrt`](@ref). For the theory and logarithmic formulas used to compute this function, see [^AH16_1]. @@ -955,7 +955,7 @@ end Compute the inverse matrix sine of a square matrix `A`. -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigfact`](@ref)) is used to +If `A` is symmetric or Hermitian, its eigendecomposition ([`eig`](@ref)) is used to compute the inverse sine. Otherwise, the inverse sine is determined by using [`log`](@ref) and [`sqrt`](@ref). For the theory and logarithmic formulas used to compute this function, see [^AH16_2]. @@ -986,7 +986,7 @@ end Compute the inverse matrix tangent of a square matrix `A`. -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigfact`](@ref)) is used to +If `A` is symmetric or Hermitian, its eigendecomposition ([`eig`](@ref)) is used to compute the inverse tangent. Otherwise, the inverse tangent is determined by using [`log`](@ref). For the theory and logarithmic formulas used to compute this function, see [^AH16_3]. diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 7a9472daf400e..7c3909178b520 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1268,3 +1268,23 @@ export lufact @deprecate(lufact(A::AbstractMatrix{T}) where T, lu(A)) @deprecate(lufact(A::AbstractMatrix{T}, pivot::Union{Val{false}, Val{true}}) where T, lu(A, pivot)) @deprecate(lufact(A::Union{AbstractMatrix{T}, AbstractMatrix{Complex{T}}}, pivot::Union{Val{false}, Val{true}} = Val(true)) where {T<:AbstractFloat}, lu(A, pivot)) + +# deprecate eigfact to eig +export eigfact +@deprecate(eigfact(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T, eig(A; permute=permute, scale=scale)) +@deprecate(eigfact(x::Number), eig(x)) +@deprecate(eigfact(A::AbstractMatrix{TA}, B::AbstractMatrix{TB}) where {TA,TB}, eig(A, B)) +@deprecate(eigfact(A::Number, B::Number), eig(A, B)) + +@deprecate(eigfact(A::SymTridiagonal{T}) where T, eig(A)) +@deprecate(eigfact(A::SymTridiagonal{T}, irange::UnitRange) where T, eig(A, irange)) +@deprecate(eigfact(A::SymTridiagonal{T}, vl::Real, vu::Real) where T, eig(A, vl, vu)) + +@deprecate(eigfact(M::Bidiagonal), eig(M)) +@deprecate(eigfact(A::RealHermSymComplexHerm), eig(A)) +@deprecate(eigfact(A::RealHermSymComplexHerm, irange::UnitRange), eig(A, irange)) +@deprecate(eigfact(A::RealHermSymComplexHerm, vl::Real, vh::Real), eig(A, vl, vh)) + +@deprecate(eigfact(A::AbstractTriangular), eig(A)) + +@deprecate(eigfact(D::Diagonal; permute::Bool=true, scale::Bool=true), eig(D; permute=permute, scale=scale)) diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl index 95406701e3048..8037266d1563c 100644 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ b/stdlib/LinearAlgebra/src/diagonal.jl @@ -445,7 +445,7 @@ end eigvals(D::Diagonal{<:Number}) = D.diag eigvals(D::Diagonal) = [eigvals(x) for x in 
D.diag] #For block matrices, etc. eigvecs(D::Diagonal) = Matrix{eltype(D)}(I, size(D)) -function eigfact(D::Diagonal; permute::Bool=true, scale::Bool=true) +function eig(D::Diagonal; permute::Bool=true, scale::Bool=true) if any(!isfinite, D.diag) throw(ArgumentError("matrix contains Infs or NaNs")) end diff --git a/stdlib/LinearAlgebra/src/eigen.jl b/stdlib/LinearAlgebra/src/eigen.jl index 56890060f13f2..7cb421a25b503 100644 --- a/stdlib/LinearAlgebra/src/eigen.jl +++ b/stdlib/LinearAlgebra/src/eigen.jl @@ -20,12 +20,24 @@ end GeneralizedEigen(values::AbstractVector{V}, vectors::AbstractMatrix{T}) where {T,V} = GeneralizedEigen{T,V,typeof(vectors),typeof(values)}(values, vectors) +# iteration for destructuring into components +Base.iterate(S::Union{Eigen,GeneralizedEigen}) = (S.values, Val(:vectors)) +Base.iterate(S::Union{Eigen,GeneralizedEigen}, ::Val{:vectors}) = (S.vectors, Val(:done)) +Base.iterate(S::Union{Eigen,GeneralizedEigen}, ::Val{:done}) = nothing + +# indexing for destructuring into components +@inline function Base.getindex(S::Union{Eigen,GeneralizedEigen}, i::Integer) + i == 1 ? (return S.values) : + i == 2 ? (return S.vectors) : + throw(BoundsError(S, i)) +end + isposdef(A::Union{Eigen,GeneralizedEigen}) = isreal(A.values) && all(x -> x > 0, A.values) """ eigfact!(A, [B]) -Same as [`eigfact`](@ref), but saves space by overwriting the input `A` (and +Same as [`eig`](@ref), but saves space by overwriting the input `A` (and `B`), instead of creating a copy. """ function eigfact!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T<:BlasReal @@ -59,12 +71,14 @@ function eigfact!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) whe end """ - eigfact(A; permute::Bool=true, scale::Bool=true) -> Eigen + eig(A; permute::Bool=true, scale::Bool=true) -> Eigen Computes the eigenvalue decomposition of `A`, returning an `Eigen` factorization object `F` which contains the eigenvalues in `F.values` and the eigenvectors in the columns of the matrix `F.vectors`. (The `k`th eigenvector can be obtained from the slice `F.vectors[:, k]`.) +Iterating the decomposition produces the components `F.values` and `F.vectors`. + The following functions are available for `Eigen` objects: [`inv`](@ref), [`det`](@ref), and [`isposdef`](@ref). For general nonsymmetric matrices it is possible to specify how the matrix is balanced @@ -74,7 +88,7 @@ make rows and columns more equal in norm. 
The default is `true` for both options # Examples ```jldoctest -julia> F = eigfact([1.0 0.0 0.0; 0.0 3.0 0.0; 0.0 0.0 18.0]) +julia> F = eig([1.0 0.0 0.0; 0.0 3.0 0.0; 0.0 0.0 18.0]) Eigen{Float64,Float64,Array{Float64,2},Array{Float64,1}} eigenvalues: 3-element Array{Float64,1}: @@ -98,53 +112,26 @@ julia> F.vectors 1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0 -``` -""" -function eigfact(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T - AA = copy_oftype(A, eigtype(T)) - isdiag(AA) && return eigfact(Diagonal(AA), permute = permute, scale = scale) - return eigfact!(AA, permute = permute, scale = scale) -end -eigfact(x::Number) = Eigen([x], fill(one(x), 1, 1)) - -function eig(A::Union{Number, StridedMatrix}; permute::Bool=true, scale::Bool=true) - F = eigfact(A, permute=permute, scale=scale) - F.values, F.vectors -end -""" - eig(A::Union{SymTridiagonal, Hermitian, Symmetric}, irange::UnitRange) -> D, V - eig(A::Union{SymTridiagonal, Hermitian, Symmetric}, vl::Real, vu::Real) -> D, V - eig(A, permute::Bool=true, scale::Bool=true) -> D, V - -Computes eigenvalues (`D`) and eigenvectors (`V`) of `A`. -See [`eigfact`](@ref) for details on the -`irange`, `vl`, and `vu` arguments -(for [`SymTridiagonal`](@ref), [`Hermitian`](@ref), and -[`Symmetric`](@ref) matrices) -and the `permute` and `scale` keyword arguments. -The eigenvectors are returned columnwise. +julia> vals, vecs = F; # destructuring via iteration -# Examples -```jldoctest -julia> eig([1.0 0.0 0.0; 0.0 3.0 0.0; 0.0 0.0 18.0]) -([1.0, 3.0, 18.0], [1.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0]) +julia> vals == F.values && vecs == F.vectors +true ``` - -`eig` is a wrapper around [`eigfact`](@ref), extracting all parts of the -factorization to a tuple; where possible, using [`eigfact`](@ref) is recommended. """ -function eig(A::AbstractMatrix, args...) - F = eigfact(A, args...) - F.values, F.vectors +function eig(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T + AA = copy_oftype(A, eigtype(T)) + isdiag(AA) && return eig(Diagonal(AA), permute = permute, scale = scale) + return eigfact!(AA, permute = permute, scale = scale) end +eig(x::Number) = Eigen([x], fill(one(x), 1, 1)) """ eigvecs(A; permute::Bool=true, scale::Bool=true) -> Matrix Return a matrix `M` whose columns are the eigenvectors of `A`. (The `k`th eigenvector can be obtained from the slice `M[:, k]`.) The `permute` and `scale` keywords are the same as -for [`eigfact`](@ref). +for [`eig`](@ref). # Examples ```jldoctest @@ -156,7 +143,7 @@ julia> eigvecs([1.0 0.0 0.0; 0.0 3.0 0.0; 0.0 0.0 18.0]) ``` """ eigvecs(A::Union{Number, AbstractMatrix}; permute::Bool=true, scale::Bool=true) = - eigvecs(eigfact(A, permute=permute, scale=scale)) + eigvecs(eig(A, permute=permute, scale=scale)) eigvecs(F::Union{Eigen, GeneralizedEigen}) = F.vectors eigvals(F::Union{Eigen, GeneralizedEigen}) = F.values @@ -351,13 +338,15 @@ function eigfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasComplex end """ - eigfact(A, B) -> GeneralizedEigen + eig(A, B) -> GeneralizedEigen Computes the generalized eigenvalue decomposition of `A` and `B`, returning a `GeneralizedEigen` factorization object `F` which contains the generalized eigenvalues in `F.values` and the generalized eigenvectors in the columns of the matrix `F.vectors`. (The `k`th generalized eigenvector can be obtained from the slice `F.vectors[:, k]`.) +Iterating the decomposition produces the components `F.values` and `F.vectors`. 
+ # Examples ```jldoctest julia> A = [1 0; 0 -1] @@ -370,7 +359,7 @@ julia> B = [0 1; 1 0] 0 1 1 0 -julia> F = eigfact(A, B); +julia> F = eig(A, B); julia> F.values 2-element Array{Complex{Float64},1}: @@ -381,47 +370,19 @@ julia> F.vectors 2×2 Array{Complex{Float64},2}: 0.0-1.0im 0.0+1.0im -1.0-0.0im -1.0+0.0im + +julia> vals, vecs = F; # destructuring via iteration + +julia> vals == F.values && vecs == F.vectors +true ``` """ -function eigfact(A::AbstractMatrix{TA}, B::AbstractMatrix{TB}) where {TA,TB} +function eig(A::AbstractMatrix{TA}, B::AbstractMatrix{TB}) where {TA,TB} S = promote_type(eigtype(TA),TB) return eigfact!(copy_oftype(A, S), copy_oftype(B, S)) end -eigfact(A::Number, B::Number) = eigfact(fill(A,1,1), fill(B,1,1)) - -""" - eig(A, B) -> D, V - -Computes generalized eigenvalues (`D`) and vectors (`V`) of `A` with respect to `B`. - -`eig` is a wrapper around [`eigfact`](@ref), extracting all parts of the -factorization to a tuple; where possible, using [`eigfact`](@ref) is recommended. - -# Examples -```jldoctest -julia> A = [1 0; 0 -1] -2×2 Array{Int64,2}: - 1 0 - 0 -1 - -julia> B = [0 1; 1 0] -2×2 Array{Int64,2}: - 0 1 - 1 0 - -julia> eig(A, B) -(Complex{Float64}[0.0+1.0im, 0.0-1.0im], Complex{Float64}[0.0-1.0im 0.0+1.0im; -1.0-0.0im -1.0+0.0im]) -``` -""" -function eig(A::AbstractMatrix, B::AbstractMatrix) - F = eigfact(A,B) - F.values, F.vectors -end -function eig(A::Number, B::Number) - F = eigfact(A,B) - F.values, F.vectors -end +eig(A::Number, B::Number) = eig(fill(A,1,1), fill(B,1,1)) """ eigvals!(A, B) -> values @@ -524,7 +485,7 @@ julia> eigvecs(A, B) -1.0-0.0im -1.0+0.0im ``` """ -eigvecs(A::AbstractMatrix, B::AbstractMatrix) = eigvecs(eigfact(A, B)) +eigvecs(A::AbstractMatrix, B::AbstractMatrix) = eigvecs(eig(A, B)) function show(io::IO, mime::MIME{Symbol("text/plain")}, F::Union{Eigen,GeneralizedEigen}) println(io, summary(F)) diff --git a/stdlib/LinearAlgebra/src/symmetric.jl b/stdlib/LinearAlgebra/src/symmetric.jl index b4c69c85ef47d..d5f5eb580596a 100644 --- a/stdlib/LinearAlgebra/src/symmetric.jl +++ b/stdlib/LinearAlgebra/src/symmetric.jl @@ -476,7 +476,7 @@ inv(A::Symmetric{<:Any,<:StridedMatrix}) = Symmetric(_inv(A), Symbol(A.uplo)) eigfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}) = Eigen(LAPACK.syevr!('V', 'A', A.uplo, A.data, 0.0, 0.0, 0, 0, -1.0)...) -function eigfact(A::RealHermSymComplexHerm) +function eig(A::RealHermSymComplexHerm) T = eltype(A) S = eigtype(T) eigfact!(S != T ? convert(AbstractMatrix{S}, A) : copy(A)) @@ -485,12 +485,14 @@ end eigfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, irange::UnitRange) = Eigen(LAPACK.syevr!('V', 'I', A.uplo, A.data, 0.0, 0.0, irange.start, irange.stop, -1.0)...) """ - eigfact(A::Union{SymTridiagonal, Hermitian, Symmetric}, irange::UnitRange) -> Eigen + eig(A::Union{SymTridiagonal, Hermitian, Symmetric}, irange::UnitRange) -> Eigen Computes the eigenvalue decomposition of `A`, returning an `Eigen` factorization object `F` which contains the eigenvalues in `F[:values]` and the eigenvectors in the columns of the matrix `F[:vectors]`. (The `k`th eigenvector can be obtained from the slice `F[:vectors][:, k]`.) +Iterating the decomposition produces the components `F.values` and `F.vectors`. + The following functions are available for `Eigen` objects: [`inv`](@ref), [`det`](@ref), and [`isposdef`](@ref). The `UnitRange` `irange` specifies indices of the sorted eigenvalues to search for. 
@@ -499,7 +501,7 @@ The `UnitRange` `irange` specifies indices of the sorted eigenvalues to search f If `irange` is not `1:n`, where `n` is the dimension of `A`, then the returned factorization will be a *truncated* factorization. """ -function eigfact(A::RealHermSymComplexHerm, irange::UnitRange) +function eig(A::RealHermSymComplexHerm, irange::UnitRange) T = eltype(A) S = eigtype(T) eigfact!(S != T ? convert(AbstractMatrix{S}, A) : copy(A), irange) @@ -509,12 +511,14 @@ eigfact!(A::RealHermSymComplexHerm{T,<:StridedMatrix}, vl::Real, vh::Real) where Eigen(LAPACK.syevr!('V', 'V', A.uplo, A.data, convert(T, vl), convert(T, vh), 0, 0, -1.0)...) """ - eigfact(A::Union{SymTridiagonal, Hermitian, Symmetric}, vl::Real, vu::Real) -> Eigen + eig(A::Union{SymTridiagonal, Hermitian, Symmetric}, vl::Real, vu::Real) -> Eigen Computes the eigenvalue decomposition of `A`, returning an `Eigen` factorization object `F` which contains the eigenvalues in `F[:values]` and the eigenvectors in the columns of the matrix `F[:vectors]`. (The `k`th eigenvector can be obtained from the slice `F[:vectors][:, k]`.) +Iterating the decomposition produces the components `F.values` and `F.vectors`. + The following functions are available for `Eigen` objects: [`inv`](@ref), [`det`](@ref), and [`isposdef`](@ref). `vl` is the lower bound of the window of eigenvalues to search for, and `vu` is the upper bound. @@ -523,7 +527,7 @@ The following functions are available for `Eigen` objects: [`inv`](@ref), [`det` If [`vl`, `vu`] does not contain all eigenvalues of `A`, then the returned factorization will be a *truncated* factorization. """ -function eigfact(A::RealHermSymComplexHerm, vl::Real, vh::Real) +function eig(A::RealHermSymComplexHerm, vl::Real, vh::Real) T = eltype(A) S = eigtype(T) eigfact!(S != T ? convert(AbstractMatrix{S}, A) : copy(A), vl, vh) @@ -634,7 +638,7 @@ eigvals!(A::HermOrSym{T,S}, B::HermOrSym{T,S}) where {T<:BlasReal,S<:StridedMatr eigvals!(A::Hermitian{T,S}, B::Hermitian{T,S}) where {T<:BlasComplex,S<:StridedMatrix} = LAPACK.sygvd!(1, 'N', A.uplo, A.data, B.uplo == A.uplo ? 
B.data : copy(B.data'))[1] -eigvecs(A::HermOrSym) = eigvecs(eigfact(A)) +eigvecs(A::HermOrSym) = eigvecs(eig(A)) function svdvals!(A::RealHermSymComplexHerm) vals = eigvals!(A) @@ -656,7 +660,7 @@ function sympow(A::Symmetric, p::Integer) end function ^(A::Symmetric{<:Real}, p::Real) isinteger(p) && return integerpow(A, p) - F = eigfact(A) + F = eig(A) if all(λ -> λ ≥ 0, F.values) return Symmetric((F.vectors * Diagonal((F.values).^p)) * F.vectors') else @@ -680,7 +684,7 @@ function ^(A::Hermitian, p::Integer) end function ^(A::Hermitian{T}, p::Real) where T isinteger(p) && return integerpow(A, p) - F = eigfact(A) + F = eig(A) if all(λ -> λ ≥ 0, F.values) retmat = (F.vectors * Diagonal((F.values).^p)) * F.vectors' if T <: Real @@ -699,12 +703,12 @@ end for func in (:exp, :cos, :sin, :tan, :cosh, :sinh, :tanh, :atan, :asinh, :atanh) @eval begin function ($func)(A::HermOrSym{<:Real}) - F = eigfact(A) + F = eig(A) return Symmetric((F.vectors * Diagonal(($func).(F.values))) * F.vectors') end function ($func)(A::Hermitian{<:Complex}) n = checksquare(A) - F = eigfact(A) + F = eig(A) retmat = (F.vectors * Diagonal(($func).(F.values))) * F.vectors' for i = 1:n retmat[i,i] = real(retmat[i,i]) @@ -717,7 +721,7 @@ end for func in (:acos, :asin) @eval begin function ($func)(A::HermOrSym{<:Real}) - F = eigfact(A) + F = eig(A) if all(λ -> -1 ≤ λ ≤ 1, F.values) retmat = (F.vectors * Diagonal(($func).(F.values))) * F.vectors' else @@ -727,7 +731,7 @@ for func in (:acos, :asin) end function ($func)(A::Hermitian{<:Complex}) n = checksquare(A) - F = eigfact(A) + F = eig(A) if all(λ -> -1 ≤ λ ≤ 1, F.values) retmat = (F.vectors * Diagonal(($func).(F.values))) * F.vectors' for i = 1:n @@ -742,7 +746,7 @@ for func in (:acos, :asin) end function acosh(A::HermOrSym{<:Real}) - F = eigfact(A) + F = eig(A) if all(λ -> λ ≥ 1, F.values) retmat = (F.vectors * Diagonal(acosh.(F.values))) * F.vectors' else @@ -752,7 +756,7 @@ function acosh(A::HermOrSym{<:Real}) end function acosh(A::Hermitian{<:Complex}) n = checksquare(A) - F = eigfact(A) + F = eig(A) if all(λ -> λ ≥ 1, F.values) retmat = (F.vectors * Diagonal(acosh.(F.values))) * F.vectors' for i = 1:n @@ -766,7 +770,7 @@ end function sincos(A::HermOrSym{<:Real}) n = checksquare(A) - F = eigfact(A) + F = eig(A) S, C = Diagonal(similar(A, (n,))), Diagonal(similar(A, (n,))) for i in 1:n S.diag[i], C.diag[i] = sincos(F.values[i]) @@ -775,7 +779,7 @@ function sincos(A::HermOrSym{<:Real}) end function sincos(A::Hermitian{<:Complex}) n = checksquare(A) - F = eigfact(A) + F = eig(A) S, C = Diagonal(similar(A, (n,))), Diagonal(similar(A, (n,))) for i in 1:n S.diag[i], C.diag[i] = sincos(F.values[i]) @@ -792,7 +796,7 @@ end for func in (:log, :sqrt) @eval begin function ($func)(A::HermOrSym{<:Real}) - F = eigfact(A) + F = eig(A) if all(λ -> λ ≥ 0, F.values) retmat = (F.vectors * Diagonal(($func).(F.values))) * F.vectors' else @@ -803,7 +807,7 @@ for func in (:log, :sqrt) function ($func)(A::Hermitian{<:Complex}) n = checksquare(A) - F = eigfact(A) + F = eig(A) if all(λ -> λ ≥ 0, F.values) retmat = (F.vectors * Diagonal(($func).(F.values))) * F.vectors' for i = 1:n diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index c6ef222cacc7b..26bd14dc66b1f 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -2432,7 +2432,7 @@ function logabsdet(A::Union{UpperTriangular{T},LowerTriangular{T}}) where T return abs_det, sgn end -eigfact(A::AbstractTriangular) = Eigen(eigvals(A), eigvecs(A)) 
+eig(A::AbstractTriangular) = Eigen(eigvals(A), eigvecs(A)) # Generic singular systems for func in (:svd, :svdfact, :svdfact!, :svdvals) diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl index c0a040e66bf8b..2f1c0f5f974ba 100644 --- a/stdlib/LinearAlgebra/src/tridiag.jl +++ b/stdlib/LinearAlgebra/src/tridiag.jl @@ -184,16 +184,16 @@ end (\)(T::SymTridiagonal, B::StridedVecOrMat) = ldltfact(T)\B eigfact!(A::SymTridiagonal{<:BlasReal}) = Eigen(LAPACK.stegr!('V', A.dv, A.ev)...) -eigfact(A::SymTridiagonal{T}) where T = eigfact!(copy_oftype(A, eigtype(T))) +eig(A::SymTridiagonal{T}) where T = eigfact!(copy_oftype(A, eigtype(T))) eigfact!(A::SymTridiagonal{<:BlasReal}, irange::UnitRange) = Eigen(LAPACK.stegr!('V', 'I', A.dv, A.ev, 0.0, 0.0, irange.start, irange.stop)...) -eigfact(A::SymTridiagonal{T}, irange::UnitRange) where T = +eig(A::SymTridiagonal{T}, irange::UnitRange) where T = eigfact!(copy_oftype(A, eigtype(T)), irange) eigfact!(A::SymTridiagonal{<:BlasReal}, vl::Real, vu::Real) = Eigen(LAPACK.stegr!('V', 'V', A.dv, A.ev, vl, vu, 0, 0)...) -eigfact(A::SymTridiagonal{T}, vl::Real, vu::Real) where T = +eig(A::SymTridiagonal{T}, vl::Real, vu::Real) where T = eigfact!(copy_oftype(A, eigtype(T)), vl, vu) eigvals!(A::SymTridiagonal{<:BlasReal}) = LAPACK.stev!('N', A.dv, A.ev)[1] @@ -214,7 +214,7 @@ eigmax(A::SymTridiagonal) = eigvals(A, size(A, 1):size(A, 1))[1] eigmin(A::SymTridiagonal) = eigvals(A, 1:1)[1] #Compute selected eigenvectors only corresponding to particular eigenvalues -eigvecs(A::SymTridiagonal) = eigfact(A).vectors +eigvecs(A::SymTridiagonal) = eig(A).vectors """ eigvecs(A::SymTridiagonal[, eigvals]) -> Matrix diff --git a/stdlib/LinearAlgebra/test/dense.jl b/stdlib/LinearAlgebra/test/dense.jl index 9bbd914047462..08ac111b94ee8 100644 --- a/stdlib/LinearAlgebra/test/dense.jl +++ b/stdlib/LinearAlgebra/test/dense.jl @@ -581,19 +581,19 @@ end @test all(z -> (0 < real(z) < π || abs(real(z)) < abstol && imag(z) >= 0 || abs(real(z) - π) < abstol && imag(z) <= 0), - eigfact(acos(A)).values) + eig(acos(A)).values) @test all(z -> (-π/2 < real(z) < π/2 || abs(real(z) + π/2) < abstol && imag(z) >= 0 || abs(real(z) - π/2) < abstol && imag(z) <= 0), - eigfact(asin(A)).values) + eig(asin(A)).values) @test all(z -> (-π < imag(z) < π && real(z) > 0 || 0 <= imag(z) < π && abs(real(z)) < abstol || abs(imag(z) - π) < abstol && real(z) >= 0), - eigfact(acosh(A)).values) + eig(acosh(A)).values) @test all(z -> (-π/2 < imag(z) < π/2 || abs(imag(z) + π/2) < abstol && real(z) <= 0 || abs(imag(z) - π/2) < abstol && real(z) <= 0), - eigfact(asinh(A)).values) + eig(asinh(A)).values) end end end diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl index 06a5d78ba2c84..c97c426b71c8c 100644 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -207,7 +207,7 @@ srand(1) @test factorize(D) == D @testset "Eigensystem" begin - eigD = eigfact(D) + eigD = eig(D) @test Diagonal(eigD.values) ≈ D @test eigD.vectors == Matrix(I, size(D)) end diff --git a/stdlib/LinearAlgebra/test/eigen.jl b/stdlib/LinearAlgebra/test/eigen.jl index eec694ed7f475..e9fc8318e6ef5 100644 --- a/stdlib/LinearAlgebra/test/eigen.jl +++ b/stdlib/LinearAlgebra/test/eigen.jl @@ -37,7 +37,7 @@ aimg = randn(n,n)/2 for i in 1:size(a,2) @test a*v[:,i] ≈ d[i]*v[:,i] end - f = eigfact(a) + f = eig(a) @test det(a) ≈ det(f) @test inv(a) ≈ inv(f) @test isposdef(a) == isposdef(f) @@ -45,7 +45,7 @@ aimg = randn(n,n)/2 @test eigvecs(f) === 
f.vectors @test Array(f) ≈ a - num_fact = eigfact(one(eltya)) + num_fact = eig(one(eltya)) @test num_fact.values[1] == one(eltya) h = asym @test minimum(eigvals(h)) ≈ eigmin(h) @@ -61,7 +61,7 @@ aimg = randn(n,n)/2 asym_sg = view(asym, 1:n1, 1:n1) a_sg = view(a, 1:n, n1+1:n2) end - f = eigfact(asym_sg, a_sg'a_sg) + f = eig(asym_sg, a_sg'a_sg) @test asym_sg*f.vectors ≈ (a_sg'a_sg*f.vectors) * Diagonal(f.values) @test f.values ≈ eigvals(asym_sg, a_sg'a_sg) @test prod(f.values) ≈ prod(eigvals(asym_sg/(a_sg'a_sg))) atol=200ε @@ -82,7 +82,7 @@ aimg = randn(n,n)/2 a1_nsg = view(a, 1:n1, 1:n1) a2_nsg = view(a, n1+1:n2, n1+1:n2) end - f = eigfact(a1_nsg, a2_nsg) + f = eig(a1_nsg, a2_nsg) @test a1_nsg*f.vectors ≈ (a2_nsg*f.vectors) * Diagonal(f.values) @test f.values ≈ eigvals(a1_nsg, a2_nsg) @test prod(f.values) ≈ prod(eigvals(a1_nsg/a2_nsg)) atol=50000ε @@ -109,7 +109,7 @@ end # test a matrix larger than 140-by-140 for #14174 let aa = rand(200, 200) for a in (aa, view(aa, 1:n, 1:n)) - f = eigfact(a) + f = eig(a) @test a ≈ f.vectors * Diagonal(f.values) / f.vectors end end @@ -124,8 +124,8 @@ end @testset "text/plain (REPL) printing of Eigen and GeneralizedEigen" begin A, B = randn(5,5), randn(5,5) - e = eigfact(A) - ge = eigfact(A, B) + e = eig(A) + ge = eig(A, B) valsstring = sprint((t, s) -> show(t, "text/plain", s), e.values) vecsstring = sprint((t, s) -> show(t, "text/plain", s), e.vectors) factstring = sprint((t, s) -> show(t, "text/plain", s), e) diff --git a/stdlib/LinearAlgebra/test/lapack.jl b/stdlib/LinearAlgebra/test/lapack.jl index 642827da3082a..2dcc32813b46b 100644 --- a/stdlib/LinearAlgebra/test/lapack.jl +++ b/stdlib/LinearAlgebra/test/lapack.jl @@ -231,7 +231,7 @@ end @testset for elty in (ComplexF32, ComplexF64) A = rand(elty,10,10) Aw, Avl, Avr = LAPACK.geev!('N','V',copy(A)) - fA = eigfact(A) + fA = eig(A) @test fA.values ≈ Aw @test fA.vectors ≈ Avr end @@ -660,7 +660,7 @@ end # Issue 14065 (and 14220) let A = [NaN NaN; NaN NaN] - @test_throws ArgumentError eigfact(A) + @test_throws ArgumentError eig(A) end end # module TestLAPACK diff --git a/stdlib/LinearAlgebra/test/lu.jl b/stdlib/LinearAlgebra/test/lu.jl index 96eab7b13fc88..73aff7608c7ea 100644 --- a/stdlib/LinearAlgebra/test/lu.jl +++ b/stdlib/LinearAlgebra/test/lu.jl @@ -50,10 +50,10 @@ dimg = randn(n)/2 -2.0 4.0 1.0 -eps(real(one(eltya))); -eps(real(one(eltya)))/4 eps(real(one(eltya)))/2 -1.0 0; -0.5 -0.5 0.1 1.0]) - F = eigfact(A, permute=false, scale=false) + F = eig(A, permute=false, scale=false) eig(A, permute=false, scale=false) @test F.vectors*Diagonal(F.values)/F.vectors ≈ A - F = eigfact(A) + F = eig(A) # @test norm(F.vectors*Diagonal(F.values)/F.vectors - A) > 0.01 end end diff --git a/stdlib/LinearAlgebra/test/symmetric.jl b/stdlib/LinearAlgebra/test/symmetric.jl index 5ff60262510a6..101e5f73085a2 100644 --- a/stdlib/LinearAlgebra/test/symmetric.jl +++ b/stdlib/LinearAlgebra/test/symmetric.jl @@ -222,14 +222,14 @@ end @test asym*v[:,1] ≈ d[1]*v[:,1] @test v*Diagonal(d)*transpose(v) ≈ asym @test isequal(eigvals(asym[1]), eigvals(asym[1:1,1:1])) - @test abs.(eigfact(Symmetric(asym), 1:2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) + @test abs.(eig(Symmetric(asym), 1:2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) eig(Symmetric(asym), 1:2) # same result, but checks that method works - @test abs.(eigfact(Symmetric(asym), d[1] - 1, (d[2] + d[3])/2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) + @test abs.(eig(Symmetric(asym), d[1] - 1, (d[2] + d[3])/2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) eig(Symmetric(asym), d[1] - 1, (d[2] + d[3])/2) # 
same result, but checks that method works @test eigvals(Symmetric(asym), 1:2) ≈ d[1:2] @test eigvals(Symmetric(asym), d[1] - 1, (d[2] + d[3])/2) ≈ d[1:2] - # eigfact doesn't support Symmetric{Complex} - @test Matrix(eigfact(asym)) ≈ asym + # eig doesn't support Symmetric{Complex} + @test Matrix(eig(asym)) ≈ asym @test eigvecs(Symmetric(asym)) ≈ eigvecs(asym) end @@ -237,13 +237,13 @@ end @test aherm*v[:,1] ≈ d[1]*v[:,1] @test v*Diagonal(d)*v' ≈ aherm @test isequal(eigvals(aherm[1]), eigvals(aherm[1:1,1:1])) - @test abs.(eigfact(Hermitian(aherm), 1:2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) + @test abs.(eig(Hermitian(aherm), 1:2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) eig(Hermitian(aherm), 1:2) # same result, but checks that method works - @test abs.(eigfact(Hermitian(aherm), d[1] - 1, (d[2] + d[3])/2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) + @test abs.(eig(Hermitian(aherm), d[1] - 1, (d[2] + d[3])/2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) eig(Hermitian(aherm), d[1] - 1, (d[2] + d[3])/2) # same result, but checks that method works @test eigvals(Hermitian(aherm), 1:2) ≈ d[1:2] @test eigvals(Hermitian(aherm), d[1] - 1, (d[2] + d[3])/2) ≈ d[1:2] - @test Matrix(eigfact(aherm)) ≈ aherm + @test Matrix(eig(aherm)) ≈ aherm @test eigvecs(Hermitian(aherm)) ≈ eigvecs(aherm) # relation to svdvals @@ -365,7 +365,7 @@ end end @testset "Issues #8057 and #8058. f=$f, A=$A" for f in - (eigfact, eigvals, eig), + (eigvals, eig), A in (Symmetric([0 1; 1 0]), Hermitian([0 im; -im 0])) @test_throws ArgumentError f(A, 3, 2) @test_throws ArgumentError f(A, 1:4) diff --git a/stdlib/LinearAlgebra/test/tridiag.jl b/stdlib/LinearAlgebra/test/tridiag.jl index 679ed2dfd20a0..91eec013b91bf 100644 --- a/stdlib/LinearAlgebra/test/tridiag.jl +++ b/stdlib/LinearAlgebra/test/tridiag.jl @@ -253,14 +253,14 @@ end test_approx_eq_vecs(v, evecs) end @testset "stegr! call with index range" begin - F = eigfact(SymTridiagonal(b, a),1:2) - fF = eigfact(Symmetric(Array(SymTridiagonal(b, a))),1:2) + F = eig(SymTridiagonal(b, a),1:2) + fF = eig(Symmetric(Array(SymTridiagonal(b, a))),1:2) test_approx_eq_modphase(F.vectors, fF.vectors) @test F.values ≈ fF.values end @testset "stegr! call with value range" begin - F = eigfact(SymTridiagonal(b, a),0.0,1.0) - fF = eigfact(Symmetric(Array(SymTridiagonal(b, a))),0.0,1.0) + F = eig(SymTridiagonal(b, a),0.0,1.0) + fF = eig(Symmetric(Array(SymTridiagonal(b, a))),0.0,1.0) test_approx_eq_modphase(F.vectors, fF.vectors) @test F.values ≈ fF.values end From c0440d933c962f07b77435b7a55de49fed689dc4 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Fri, 18 May 2018 20:58:31 -0700 Subject: [PATCH 03/21] Deprecate schurfact to schur. --- NEWS.md | 6 +- stdlib/LinearAlgebra/docs/src/index.md | 1 - stdlib/LinearAlgebra/src/LinearAlgebra.jl | 1 - stdlib/LinearAlgebra/src/dense.jl | 22 ++-- stdlib/LinearAlgebra/src/deprecated.jl | 7 ++ stdlib/LinearAlgebra/src/schur.jl | 119 +++++++++++----------- stdlib/LinearAlgebra/test/schur.jl | 8 +- 7 files changed, 86 insertions(+), 78 deletions(-) diff --git a/NEWS.md b/NEWS.md index f7b44425ee90d..7cd1f72bd497a 100644 --- a/NEWS.md +++ b/NEWS.md @@ -233,6 +233,9 @@ This section lists changes that do not have deprecation warnings. * `eig` methods now return decomposition objects such as `Eigen` and `GeneralizedEigen` rather than tuples of arrays or tuples of numbers ([#27159]). + * `schur` methods now return decomposition objects such as `Schur` and + `GeneralizedSchur` rather than tuples of arrays ([#27159]). 
+ * `countlines` now always counts the last non-empty line even if it does not end with EOL, matching the behavior of `eachline` and `readlines` ([#25845]). @@ -681,7 +684,8 @@ Deprecated or removed * The keyword `immutable` is fully deprecated to `struct`, and `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). - * `lufact` and `eigfact` have respectively been deprecated to `lu` and `eig` ([#27159]). + * `lufact`, `eigfact`, and `schurfact` have respectively been deprecated to + `lu`, `eig`, and `schur` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 19e2a6c800089..f8ec8c21ec61d 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -343,7 +343,6 @@ LinearAlgebra.eigvecs LinearAlgebra.eigfact! LinearAlgebra.hessfact LinearAlgebra.hessfact! -LinearAlgebra.schurfact LinearAlgebra.schurfact! LinearAlgebra.schur LinearAlgebra.ordschur diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 17450b0c1e6ac..509395cdd499c 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -133,7 +133,6 @@ export rdiv!, schur, schurfact!, - schurfact, svd, svdfact!, svdfact, diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 4b064b987c874..5949b48aa2b3b 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -638,12 +638,12 @@ function log(A::StridedMatrix) return triu!(parent(log(UpperTriangular(complex(A))))) else if isreal(A) - SchurF = schurfact(real(A)) + SchurF = schur(real(A)) else - SchurF = schurfact(A) + SchurF = schur(A) end if !istriu(SchurF.T) - SchurS = schurfact(complex(SchurF.T)) + SchurS = schur(complex(SchurF.T)) logT = SchurS.Z * log(UpperTriangular(SchurS.T)) * SchurS.Z' return SchurF.Z * logT * SchurF.Z' else @@ -692,7 +692,7 @@ function sqrt(A::StridedMatrix{<:Real}) if istriu(A) return triu!(parent(sqrt(UpperTriangular(A)))) else - SchurF = schurfact(complex(A)) + SchurF = schur(complex(A)) R = triu!(parent(sqrt(UpperTriangular(SchurF.T)))) # unwrapping unnecessary? return SchurF.vectors * R * SchurF.vectors' end @@ -706,7 +706,7 @@ function sqrt(A::StridedMatrix{<:Complex}) if istriu(A) return triu!(parent(sqrt(UpperTriangular(A)))) else - SchurF = schurfact(A) + SchurF = schur(A) R = triu!(parent(sqrt(UpperTriangular(SchurF.T)))) # unwrapping unnecessary? return SchurF.vectors * R * SchurF.vectors' end @@ -944,7 +944,7 @@ function acos(A::AbstractMatrix) acosHermA = acos(Hermitian(A)) return isa(acosHermA, Hermitian) ? copytri!(parent(acosHermA), 'U', true) : parent(acosHermA) end - SchurF = schurfact(complex(A)) + SchurF = schur(complex(A)) U = UpperTriangular(SchurF.T) R = triu!(parent(-im * log(U + im * sqrt(I - U^2)))) return SchurF.Z * R * SchurF.Z' @@ -975,7 +975,7 @@ function asin(A::AbstractMatrix) asinHermA = asin(Hermitian(A)) return isa(asinHermA, Hermitian) ? 
copytri!(parent(asinHermA), 'U', true) : parent(asinHermA)
     end
-    SchurF = schurfact(complex(A))
+    SchurF = schur(complex(A))
     U = UpperTriangular(SchurF.T)
     R = triu!(parent(-im * log(im * U + sqrt(I - U^2))))
     return SchurF.Z * R * SchurF.Z'
@@ -1005,7 +1005,7 @@ function atan(A::AbstractMatrix)
     if ishermitian(A)
         return copytri!(parent(atan(Hermitian(A))), 'U', true)
     end
-    SchurF = schurfact(complex(A))
+    SchurF = schur(complex(A))
     U = im * UpperTriangular(SchurF.T)
     R = triu!(parent(log((I + U) / (I - U)) / 2im))
     return SchurF.Z * R * SchurF.Z'
@@ -1024,7 +1024,7 @@ function acosh(A::AbstractMatrix)
         acoshHermA = acosh(Hermitian(A))
         return isa(acoshHermA, Hermitian) ? copytri!(parent(acoshHermA), 'U', true) : parent(acoshHermA)
     end
-    SchurF = schurfact(complex(A))
+    SchurF = schur(complex(A))
     U = UpperTriangular(SchurF.T)
     R = triu!(parent(log(U + sqrt(U - I) * sqrt(U + I))))
     return SchurF.Z * R * SchurF.Z'
@@ -1042,7 +1042,7 @@ function asinh(A::AbstractMatrix)
     if ishermitian(A)
         return copytri!(parent(asinh(Hermitian(A))), 'U', true)
     end
-    SchurF = schurfact(complex(A))
+    SchurF = schur(complex(A))
     U = UpperTriangular(SchurF.T)
     R = triu!(parent(log(U + sqrt(I + U^2))))
     return SchurF.Z * R * SchurF.Z'
@@ -1060,7 +1060,7 @@ function atanh(A::AbstractMatrix)
     if ishermitian(A)
         return copytri!(parent(atanh(Hermitian(A))), 'U', true)
     end
-    SchurF = schurfact(complex(A))
+    SchurF = schur(complex(A))
     U = UpperTriangular(SchurF.T)
     R = triu!(parent(log((I + U) / (I - U)) / 2))
     return SchurF.Z * R * SchurF.Z'
diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl
index 7c3909178b520..17fb5015aa10e 100644
--- a/stdlib/LinearAlgebra/src/deprecated.jl
+++ b/stdlib/LinearAlgebra/src/deprecated.jl
@@ -1288,3 +1288,10 @@ export eigfact
 @deprecate(eigfact(A::AbstractTriangular), eig(A))
 @deprecate(eigfact(D::Diagonal; permute::Bool=true, scale::Bool=true),
            eig(D; permute=permute, scale=scale))
+
+# deprecate schurfact to schur
+export schurfact
+@deprecate(schurfact(A::StridedMatrix{<:BlasFloat}), schur(A))
+@deprecate(schurfact(A::StridedMatrix{T}) where T, schur(A))
+@deprecate(schurfact(A::StridedMatrix{T},B::StridedMatrix{T}) where {T<:BlasFloat}, schur(A, B))
+@deprecate(schurfact(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB}, schur(A, B))
diff --git a/stdlib/LinearAlgebra/src/schur.jl b/stdlib/LinearAlgebra/src/schur.jl
index ec4595351bfe2..51abb54e50a0b 100644
--- a/stdlib/LinearAlgebra/src/schur.jl
+++ b/stdlib/LinearAlgebra/src/schur.jl
@@ -9,10 +9,24 @@ struct Schur{Ty,S<:AbstractMatrix} <: Factorization{Ty}
 end
 Schur(T::AbstractMatrix{Ty}, Z::AbstractMatrix{Ty}, values::Vector) where {Ty} =
     Schur{Ty, typeof(T)}(T, Z, values)
 
+# iteration for destructuring into components
+Base.iterate(S::Schur) = (S.T, Val(:Z))
+Base.iterate(S::Schur, ::Val{:Z}) = (S.Z, Val(:values))
+Base.iterate(S::Schur, ::Val{:values}) = (S.values, Val(:done))
+Base.iterate(S::Schur, ::Val{:done}) = nothing
+
+# indexing for destructuring into components
+@inline function Base.getindex(S::Schur, i::Integer)
+    i == 1 ? (return S.T) :
+    i == 2 ? (return S.Z) :
+    i == 3 ? (return S.values) :
+        throw(BoundsError(S, i))
+end
+
 """
     schurfact!(A::StridedMatrix) -> F::Schur
 
-Same as [`schurfact`](@ref) but uses the input argument `A` as workspace.
+Same as [`schur`](@ref) but uses the input argument `A` as workspace.
 
 # Examples
@@ -45,13 +59,15 @@ julia> A
 schurfact!(A::StridedMatrix{<:BlasFloat}) = Schur(LinearAlgebra.LAPACK.gees!('V', A)...)
""" - schurfact(A::StridedMatrix) -> F::Schur + schur(A::StridedMatrix) -> F::Schur Computes the Schur factorization of the matrix `A`. The (quasi) triangular Schur factor can be obtained from the `Schur` object `F` with either `F.Schur` or `F.T` and the orthogonal/unitary Schur vectors can be obtained with `F.vectors` or `F.Z` such that `A = F.vectors * F.Schur * F.vectors'`. The eigenvalues of `A` can be obtained with `F.values`. +Iterating the decomposition produces the components `F.T`, `F.Z`, and `F.values`. + # Examples ```jldoctest julia> A = [5. 7.; -2. -4.] @@ -59,7 +75,7 @@ julia> A = [5. 7.; -2. -4.] 5.0 7.0 -2.0 -4.0 -julia> F = schurfact(A) +julia> F = schur(A) Schur{Float64,Array{Float64,2}} T factor: 2×2 Array{Float64,2}: @@ -78,10 +94,21 @@ julia> F.vectors * F.Schur * F.vectors' 2×2 Array{Float64,2}: 5.0 7.0 -2.0 -4.0 + +julia> t, z, vals = F; # destructuring via iteration + +julia> t == F.T && z == F.Z && vals == F.values +true ``` """ -schurfact(A::StridedMatrix{<:BlasFloat}) = schurfact!(copy(A)) -schurfact(A::StridedMatrix{T}) where T = schurfact!(copy_oftype(A, eigtype(T))) +schur(A::StridedMatrix{<:BlasFloat}) = schurfact!(copy(A)) +schur(A::StridedMatrix{T}) where T = schurfact!(copy_oftype(A, eigtype(T))) + +schur(A::Symmetric) = schur(copyto!(similar(parent(A)), A)) +schur(A::Hermitian) = schur(copyto!(similar(parent(A)), A)) +schur(A::UpperTriangular) = schur(copyto!(similar(parent(A)), A)) +schur(A::LowerTriangular) = schur(copyto!(similar(parent(A)), A)) +schur(A::Tridiagonal) = schur(Matrix(A)) function getproperty(F::Schur, d::Symbol) if d == :Schur @@ -106,47 +133,6 @@ function show(io::IO, mime::MIME{Symbol("text/plain")}, F::Schur) show(io, mime, F.values) end -""" - schur(A::StridedMatrix) -> T::Matrix, Z::Matrix, λ::Vector - -Computes the Schur factorization of the matrix `A`. The methods return the (quasi) -triangular Schur factor `T` and the orthogonal/unitary Schur vectors `Z` such that -`A = Z * T * Z'`. The eigenvalues of `A` are returned in the vector `λ`. - -See [`schurfact`](@ref). - -# Examples -```jldoctest -julia> A = [5. 7.; -2. -4.] 
-2×2 Array{Float64,2}: - 5.0 7.0 - -2.0 -4.0 - -julia> T, Z, lambda = schur(A) -([3.0 9.0; 0.0 -2.0], [0.961524 0.274721; -0.274721 0.961524], [3.0, -2.0]) - -julia> Z * Z' -2×2 Array{Float64,2}: - 1.0 0.0 - 0.0 1.0 - -julia> Z * T * Z' -2×2 Array{Float64,2}: - 5.0 7.0 - -2.0 -4.0 -``` -""" -function schur(A::StridedMatrix) - SchurF = schurfact(A) - SchurF.T, SchurF.Z, SchurF.values -end -schur(A::Symmetric) = schur(copyto!(similar(parent(A)), A)) -schur(A::Hermitian) = schur(copyto!(similar(parent(A)), A)) -schur(A::UpperTriangular) = schur(copyto!(similar(parent(A)), A)) -schur(A::LowerTriangular) = schur(copyto!(similar(parent(A)), A)) -schur(A::Tridiagonal) = schur(Matrix(A)) - - """ ordschur!(F::Schur, select::Union{Vector{Bool},BitVector}) -> F::Schur @@ -209,16 +195,36 @@ function GeneralizedSchur(S::AbstractMatrix{Ty}, T::AbstractMatrix{Ty}, alpha::V GeneralizedSchur{Ty, typeof(S)}(S, T, alpha, beta, Q, Z) end +# iteration for destructuring into components +Base.iterate(S::GeneralizedSchur) = (S.S, Val(:T)) +Base.iterate(S::GeneralizedSchur, ::Val{:T}) = (S.T, Val(:Q)) +Base.iterate(S::GeneralizedSchur, ::Val{:Q}) = (S.Q, Val(:Z)) +Base.iterate(S::GeneralizedSchur, ::Val{:Z}) = (S.Z, Val(:α)) +Base.iterate(S::GeneralizedSchur, ::Val{:α}) = (S.α, Val(:β)) +Base.iterate(S::GeneralizedSchur, ::Val{:β}) = (S.β, Val(:done)) +Base.iterate(S::GeneralizedSchur, ::Val{:done}) = nothing + +# indexing for destructuring into components +@inline function Base.getindex(S::GeneralizedSchur, i::Integer) + i == 1 ? (return S.S) : + i == 2 ? (return S.T) : + i == 3 ? (return S.Q) : + i == 4 ? (return S.Z) : + i == 5 ? (return S.α) : + i == 6 ? (return S.β) : + throw(BoundsError(S, i)) +end + """ schurfact!(A::StridedMatrix, B::StridedMatrix) -> F::GeneralizedSchur -Same as [`schurfact`](@ref) but uses the input matrices `A` and `B` as workspace. +Same as [`schur`](@ref) but uses the input matrices `A` and `B` as workspace. """ schurfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = GeneralizedSchur(LinearAlgebra.LAPACK.gges!('V', 'V', A, B)...) """ - schurfact(A::StridedMatrix, B::StridedMatrix) -> F::GeneralizedSchur + schur(A::StridedMatrix, B::StridedMatrix) -> F::GeneralizedSchur Computes the Generalized Schur (or QZ) factorization of the matrices `A` and `B`. The (quasi) triangular Schur factors can be obtained from the `Schur` object `F` with `F.S` @@ -226,9 +232,12 @@ and `F.T`, the left unitary/orthogonal Schur vectors can be obtained with `F.lef `F.Q` and the right unitary/orthogonal Schur vectors can be obtained with `F.right` or `F.Z` such that `A=F.left*F.S*F.right'` and `B=F.left*F.T*F.right'`. The generalized eigenvalues of `A` and `B` can be obtained with `F.α./F.β`. + +Iterating the decomposition produces the components `F.S`, `F.T`, `F.Q`, `F.Z`, +`F.α`, and `F.β`. """ -schurfact(A::StridedMatrix{T},B::StridedMatrix{T}) where {T<:BlasFloat} = schurfact!(copy(A),copy(B)) -function schurfact(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB} +schur(A::StridedMatrix{T},B::StridedMatrix{T}) where {T<:BlasFloat} = schurfact!(copy(A),copy(B)) +function schur(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB} S = promote_type(eigtype(TA), TB) return schurfact!(copy_oftype(A, S), copy_oftype(B, S)) end @@ -300,16 +309,6 @@ end Base.propertynames(F::GeneralizedSchur) = (:values, :left, :right, fieldnames(typeof(F))...) 
-""" - schur(A::StridedMatrix, B::StridedMatrix) -> S::StridedMatrix, T::StridedMatrix, Q::StridedMatrix, Z::StridedMatrix, α::Vector, β::Vector - -See [`schurfact`](@ref). -""" -function schur(A::StridedMatrix, B::StridedMatrix) - SchurF = schurfact(A, B) - SchurF.S, SchurF.T, SchurF.Q, SchurF.Z, SchurF.α, SchurF.β -end - function show(io::IO, mime::MIME{Symbol("text/plain")}, F::GeneralizedSchur) println(io, summary(F)) println(io, "S factor:") diff --git a/stdlib/LinearAlgebra/test/schur.jl b/stdlib/LinearAlgebra/test/schur.jl index 52ba3957fb505..6299e9da71a1d 100644 --- a/stdlib/LinearAlgebra/test/schur.jl +++ b/stdlib/LinearAlgebra/test/schur.jl @@ -27,7 +27,7 @@ aimg = randn(n,n)/2 ε = εa = eps(abs(float(one(eltya)))) d,v = eig(a) - f = schurfact(a) + f = schur(a) @test f.vectors*f.Schur*f.vectors' ≈ a @test sort(real(f.values)) ≈ sort(real(d)) @test sort(imag(f.values)) ≈ sort(imag(d)) @@ -55,7 +55,7 @@ aimg = randn(n,n)/2 # use asym for real schur to enforce tridiag structure # avoiding partly selection of conj. eigenvalues ordschura = eltya <: Complex ? a : asym - S = schurfact(ordschura) + S = schur(ordschura) select = bitrand(n) O = ordschur(S, select) sum(select) != 0 && @test S.values[findall(select)] ≈ O.values[1:sum(select)] @@ -75,7 +75,7 @@ aimg = randn(n,n)/2 a2_sf = view(a, n1+1:n2, n1+1:n2) end @testset "Generalized Schur" begin - f = schurfact(a1_sf, a2_sf) + f = schur(a1_sf, a2_sf) @test f.Q*f.S*f.Z' ≈ a1_sf @test f.Q*f.T*f.Z' ≈ a2_sf @test istriu(f.S) || eltype(a)<:Real @@ -92,7 +92,7 @@ aimg = randn(n,n)/2 @test fstring == "$(summary(f))\nS factor:\n$sstring\nT factor:\n$(tstring)\nQ factor:\n$(qstring)\nZ factor:\n$(zstring)\nα:\n$αstring\nβ:\n$βstring" end @testset "Reorder Generalized Schur" begin - NS = schurfact(a1_sf, a2_sf) + NS = schur(a1_sf, a2_sf) # Currently just testing with selecting gen eig values < 1 select = abs2.(NS.values) .< 1 m = sum(select) From 658318e393430afeb96b7e12d9d1e47095567a99 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sat, 19 May 2018 10:41:22 -0700 Subject: [PATCH 04/21] Deprecate lqfact to lq. --- NEWS.md | 7 +- stdlib/LinearAlgebra/docs/src/index.md | 1 - stdlib/LinearAlgebra/src/LinearAlgebra.jl | 1 - stdlib/LinearAlgebra/src/deprecated.jl | 5 ++ stdlib/LinearAlgebra/src/lq.jl | 79 +++++++++++++---------- stdlib/LinearAlgebra/test/lq.jl | 59 ++++------------- 6 files changed, 67 insertions(+), 85 deletions(-) diff --git a/NEWS.md b/NEWS.md index 7cd1f72bd497a..0db9741b619ed 100644 --- a/NEWS.md +++ b/NEWS.md @@ -236,6 +236,9 @@ This section lists changes that do not have deprecation warnings. * `schur` methods now return decomposition objects such as `Schur` and `GeneralizedSchur` rather than tuples of arrays ([#27159]). + * `lq` methods now return decomposition objects such as `LQ` + rather than tuples of arrays ([#27159]). + * `countlines` now always counts the last non-empty line even if it does not end with EOL, matching the behavior of `eachline` and `readlines` ([#25845]). @@ -684,8 +687,8 @@ Deprecated or removed * The keyword `immutable` is fully deprecated to `struct`, and `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). - * `lufact`, `eigfact`, and `schurfact` have respectively been deprecated to - `lu`, `eig`, and `schur` ([#27159]). + * `lufact`, `eigfact`, `schurfact`, and `lqfact` have respectively been + deprecated to `lu`, `eig`, `schur`, and `lq` ([#27159]). 
* Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index f8ec8c21ec61d..857cedc4bf5be 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -330,7 +330,6 @@ LinearAlgebra.QR LinearAlgebra.QRCompactWY LinearAlgebra.QRPivoted LinearAlgebra.lqfact! -LinearAlgebra.lqfact LinearAlgebra.lq LinearAlgebra.bkfact LinearAlgebra.bkfact! diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 509395cdd499c..9671c0f71404f 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -128,7 +128,6 @@ export qrfact, lq, lqfact!, - lqfact, rank, rdiv!, schur, diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 17fb5015aa10e..b356eec80704f 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1295,3 +1295,8 @@ export schurfact @deprecate(schurfact(A::StridedMatrix{T}) where T, schur(A)) @deprecate(schurfact(A::StridedMatrix{T},B::StridedMatrix{T}) where {T<:BlasFloat}, schur(A)) @deprecate(schurfact(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB}, schur(A)) + +# deprecate lqfact to lq +export lqfact +@deprecate lqfact(A::StridedMatrix{<:BlasFloat}) lq(A) +@deprecate lqfact(x::Number) lq(x) diff --git a/stdlib/LinearAlgebra/src/lq.jl b/stdlib/LinearAlgebra/src/lq.jl index 1e875454e07f1..87a33b5b19f17 100644 --- a/stdlib/LinearAlgebra/src/lq.jl +++ b/stdlib/LinearAlgebra/src/lq.jl @@ -7,16 +7,28 @@ struct LQ{T,S<:AbstractMatrix} <: Factorization{T} τ::Vector{T} LQ{T,S}(factors::AbstractMatrix{T}, τ::Vector{T}) where {T,S<:AbstractMatrix} = new(factors, τ) end +LQ(factors::AbstractMatrix{T}, τ::Vector{T}) where {T} = LQ{T,typeof(factors)}(factors, τ) + +# iteration for destructuring into components +Base.iterate(S::LQ) = (S.L, Val(:Q)) +Base.iterate(S::LQ, ::Val{:Q}) = (S.Q, Val(:done)) +Base.iterate(S::LQ, ::Val{:done}) = nothing + +# indexing for destructuring into components +@inline function Base.getindex(S::LQ, i::Integer) + i == 1 ? (return S.L) : + i == 2 ? (return S.Q) : + throw(BoundsError(S, i)) +end struct LQPackedQ{T,S<:AbstractMatrix} <: AbstractMatrix{T} factors::Matrix{T} τ::Vector{T} LQPackedQ{T,S}(factors::AbstractMatrix{T}, τ::Vector{T}) where {T,S<:AbstractMatrix} = new(factors, τ) end - -LQ(factors::AbstractMatrix{T}, τ::Vector{T}) where {T} = LQ{T,typeof(factors)}(factors, τ) LQPackedQ(factors::AbstractMatrix{T}, τ::Vector{T}) where {T} = LQPackedQ{T,typeof(factors)}(factors, τ) + """ lqfact!(A) -> LQ @@ -25,40 +37,41 @@ matrix as a workspace. See also [`lq`](@ref). """ lqfact!(A::StridedMatrix{<:BlasFloat}) = LQ(LAPACK.gelqf!(A)...) """ - lqfact(A) -> LQ + lq(A) -> S::LQ -Compute the LQ factorization of `A`. See also [`lq`](@ref). -""" -lqfact(A::StridedMatrix{<:BlasFloat}) = lqfact!(copy(A)) -lqfact(x::Number) = lqfact(fill(x,1,1)) +Compute the LQ decomposition of `A`. The decomposition's lower triangular +component can be obtained from the `LQ` object `S` via `S.L`, and the +orthogonal/unitary component via `S.Q`, such that `A ≈ S.L*S.Q`. +Iterating the decomposition produces the components `S.L` and `S.Q`. + +The LQ decomposition is the QR decomposition of `transpose(A)`. + +# Examples +```jldoctest +julia> A = [5. 
7.; -2. -4.] +2×2 Array{Float64,2}: + 5.0 7.0 + -2.0 -4.0 + +julia> S = lq(A) +LQ{Float64,Array{Float64,2}} with factors L and Q: +[-8.60233 0.0; 4.41741 -0.697486] +[-0.581238 -0.813733; -0.813733 0.581238] + +julia> S.L * S.Q +2×2 Array{Float64,2}: + 5.0 7.0 + -2.0 -4.0 + +julia> l, q = S; # destructuring via iteration + +julia> l == S.L && q == S.Q +true +``` """ - lq(A; full = false) -> L, Q - -Perform an LQ factorization of `A` such that `A = L*Q`. The default (`full = false`) -computes a factorization with possibly-rectangular `L` and `Q`, commonly the "thin" -factorization. The LQ factorization is the QR factorization of `transpose(A)`. If the explicit, -full/square form of `Q` is requested via `full = true`, `L` is not extended with zeros. - -!!! note - While in QR factorization the "thin" factorization is so named due to yielding - either a square or "tall"/"thin" rectangular factor `Q`, in LQ factorization the - "thin" factorization somewhat confusingly produces either a square or "short"/"wide" - rectangular factor `Q`. "Thin" factorizations more broadly are also - referred to as "reduced" factorizatons. -""" -function lq(A::Union{Number,AbstractMatrix}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) - # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 - if thin != nothing - Base.depwarn(string("the `thin` keyword argument in `lq(A; thin = $(thin))` has ", - "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g. `lq(A; full = $(!thin))`."), :lq) - full::Bool = !thin - end - F = lqfact(A) - L, Q = F.L, F.Q - return L, !full ? Array(Q) : lmul!(Q, Matrix{eltype(Q)}(I, size(Q.factors, 2), size(Q.factors, 2))) -end +lq(A::StridedMatrix{<:BlasFloat}) = lqfact!(copy(A)) +lq(x::Number) = lq(fill(x,1,1)) copy(A::LQ) = LQ(copy(A.factors), copy(A.τ)) diff --git a/stdlib/LinearAlgebra/test/lq.jl b/stdlib/LinearAlgebra/test/lq.jl index cfa25a2ba87ab..ddee110703fdd 100644 --- a/stdlib/LinearAlgebra/test/lq.jl +++ b/stdlib/LinearAlgebra/test/lq.jl @@ -38,14 +38,14 @@ rectangularQ(Q::LinearAlgebra.LQPackedQ) = convert(Array, Q) α = rand(eltya) aα = fill(α,1,1) - @test lqfact(α).L*lqfact(α).Q ≈ lqfact(aα).L*lqfact(aα).Q - @test lq(α)[1]*lq(α)[2] ≈ lqfact(aα).L*lqfact(aα).Q - @test abs(lqfact(α).Q[1,1]) ≈ one(eltya) + @test lq(α).L*lq(α).Q ≈ lq(aα).L*lq(aα).Q + @test lq(α)[1]*lq(α)[2] ≈ lq(aα).L*lq(aα).Q + @test abs(lq(α).Q[1,1]) ≈ one(eltya) tab = promote_type(eltya,eltyb) for i = 1:2 let a = i == 1 ? a : view(a, 1:n - 1, 1:n - 1), b = i == 1 ? b : view(b, 1:n - 1), n = i == 1 ? n : n - 1 - lqa = lqfact(a) + lqa = lq(a) l,q = lqa.L, lqa.Q qra = qrfact(a) @testset "Basic ops" begin @@ -93,7 +93,7 @@ rectangularQ(Q::LinearAlgebra.LQPackedQ) = convert(Array, Q) end @testset "Matmul with LQ factorizations" begin - lqa = lqfact(a[:,1:n1]) + lqa = lq(a[:,1:n1]) l,q = lqa.L, lqa.Q @test rectangularQ(q)*rectangularQ(q)' ≈ Matrix(I, n1, n1) @test squareQ(q)'*squareQ(q) ≈ Matrix(I, n1, n1) @@ -105,43 +105,6 @@ rectangularQ(Q::LinearAlgebra.LQPackedQ) = convert(Array, Q) end end -@testset "correct form of Q from lq(...) 
(#23729)" begin - # where the original matrix (say A) is square or has more rows than columns, - # then A's factorization's triangular factor (say L) should have the same shape - # as A independent of factorization form ("full", "reduced"/"thin"), and A's factorization's - # orthogonal factor (say Q) should be a square matrix of order of A's number of - # columns independent of factorization form ("full", "reduced"/"thin"), and L and Q - # should have multiplication-compatible shapes. - local m, n = 4, 2 - A = randn(m, n) - for full in (false, true) - L, Q = lq(A, full = full) - @test size(L) == (m, n) - @test size(Q) == (n, n) - @test isapprox(A, L*Q) - end - # where the original matrix has strictly fewer rows than columns ... - m, n = 2, 4 - A = randn(m, n) - # ... then, for a rectangular/"thin" factorization of A, L should be a square matrix - # of order of A's number of rows, Q should have the same shape as A, - # and L and Q should have multiplication-compatible shapes - Lrect, Qrect = lq(A, full = false) - @test size(Lrect) == (m, m) - @test size(Qrect) == (m, n) - @test isapprox(A, Lrect * Qrect) - # ... and, for a full factorization of A, L should have the - # same shape as A, Q should be a square matrix of order of A's number of columns, - # and L and Q should have multiplication-compatible shape. but instead the L returned - # has no zero-padding on the right / is L for the rectangular/"thin" factorization, - # so for L and Q to have multiplication-compatible shapes, L must be zero-padded - # to have the shape of A. - Lfull, Qfull = lq(A, full = true) - @test size(Lfull) == (m, m) - @test size(Qfull) == (n, n) - @test isapprox(A, [Lfull zeros(m, n - m)] * Qfull) -end - @testset "getindex on LQPackedQ (#23733)" begin local m, n function getqs(F::LinearAlgebra.LQ) @@ -152,14 +115,14 @@ end end m, n = 3, 3 # reduced Q 3-by-3, full Q 3-by-3 - implicitQ, explicitQ = getqs(lqfact(randn(m, n))) + implicitQ, explicitQ = getqs(lq(randn(m, n))) @test implicitQ[1, 1] == explicitQ[1, 1] @test implicitQ[m, 1] == explicitQ[m, 1] @test implicitQ[1, n] == explicitQ[1, n] @test implicitQ[m, n] == explicitQ[m, n] m, n = 3, 4 # reduced Q 3-by-4, full Q 4-by-4 - implicitQ, explicitQ = getqs(lqfact(randn(m, n))) + implicitQ, explicitQ = getqs(lq(randn(m, n))) @test implicitQ[1, 1] == explicitQ[1, 1] @test implicitQ[m, 1] == explicitQ[m, 1] @test implicitQ[1, n] == explicitQ[1, n] @@ -168,7 +131,7 @@ end @test implicitQ[m+1, n] == explicitQ[m+1, n] m, n = 4, 3 # reduced Q 3-by-3, full Q 3-by-3 - implicitQ, explicitQ = getqs(lqfact(randn(m, n))) + implicitQ, explicitQ = getqs(lq(randn(m, n))) @test implicitQ[1, 1] == explicitQ[1, 1] @test implicitQ[n, 1] == explicitQ[n, 1] @test implicitQ[1, n] == explicitQ[1, n] @@ -181,7 +144,7 @@ end ((3, 3), 3), # A 3-by-3 => full/square Q 3-by-3 ((3, 4), 4), # A 3-by-4 => full/square Q 4-by-4 ((4, 3), 3) )# A 4-by-3 => full/square Q 3-by-3 - @test size(lqfact(randn(mA, nA)).Q) == (nQ, nQ) + @test size(lq(randn(mA, nA)).Q) == (nQ, nQ) end end @@ -195,7 +158,7 @@ end # A_mul_B*(C, Q) (Ac_mul_B*(C, Q)) operations should work for # *-by-n (n-by-*) C, which we test below via n-by-n C for (mA, nA) in ((3, 3), (3, 4), (4, 3)) - implicitQ, explicitQ = getqs(lqfact(randn(mA, nA))) + implicitQ, explicitQ = getqs(lq(randn(mA, nA))) C = randn(nA, nA) @test *(C, implicitQ) ≈ *(C, explicitQ) @test *(C, adjoint(implicitQ)) ≈ *(C, adjoint(explicitQ)) @@ -212,7 +175,7 @@ end # hence we need also test *-by-m C with # A*_mul_B(C, Q) ops, as below via m-by-m C. 
mA, nA = 3, 4 - implicitQ, explicitQ = getqs(lqfact(randn(mA, nA))) + implicitQ, explicitQ = getqs(lq(randn(mA, nA))) C = randn(mA, mA) zeroextCright = hcat(C, zeros(eltype(C), mA)) zeroextCdown = vcat(C, zeros(eltype(C), (1, mA))) From c022a11e3ef658391715e7f701c417c6caca1042 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sat, 19 May 2018 12:03:47 -0700 Subject: [PATCH 05/21] Deprecate qrfact to qr. --- NEWS.md | 7 +- doc/src/manual/arrays.md | 48 ++--- .../src/IterativeEigensolvers.jl | 4 +- stdlib/IterativeEigensolvers/test/runtests.jl | 4 +- stdlib/LinearAlgebra/docs/src/index.md | 2 - stdlib/LinearAlgebra/src/LinearAlgebra.jl | 1 - stdlib/LinearAlgebra/src/dense.jl | 4 +- stdlib/LinearAlgebra/src/deprecated.jl | 7 + stdlib/LinearAlgebra/src/generic.jl | 2 +- stdlib/LinearAlgebra/src/qr.jl | 166 +++++++----------- stdlib/LinearAlgebra/test/diagonal.jl | 4 +- stdlib/LinearAlgebra/test/lq.jl | 2 +- stdlib/LinearAlgebra/test/qr.jl | 40 ++--- stdlib/LinearAlgebra/test/special.jl | 4 +- stdlib/SparseArrays/src/linalg.jl | 6 +- stdlib/SuiteSparse/src/deprecated.jl | 7 + stdlib/SuiteSparse/src/spqr.jl | 12 +- stdlib/SuiteSparse/test/spqr.jl | 8 +- test/bitarray.jl | 3 +- 19 files changed, 148 insertions(+), 183 deletions(-) diff --git a/NEWS.md b/NEWS.md index 0db9741b619ed..a2d7025377663 100644 --- a/NEWS.md +++ b/NEWS.md @@ -239,6 +239,9 @@ This section lists changes that do not have deprecation warnings. * `lq` methods now return decomposition objects such as `LQ` rather than tuples of arrays ([#27159]). + * `qr` methods now return decomposition objects such as `QR`, `QRPivoted`, + and `QRCompactWY` rather than tuples of arrays ([#27159]). + * `countlines` now always counts the last non-empty line even if it does not end with EOL, matching the behavior of `eachline` and `readlines` ([#25845]). @@ -687,8 +690,8 @@ Deprecated or removed * The keyword `immutable` is fully deprecated to `struct`, and `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). - * `lufact`, `eigfact`, `schurfact`, and `lqfact` have respectively been - deprecated to `lu`, `eig`, `schur`, and `lq` ([#27159]). + * `lufact`, `eigfact`, `schurfact`, `lqfact`, and `qrfact` have respectively + been deprecated to `lu`, `eig`, `schur`, `lq`, and `qr` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/doc/src/manual/arrays.md b/doc/src/manual/arrays.md index c778d8ebffb10..a75024777979a 100644 --- a/doc/src/manual/arrays.md +++ b/doc/src/manual/arrays.md @@ -746,37 +746,37 @@ creating any temporaries, and by calling the appropriate LAPACK function with th dimension size and stride parameters. 
```julia-repl -julia> a = rand(10,10) +julia> a = rand(10, 10) 10×10 Array{Float64,2}: - 0.561255 0.226678 0.203391 0.308912 … 0.750307 0.235023 0.217964 - 0.718915 0.537192 0.556946 0.996234 0.666232 0.509423 0.660788 - 0.493501 0.0565622 0.118392 0.493498 0.262048 0.940693 0.252965 - 0.0470779 0.736979 0.264822 0.228787 0.161441 0.897023 0.567641 - 0.343935 0.32327 0.795673 0.452242 0.468819 0.628507 0.511528 - 0.935597 0.991511 0.571297 0.74485 … 0.84589 0.178834 0.284413 - 0.160706 0.672252 0.133158 0.65554 0.371826 0.770628 0.0531208 - 0.306617 0.836126 0.301198 0.0224702 0.39344 0.0370205 0.536062 - 0.890947 0.168877 0.32002 0.486136 0.096078 0.172048 0.77672 - 0.507762 0.573567 0.220124 0.165816 0.211049 0.433277 0.539476 + 0.517515 0.0348206 0.749042 0.0979679 … 0.75984 0.950481 0.579513 + 0.901092 0.873479 0.134533 0.0697848 0.0586695 0.193254 0.726898 + 0.976808 0.0901881 0.208332 0.920358 0.288535 0.705941 0.337137 + 0.657127 0.0317896 0.772837 0.534457 0.0966037 0.700694 0.675999 + 0.471777 0.144969 0.0718405 0.0827916 0.527233 0.173132 0.694304 + 0.160872 0.455168 0.489254 0.827851 … 0.62226 0.0995456 0.946522 + 0.291857 0.769492 0.68043 0.629461 0.727558 0.910796 0.834837 + 0.775774 0.700731 0.700177 0.0126213 0.00822304 0.327502 0.955181 + 0.9715 0.64354 0.848441 0.241474 0.591611 0.792573 0.194357 + 0.646596 0.575456 0.0995212 0.038517 0.709233 0.477657 0.0507231 julia> b = view(a, 2:2:8,2:2:4) -4×2 SubArray{Float64,2,Array{Float64,2},Tuple{StepRange{Int64,Int64},StepRange{Int64,Int64}},false}: - 0.537192 0.996234 - 0.736979 0.228787 - 0.991511 0.74485 - 0.836126 0.0224702 +4×2 view(::Array{Float64,2}, 2:2:8, 2:2:4) with eltype Float64: + 0.873479 0.0697848 + 0.0317896 0.534457 + 0.455168 0.827851 + 0.700731 0.0126213 -julia> (q,r) = qr(b); +julia> (q, r) = qr(b); julia> q -4×2 Array{Float64,2}: - -0.338809 0.78934 - -0.464815 -0.230274 - -0.625349 0.194538 - -0.527347 -0.534856 +4×4 LinearAlgebra.QRCompactWYQ{Float64,Array{Float64,2}}: + -0.722358 0.227524 -0.247784 -0.604181 + -0.0262896 -0.575919 -0.804227 0.144377 + -0.376419 -0.75072 0.540177 -0.0541979 + -0.579497 0.230151 -0.00552346 0.781782 julia> r 2×2 Array{Float64,2}: - -1.58553 -0.921517 - 0.0 0.866567 + -1.20921 -0.383393 + 0.0 -0.910506 ``` diff --git a/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl b/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl index 547708776dae6..e36f2e79670b6 100644 --- a/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl +++ b/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl @@ -317,10 +317,10 @@ function _svds(X; nsv::Int = 6, ritzvec::Bool = true, tol::Float64 = 0.0, maxite # left_sv = sqrt(2) * ex[2][ 1:size(X,1), ind ] .* sign.(ex[1][ind]') if size(X, 1) >= size(X, 2) V = ex[2] - U = qr(rmul!(X*V, Diagonal(inv.(svals))))[1] + U = Array(qr(rmul!(X*V, Diagonal(inv.(svals)))).Q) else U = ex[2] - V = qr(rmul!(X'U, Diagonal(inv.(svals))))[1] + V = Array(qr(rmul!(X'U, Diagonal(inv.(svals)))).Q) end # right_sv = sqrt(2) * ex[2][ size(X,1)+1:end, ind ] diff --git a/stdlib/IterativeEigensolvers/test/runtests.jl b/stdlib/IterativeEigensolvers/test/runtests.jl index 6cbad717dec5d..348b7cff1f5d4 100644 --- a/stdlib/IterativeEigensolvers/test/runtests.jl +++ b/stdlib/IterativeEigensolvers/test/runtests.jl @@ -152,8 +152,8 @@ LinearAlgebra.A_mul_B!(rho2::StridedVector{T},Phi::CPM{T},rho::StridedVector{T}) let # Generate random isometry - (Q,R) = qr(randn(100,50)) - Q = reshape(Q,(50,2,50)) + (Q, R) = qr(randn(100, 50)) + Q = reshape(Array(Q), (50, 2, 50)) # 
Construct trace-preserving completely positive map from this Phi = CPM(copy(Q)) (d,v,nconv,numiter,numop,resid) = eigs(Phi,nev=1,which=:LM) diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 857cedc4bf5be..6ce590952cab9 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -323,8 +323,6 @@ LinearAlgebra.lowrankdowndate! LinearAlgebra.ldltfact LinearAlgebra.ldltfact! LinearAlgebra.qr -LinearAlgebra.qr! -LinearAlgebra.qrfact LinearAlgebra.qrfact! LinearAlgebra.QR LinearAlgebra.QRCompactWY diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 9671c0f71404f..8df4612565b6f 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -125,7 +125,6 @@ export pinv, qr, qrfact!, - qrfact, lq, lqfact!, rank, diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 5949b48aa2b3b..58b3d59f9201c 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -1121,7 +1121,7 @@ systems. For example: `A=factorize(A); x=A\\b; y=A\\C`. | Tridiagonal | LU (see [`lu`](@ref)) | | Symmetric real tridiagonal | LDLt (see [`ldltfact`](@ref)) | | General square | LU (see [`lu`](@ref)) | -| General non-square | QR (see [`qrfact`](@ref)) | +| General non-square | QR (see [`qr`](@ref)) | If `factorize` is called on a Hermitian positive-definite matrix, for instance, then `factorize` will return a Cholesky factorization. @@ -1220,7 +1220,7 @@ function factorize(A::StridedMatrix{T}) where T end return lu(A) end - qrfact(A, Val(true)) + qr(A, Val(true)) end factorize(A::Adjoint) = adjoint(factorize(parent(A))) factorize(A::Transpose) = transpose(factorize(parent(A))) diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index b356eec80704f..267f40dcd2084 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1300,3 +1300,10 @@ export schurfact export lqfact @deprecate lqfact(A::StridedMatrix{<:BlasFloat}) lq(A) @deprecate lqfact(x::Number) lq(x) + +# deprecate qrfact to qr +export qrfact +@deprecate(qrfact(x::Number), qr(x)) +@deprecate(qrfact(v::AbstractVector), qr(v)) +@deprecate(qrfact(A::AbstractMatrix{T}) where T, qr(A)) +@deprecate(qrfact(A::AbstractMatrix{T}, arg) where T, qr(A, arg)) diff --git a/stdlib/LinearAlgebra/src/generic.jl b/stdlib/LinearAlgebra/src/generic.jl index 43ab58bbd4522..8c22d377cb716 100644 --- a/stdlib/LinearAlgebra/src/generic.jl +++ b/stdlib/LinearAlgebra/src/generic.jl @@ -864,7 +864,7 @@ function (\)(A::AbstractMatrix, B::AbstractVecOrMat) end return lu(A) \ B end - return qrfact(A,Val(true)) \ B + return qr(A,Val(true)) \ B end (\)(a::AbstractVector, b::AbstractArray) = pinv(a) * b diff --git a/stdlib/LinearAlgebra/src/qr.jl b/stdlib/LinearAlgebra/src/qr.jl index 2e7f0eb437402..9374a3fa594c4 100644 --- a/stdlib/LinearAlgebra/src/qr.jl +++ b/stdlib/LinearAlgebra/src/qr.jl @@ -5,7 +5,7 @@ QR <: Factorization A QR matrix factorization stored in a packed format, typically obtained from -[`qrfact`](@ref). If ``A`` is an `m`×`n` matrix, then +[`qr`](@ref). If ``A`` is an `m`×`n` matrix, then ```math A = Q R @@ -19,6 +19,8 @@ and coefficients ``\\tau_i`` where: Q = \\prod_{i=1}^{\\min(m,n)} (I - \\tau_i v_i v_i^T). ``` +Iterating the decomposition produces the components `Q` and `R`. + The object has two fields: * `factors` is an `m`×`n` matrix. 
@@ -39,12 +41,25 @@ struct QR{T,S<:AbstractMatrix} <: Factorization{T} end QR(factors::AbstractMatrix{T}, τ::Vector{T}) where {T} = QR{T,typeof(factors)}(factors, τ) +# iteration for destructuring into components +Base.iterate(S::QR) = (S.Q, Val(:R)) +Base.iterate(S::QR, ::Val{:R}) = (S.R, Val(:done)) +Base.iterate(S::QR, ::Val{:done}) = nothing + +# indexing for destructuring into components +@inline function Base.getindex(S::QR, i::Integer) + i == 1 ? (return S.Q) : + i == 2 ? (return S.R) : + throw(BoundsError(S, i)) +end + + # Note. For QRCompactWY factorization without pivoting, the WY representation based method introduced in LAPACK 3.4 """ QRCompactWY <: Factorization A QR matrix factorization stored in a compact blocked format, typically obtained from -[`qrfact`](@ref). If ``A`` is an `m`×`n` matrix, then +[`qr`](@ref). If ``A`` is an `m`×`n` matrix, then ```math A = Q R @@ -62,6 +77,8 @@ Q = \\prod_{i=1}^{\\min(m,n)} (I - \\tau_i v_i v_i^T) = I - V T V^T such that ``v_i`` is the ``i``th column of ``V``, and ``\tau_i`` is the ``i``th diagonal element of ``T``. +Iterating the decomposition produces the components `Q` and `R`. + The object has two fields: * `factors`, as in the [`QR`](@ref) type, is an `m`×`n` matrix. @@ -92,11 +109,23 @@ struct QRCompactWY{S,M<:AbstractMatrix} <: Factorization{S} end QRCompactWY(factors::AbstractMatrix{S}, T::AbstractMatrix{S}) where {S} = QRCompactWY{S,typeof(factors)}(factors, T) +# iteration for destructuring into components +Base.iterate(S::QRCompactWY) = (S.Q, Val(:R)) +Base.iterate(S::QRCompactWY, ::Val{:R}) = (S.R, Val(:done)) +Base.iterate(S::QRCompactWY, ::Val{:done}) = nothing + +# indexing for destructuring into components +@inline function Base.getindex(S::QRCompactWY, i::Integer) + i == 1 ? (return S.Q) : + i == 2 ? (return S.R) : + throw(BoundsError(S, i)) +end + """ QRPivoted <: Factorization A QR matrix factorization with column pivoting in a packed format, typically obtained from -[`qrfact`](@ref). If ``A`` is an `m`×`n` matrix, then +[`qr`](@ref). If ``A`` is an `m`×`n` matrix, then ```math A P = Q R @@ -109,6 +138,8 @@ upper triangular. The matrix ``Q`` is stored as a sequence of Householder reflec Q = \\prod_{i=1}^{\\min(m,n)} (I - \\tau_i v_i v_i^T). ``` +Iterating the decomposition produces the components `Q`, `R`, and `p`. + The object has three fields: * `factors` is an `m`×`n` matrix. @@ -133,6 +164,21 @@ end QRPivoted(factors::AbstractMatrix{T}, τ::Vector{T}, jpvt::Vector{BlasInt}) where {T} = QRPivoted{T,typeof(factors)}(factors, τ, jpvt) +# iteration for destructuring into components +Base.iterate(S::QRPivoted) = (S.Q, Val(:R)) +Base.iterate(S::QRPivoted, ::Val{:R}) = (S.R, Val(:p)) +Base.iterate(S::QRPivoted, ::Val{:p}) = (S.p, Val(:done)) +Base.iterate(S::QRPivoted, ::Val{:done}) = nothing + +# indexing for destructuring into components +@inline function Base.getindex(S::QRPivoted, i::Integer) + i == 1 ? (return S.Q) : + i == 2 ? (return S.R) : + i == 3 ? (return S.p) : + throw(BoundsError(S, i)) +end + + function qrfactUnblocked!(A::AbstractMatrix{T}) where {T} m, n = size(A) τ = zeros(T, min(m,n)) @@ -203,7 +249,7 @@ qrfact!(A::StridedMatrix{<:BlasFloat}) = qrfact!(A, Val(false)) """ qrfact!(A, pivot=Val(false)) -`qrfact!` is the same as [`qrfact`](@ref) when `A` is a subtype of +`qrfact!` is the same as [`qr`](@ref) when `A` is a subtype of `StridedMatrix`, but saves space by overwriting the input `A`, instead of creating a copy. 
An [`InexactError`](@ref) exception is thrown if the factorization produces a number not representable by the element type of `A`, e.g. for integer types. @@ -244,7 +290,7 @@ qrfact!(A::StridedMatrix) = qrfact!(A, Val(false)) _qreltype(::Type{T}) where T = typeof(zero(T)/sqrt(abs2(one(T)))) """ - qrfact(A, pivot=Val(false)) -> F + qr(A, pivot=Val(false)) -> F Compute the QR factorization of the matrix `A`: an orthogonal (or unitary if `A` is complex-valued) matrix `Q`, and an upper triangular matrix `R` such that @@ -269,6 +315,8 @@ The individual components of the factorization `F` can be accessed by indexing w - `F.p`: the permutation vector of the pivot ([`QRPivoted`](@ref) only) - `F.P`: the permutation matrix of the pivot ([`QRPivoted`](@ref) only) +Iterating the decomposition produces the components `Q`, `R`, and if extant `p`. + The following functions are available for the `QR` objects: [`inv`](@ref), [`size`](@ref), and [`\\`](@ref). When `A` is rectangular, `\\` will return a least squares solution and if the solution is not unique, the one with smallest norm is returned. @@ -285,7 +333,7 @@ julia> A = [3.0 -6.0; 4.0 -8.0; 0.0 1.0] 4.0 -8.0 0.0 1.0 -julia> F = qrfact(A) +julia> F = qr(A) LinearAlgebra.QRCompactWY{Float64,Array{Float64,2}} Q factor: 3×3 LinearAlgebra.QRCompactWYQ{Float64,Array{Float64,2}}: @@ -302,119 +350,23 @@ true ``` !!! note - `qrfact` returns multiple types because LAPACK uses several representations + `qr` returns multiple types because LAPACK uses several representations that minimize the memory storage requirements of products of Householder elementary reflectors, so that the `Q` and `R` matrices can be stored compactly rather as two separate dense matrices. """ -function qrfact(A::AbstractMatrix{T}, arg) where T +function qr(A::AbstractMatrix{T}, arg) where T AA = similar(A, _qreltype(T), size(A)) copyto!(AA, A) return qrfact!(AA, arg) end -function qrfact(A::AbstractMatrix{T}) where T +function qr(A::AbstractMatrix{T}) where T AA = similar(A, _qreltype(T), size(A)) copyto!(AA, A) return qrfact!(AA) end -qrfact(x::Number) = qrfact(fill(x,1,1)) - -""" - qr(A, pivot=Val(false); full::Bool = false) -> Q, R, [p] - -Compute the (pivoted) QR factorization of `A` such that either `A = Q*R` or `A[:,p] = Q*R`. -Also see [`qrfact`](@ref). -The default is to compute a "thin" factorization. Note that `R` is not -extended with zeros when a full/square orthogonal factor `Q` is requested (via `full = true`). -""" -function qr(A::Union{Number,AbstractMatrix}, pivot::Union{Val{false},Val{true}} = Val(false); - full::Bool = false, thin::Union{Bool,Nothing} = nothing) - # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 - if thin != nothing - Base.depwarn(string("the `thin` keyword argument in `qr(A, pivot; thin = $(thin))` has ", - "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g. `qr(A, pivot; full = $(!thin))`."), :qr) - full::Bool = !thin - end - return _qr(A, pivot, full = full) -end -function _qr(A::Union{Number,AbstractMatrix}, ::Val{false}; full::Bool = false) - F = qrfact(A, Val(false)) - Q, R = F.Q, F.R - sQf1 = size(Q.factors, 1) - return (!full ? Array(Q) : lmul!(Q, Matrix{eltype(Q)}(I, sQf1, sQf1))), R -end -function _qr(A::Union{Number, AbstractMatrix}, ::Val{true}; full::Bool = false) - F = qrfact(A, Val(true)) - Q, R, p = F.Q, F.R, F.p - sQf1 = size(Q.factors, 1) - return (!full ? 
Array(Q) : lmul!(Q, Matrix{eltype(Q)}(I, sQf1, sQf1))), R, p -end - -""" - qr(v::AbstractVector) -> w, r - -Computes the polar decomposition of a vector. -Returns `w`, a unit vector in the direction of `v`, and -`r`, the norm of `v`. - -See also [`normalize`](@ref), [`normalize!`](@ref), -and [`LinearAlgebra.qr!`](@ref). - -# Examples -```jldoctest -julia> v = [1; 2] -2-element Array{Int64,1}: - 1 - 2 - -julia> w, r = qr(v) -([0.447214, 0.894427], 2.23606797749979) - -julia> w*r == v -true -``` -""" -function qr(v::AbstractVector) - nrm = norm(v) - if !isempty(v) - vv = copy_oftype(v, typeof(v[1]/nrm)) - return __normalize!(vv, nrm), nrm - else - T = typeof(zero(eltype(v))/nrm) - return T[], oneunit(T) - end -end - -""" - LinearAlgebra.qr!(v::AbstractVector) -> w, r - -Computes the polar decomposition of a vector. Instead of returning a new vector -as `qr(v::AbstractVector)`, this function mutates the input vector `v` in place. -Returns `w`, a unit vector in the direction of `v` (this is a mutation of `v`), -and `r`, the norm of `v`. - -See also [`normalize`](@ref), [`normalize!`](@ref), -and [`qr`](@ref). - -# Examples -```jldoctest -julia> v = [1.; 2.] -2-element Array{Float64,1}: - 1.0 - 2.0 - -julia> w, r = LinearAlgebra.qr!(v) -([0.447214, 0.894427], 2.23606797749979) - -julia> w === v -true -``` -""" -function qr!(v::AbstractVector) - nrm = norm(v) - __normalize!(v, nrm), nrm -end +qr(x::Number) = qr(fill(x,1,1)) +qr(v::AbstractVector) = qr(reshape(v, (length(v), 1))) # Conversions QR{T}(A::QR) where {T} = QR(convert(AbstractMatrix{T}, A.factors), convert(Vector{T}, A.τ)) diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl index c97c426b71c8c..ba3d1a72182af 100644 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -360,9 +360,9 @@ end @testset "multiplication of QR Q-factor and Diagonal (#16615 spot test)" begin D = Diagonal(randn(5)) - Q = qrfact(randn(5, 5)).Q + Q = qr(randn(5, 5)).Q @test D * Q' == Array(D) * Q' - Q = qrfact(randn(5, 5), Val(true)).Q + Q = qr(randn(5, 5), Val(true)).Q @test_throws ArgumentError lmul!(Q, D) end diff --git a/stdlib/LinearAlgebra/test/lq.jl b/stdlib/LinearAlgebra/test/lq.jl index ddee110703fdd..f8ab28755d06b 100644 --- a/stdlib/LinearAlgebra/test/lq.jl +++ b/stdlib/LinearAlgebra/test/lq.jl @@ -47,7 +47,7 @@ rectangularQ(Q::LinearAlgebra.LQPackedQ) = convert(Array, Q) let a = i == 1 ? a : view(a, 1:n - 1, 1:n - 1), b = i == 1 ? b : view(b, 1:n - 1), n = i == 1 ? 
n : n - 1 lqa = lq(a) l,q = lqa.L, lqa.Q - qra = qrfact(a) + qra = qr(a) @testset "Basic ops" begin @test size(lqa,1) == size(a,1) @test size(lqa,3) == 1 diff --git a/stdlib/LinearAlgebra/test/qr.jl b/stdlib/LinearAlgebra/test/qr.jl index 5bd73c4ccd429..ebfa56888671c 100644 --- a/stdlib/LinearAlgebra/test/qr.jl +++ b/stdlib/LinearAlgebra/test/qr.jl @@ -40,15 +40,15 @@ rectangularQ(Q::LinearAlgebra.AbstractQ) = convert(Array, Q) @testset "QR decomposition of a Number" begin α = rand(eltyb) aα = fill(α, 1, 1) - @test qrfact(α).Q * qrfact(α).R ≈ qrfact(aα).Q * qrfact(aα).R - @test abs(qrfact(α).Q[1,1]) ≈ one(eltyb) + @test qr(α).Q * qr(α).R ≈ qr(aα).Q * qr(aα).R + @test abs(qr(α).Q[1,1]) ≈ one(eltyb) end for (a, b) in ((raw_a, raw_b), (view(raw_a, 1:n-1, 1:n-1), view(raw_b, 1:n-1, 1))) a_1 = size(a, 1) @testset "QR decomposition (without pivoting)" begin - qra = @inferred qrfact(a) + qra = @inferred qr(a) @inferred qr(a) q, r = qra.Q, qra.R @test_throws ErrorException qra.Z @@ -73,7 +73,7 @@ rectangularQ(Q::LinearAlgebra.AbstractQ) = convert(Array, Q) @test qrstring == "$(summary(qra))\nQ factor:\n$qstring\nR factor:\n$rstring" end @testset "Thin QR decomposition (without pivoting)" begin - qra = @inferred qrfact(a[:, 1:n1], Val(false)) + qra = @inferred qr(a[:, 1:n1], Val(false)) @inferred qr(a[:, 1:n1], Val(false)) q,r = qra.Q, qra.R @test_throws ErrorException qra.Z @@ -91,7 +91,6 @@ rectangularQ(Q::LinearAlgebra.AbstractQ) = convert(Array, Q) end end @testset "(Automatic) Fat (pivoted) QR decomposition" begin - @inferred qrfact(a, Val(true)) @inferred qr(a, Val(true)) qrpa = factorize(a[1:n1,:]) @@ -150,7 +149,7 @@ rectangularQ(Q::LinearAlgebra.AbstractQ) = convert(Array, Q) @test_throws DimensionMismatch LinearAlgebra.lmul!(q,zeros(eltya,n1+1)) @test_throws DimensionMismatch LinearAlgebra.lmul!(adjoint(q), zeros(eltya,n1+1)) - qra = qrfact(a[:,1:n1], Val(false)) + qra = qr(a[:,1:n1], Val(false)) q, r = qra.Q, qra.R @test rmul!(copy(squareQ(q)'), q) ≈ Matrix(I, n, n) @test_throws DimensionMismatch rmul!(Matrix{eltya}(I, n+1, n+1),q) @@ -164,17 +163,17 @@ rectangularQ(Q::LinearAlgebra.AbstractQ) = convert(Array, Q) end @testset "transpose errors" begin - @test_throws MethodError transpose(qrfact(randn(3,3))) - @test_throws MethodError adjoint(qrfact(randn(3,3))) - @test_throws MethodError transpose(qrfact(randn(3,3), Val(false))) - @test_throws MethodError adjoint(qrfact(randn(3,3), Val(false))) - @test_throws MethodError transpose(qrfact(big.(randn(3,3)))) - @test_throws MethodError adjoint(qrfact(big.(randn(3,3)))) + @test_throws MethodError transpose(qr(randn(3,3))) + @test_throws MethodError adjoint(qr(randn(3,3))) + @test_throws MethodError transpose(qr(randn(3,3), Val(false))) + @test_throws MethodError adjoint(qr(randn(3,3), Val(false))) + @test_throws MethodError transpose(qr(big.(randn(3,3)))) + @test_throws MethodError adjoint(qr(big.(randn(3,3)))) end @testset "Issue 7304" begin A = [-√.5 -√.5; -√.5 √.5] - Q = rectangularQ(qrfact(A).Q) + Q = rectangularQ(qr(A).Q) @test vecnorm(A-Q) < eps() end @@ -184,15 +183,16 @@ end for T in (Tr, Complex{Tr}) v = convert(Vector{T}, vr) nv, nm = qr(v) - @test norm(nv - [0.6, 0.8], Inf) < eps(Tr) - @test nm == 5.0 + @test norm(nv - [-0.6 -0.8; -0.8 0.6], Inf) < eps(Tr) + @test nm == fill(-5.0, 1, 1) end end end @testset "QR on Ints" begin - @test qr(Int[]) == (Int[],1) - @test LinearAlgebra.qr!(Int[1]) == (Int[1],1) + # not sure what to do about this edge case now that we build decompositions + # for qr(...), so for now just commenting this out 
+ # @test qr(Int[]) == (Int[],1) B = rand(7,2) @test (1:7)\B ≈ Vector(1:7)\B @@ -206,7 +206,7 @@ end A = zeros(1, 2) B = zeros(1, 1) @test A \ B == zeros(2, 1) - @test qrfact(A, Val(true)) \ B == zeros(2, 1) + @test qr(A, Val(true)) \ B == zeros(2, 1) end @testset "Issue 24107" begin @@ -224,8 +224,8 @@ end Ac = copy(A') b = randn(3) c = randn(2) - @test A \b ≈ ldiv!(c, qrfact(A ), b) - @test Ac\c ≈ ldiv!(b, qrfact(Ac, Val(true)), c) + @test A \b ≈ ldiv!(c, qr(A ), b) + @test Ac\c ≈ ldiv!(b, qr(Ac, Val(true)), c) end end # module TestQR diff --git a/stdlib/LinearAlgebra/test/special.jl b/stdlib/LinearAlgebra/test/special.jl index e63929d9497f1..07bca10a432e9 100644 --- a/stdlib/LinearAlgebra/test/special.jl +++ b/stdlib/LinearAlgebra/test/special.jl @@ -116,10 +116,10 @@ end a = rand(n,n) atri = typ(a) b = rand(n,n) - qrb = qrfact(b,Val(true)) + qrb = qr(b,Val(true)) @test *(atri, adjoint(qrb.Q)) ≈ Matrix(atri) * qrb.Q' @test rmul!(copy(atri), adjoint(qrb.Q)) ≈ Matrix(atri) * qrb.Q' - qrb = qrfact(b,Val(false)) + qrb = qr(b,Val(false)) @test *(atri, adjoint(qrb.Q)) ≈ Matrix(atri) * qrb.Q' @test rmul!(copy(atri), adjoint(qrb.Q)) ≈ Matrix(atri) * qrb.Q' end diff --git a/stdlib/SparseArrays/src/linalg.jl b/stdlib/SparseArrays/src/linalg.jl index 7306c9af4216f..fbd3e5b4ce3eb 100644 --- a/stdlib/SparseArrays/src/linalg.jl +++ b/stdlib/SparseArrays/src/linalg.jl @@ -939,7 +939,7 @@ function \(A::SparseMatrixCSC, B::AbstractVecOrMat) end return \(lu(A), B) else - return \(qrfact(A), B) + return \(qr(A), B) end end for (xformtype, xformop) in ((:Adjoint, :adjoint), (:Transpose, :transpose)) @@ -962,7 +962,7 @@ for (xformtype, xformop) in ((:Adjoint, :adjoint), (:Transpose, :transpose)) end return \($xformop(lu(A)), B) else - return \($xformop(qrfact(A)), B) + return \($xformop(qr(A)), B) end end end @@ -985,7 +985,7 @@ function factorize(A::SparseMatrixCSC) end return lu(A) else - return qrfact(A) + return qr(A) end end diff --git a/stdlib/SuiteSparse/src/deprecated.jl b/stdlib/SuiteSparse/src/deprecated.jl index f5e5e5e157b7c..2576e419ded98 100644 --- a/stdlib/SuiteSparse/src/deprecated.jl +++ b/stdlib/SuiteSparse/src/deprecated.jl @@ -55,3 +55,10 @@ end @deprecate(lufact(A::SparseMatrixCSC{<:Union{ComplexF16,ComplexF32},Ti}) where {Ti<:UMFITypes}, lu(A)) @deprecate(lufact(A::Union{SparseMatrixCSC{T},SparseMatrixCSC{Complex{T}}}) where {T<:AbstractFloat}, lu(A)) end + +# deprecate qrfact to qr +@eval SuiteSparse.SPQR begin + import LinearAlgebra: qrfact + @deprecate(qrfact(A::SparseMatrixCSC{Tv}; tol = _default_tol(A)) where {Tv<:Union{ComplexF64,Float64}}, qr(A; tol=tol)) + @deprecate(qrfact(A::SparseMatrixCSC; tol = _default_tol(A)), qr(A; tol=tol)) +end diff --git a/stdlib/SuiteSparse/src/spqr.jl b/stdlib/SuiteSparse/src/spqr.jl index 05467cdc4bef0..5059f1d75a587 100644 --- a/stdlib/SuiteSparse/src/spqr.jl +++ b/stdlib/SuiteSparse/src/spqr.jl @@ -136,7 +136,7 @@ Base.size(Q::QRSparseQ) = (size(Q.factors, 1), size(Q.factors, 1)) _default_tol(A::SparseMatrixCSC) = 20*sum(size(A))*eps(real(eltype(A)))*maximum(norm(view(A, :, i)) for i in 1:size(A, 2)) -function LinearAlgebra.qrfact(A::SparseMatrixCSC{Tv}; tol = _default_tol(A)) where {Tv <: CHOLMOD.VTypes} +function LinearAlgebra.qr(A::SparseMatrixCSC{Tv}; tol = _default_tol(A)) where {Tv <: CHOLMOD.VTypes} R = Ref{Ptr{CHOLMOD.C_Sparse{Tv}}}() E = Ref{Ptr{CHOLMOD.SuiteSparse_long}}() H = Ref{Ptr{CHOLMOD.C_Sparse{Tv}}}() @@ -156,7 +156,7 @@ function LinearAlgebra.qrfact(A::SparseMatrixCSC{Tv}; tol = _default_tol(A)) whe end """ - qrfact(A) -> 
QRSparse + qr(A) -> QRSparse Compute the `QR` factorization of a sparse matrix `A`. Fill-reducing row and column permutations are used such that `F.R = F.Q'*A[F.prow,F.pcol]`. The main application of this type is to @@ -171,7 +171,7 @@ julia> A = sparse([1,2,3,4], [1,1,2,2], [1.0,1.0,1.0,1.0]) [3, 2] = 1.0 [4, 2] = 1.0 -julia> qrfact(A) +julia> qr(A) Base.SparseArrays.SPQR.QRSparse{Float64,Int64} Q factor: 4×4 Base.SparseArrays.SPQR.QRSparseQ{Float64,Int64}: @@ -195,8 +195,6 @@ Column permutation: 2 ``` """ -LinearAlgebra.qrfact(A::SparseMatrixCSC; tol = _default_tol(A)) = qrfact(A, Val{true}, tol = tol) - LinearAlgebra.qr(A::SparseMatrixCSC; tol = _default_tol(A)) = qr(A, Val{true}, tol = tol) function LinearAlgebra.lmul!(Q::QRSparseQ, A::StridedVecOrMat) @@ -270,7 +268,7 @@ Extract factors of a QRSparse factorization. Possible values of `d` are # Examples ```jldoctest -julia> F = qrfact(sparse([1,3,2,3,4], [1,1,2,3,4], [1.0,2.0,3.0,4.0,5.0])); +julia> F = qr(sparse([1,3,2,3,4], [1,1,2,3,4], [1.0,2.0,3.0,4.0,5.0])); julia> F.Q 4×4 Base.SparseArrays.SPQR.QRSparseQ{Float64,Int64}: @@ -407,7 +405,7 @@ julia> A = sparse([1,2,4], [1,1,1], [1.0,1.0,1.0], 4, 2) [2, 1] = 1.0 [4, 1] = 1.0 -julia> qrfact(A)\\fill(1.0, 4) +julia> qr(A)\\fill(1.0, 4) 2-element Array{Float64,1}: 1.0 0.0 diff --git a/stdlib/SuiteSparse/test/spqr.jl b/stdlib/SuiteSparse/test/spqr.jl index 38dbfcd856e14..5880c0a8a0dd6 100644 --- a/stdlib/SuiteSparse/test/spqr.jl +++ b/stdlib/SuiteSparse/test/spqr.jl @@ -8,7 +8,7 @@ using LinearAlgebra: rmul!, lmul!, Adjoint, Transpose m, n = 100, 10 nn = 100 -@test size(qrfact(sprandn(m, n, 0.1)).Q) == (m, m) +@test size(qr(sprandn(m, n, 0.1)).Q) == (m, m) @testset "element type of A: $eltyA" for eltyA in (Float64, Complex{Float64}) if eltyA <: Real @@ -17,7 +17,7 @@ nn = 100 A = sparse([1:n; rand(1:m, nn - n)], [1:n; rand(1:n, nn - n)], complex.(randn(nn), randn(nn)), m, n) end - F = qrfact(A) + F = qr(A) @test size(F) == (m,n) @test size(F, 1) == m @test size(F, 2) == n @@ -67,7 +67,7 @@ nn = 100 end # Make sure that conversion to Sparse doesn't use SuiteSparse's symmetric flag - @test qrfact(SparseMatrixCSC{eltyA}(I, 5, 5)) \ fill(eltyA(1), 5) == fill(1, 5) + @test qr(SparseMatrixCSC{eltyA}(I, 5, 5)) \ fill(eltyA(1), 5) == fill(1, 5) end @testset "basic solution of rank deficient ls" begin @@ -83,7 +83,7 @@ end @testset "Issue 26368" begin A = sparse([0.0 1 0 0; 0 0 0 0]) - F = qrfact(A) + F = qr(A) @test F.Q*F.R == A[F.prow,F.pcol] end diff --git a/test/bitarray.jl b/test/bitarray.jl index 77e856b35f7cc..6e48c416d2e9d 100644 --- a/test/bitarray.jl +++ b/test/bitarray.jl @@ -1468,7 +1468,8 @@ timesofar("cat") b1 = bitrand(n1, n1) @check_bit_operation svd(b1) - @check_bit_operation qr(b1) + @test ((qrb1, qrb1A) = (qr(b1), qr(Array(b1))); + qrb1.Q == qrb1A.Q && qrb1.R == qrb1A.R) b1 = bitrand(v1) @check_bit_operation diagm(0 => b1) BitMatrix From da650e773387a5894a481c5e055dcb5f4e81d904 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sat, 19 May 2018 13:59:23 -0700 Subject: [PATCH 06/21] Deprecate bkfact to bk. 
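
`bkfact(A, rook)` now forwards to `bk(A, rook)` via `@deprecate`, and
`BunchKaufman` gains `iterate` methods so its factors can be destructured
like the other decompositions in this series. Not part of the diff itself —
a rough migration sketch using only the names introduced in this patch
(the matrix is purely illustrative):

```julia
using LinearAlgebra

A = Symmetric([1.0 2.0; 2.0 3.0])  # small symmetric (indefinite) example
S = bk(A)                          # previously: bkfact(A)
d, u, p = S                        # destructuring via the new iterate methods
@assert u * d * u' ≈ A[p, p]       # U*D*U' reassembles the permuted matrix
```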
--- NEWS.md | 5 +-- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 1 + stdlib/LinearAlgebra/src/bunchkaufman.jl | 44 +++++++++++++++++------ stdlib/LinearAlgebra/src/dense.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 7 +++- stdlib/LinearAlgebra/src/symmetric.jl | 2 +- stdlib/LinearAlgebra/test/bunchkaufman.jl | 20 +++++------ 8 files changed, 57 insertions(+), 26 deletions(-) diff --git a/NEWS.md b/NEWS.md index a2d7025377663..1700d3ea698b1 100644 --- a/NEWS.md +++ b/NEWS.md @@ -690,8 +690,9 @@ Deprecated or removed * The keyword `immutable` is fully deprecated to `struct`, and `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). - * `lufact`, `eigfact`, `schurfact`, `lqfact`, and `qrfact` have respectively - been deprecated to `lu`, `eig`, `schur`, `lq`, and `qr` ([#27159]). + * `lufact`, `eigfact`, `schurfact`, `lqfact`, `qrfact`, and `bkfact` have + respectively been deprecated to `lu`, `eig`, `schur`, `lq`, `qr`, and `bk` + ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 6ce590952cab9..555f85da6f229 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -329,7 +329,7 @@ LinearAlgebra.QRCompactWY LinearAlgebra.QRPivoted LinearAlgebra.lqfact! LinearAlgebra.lq -LinearAlgebra.bkfact +LinearAlgebra.bk LinearAlgebra.bkfact! LinearAlgebra.eig LinearAlgebra.eigvals diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 8df4612565b6f..32a5e6c275659 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -63,6 +63,7 @@ export # Functions axpy!, axpby!, + bk, bkfact, bkfact!, chol, diff --git a/stdlib/LinearAlgebra/src/bunchkaufman.jl b/stdlib/LinearAlgebra/src/bunchkaufman.jl index 666e50f73e267..cbd3fdd8ed272 100644 --- a/stdlib/LinearAlgebra/src/bunchkaufman.jl +++ b/stdlib/LinearAlgebra/src/bunchkaufman.jl @@ -16,10 +16,17 @@ BunchKaufman(A::AbstractMatrix{T}, ipiv::Vector{BlasInt}, uplo::AbstractChar, sy rook::Bool, info::BlasInt) where {T} = BunchKaufman{T,typeof(A)}(A, ipiv, uplo, symmetric, rook, info) +# iteration for destructuring into components +Base.iterate(S::BunchKaufman) = (S.D, Val(:UL)) +Base.iterate(S::BunchKaufman, ::Val{:UL}) = (S.uplo == 'L' ? S.L : S.U, Val(:p)) +Base.iterate(S::BunchKaufman, ::Val{:p}) = (S.p, Val(:done)) +Base.iterate(S::BunchKaufman, ::Val{:done}) = nothing + + """ bkfact!(A, rook::Bool=false) -> BunchKaufman -`bkfact!` is the same as [`bkfact`](@ref), but saves space by overwriting the +`bkfact!` is the same as [`bk`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. """ function bkfact!(A::RealHermSymComplexSym{T,S} where {T<:BlasReal,S<:StridedMatrix}, rook::Bool = false) @@ -41,15 +48,27 @@ function bkfact!(A::StridedMatrix{<:BlasFloat}, rook::Bool = false) end """ - bkfact(A, rook::Bool=false) -> BunchKaufman + bk(A, rook::Bool=false) -> S::BunchKaufman + +Compute the Bunch-Kaufman [^Bunch1977] factorization of a `Symmetric` or +`Hermitian` matrix `A` as ``P'*U*D*U'*P`` or ``P'*L*D*L'*P``, depending on +which triangle is stored in `A`, and return a `BunchKaufman` object. 
+Note that if `A` is complex symmetric then `U'` and `L'` denote +the unconjugated transposes, i.e. `transpose(U)` and `transpose(L)`. -Compute the Bunch-Kaufman [^Bunch1977] factorization of a symmetric or Hermitian matrix `A` as ``P'*U*D*U'*P`` or ``P'*L*D*L'*P``, depending on which triangle is stored in `A`, and return a `BunchKaufman` object. Note that if `A` is complex symmetric then `U'` and `L'` denote the unconjugated transposes, i.e. `transpose(U)` and `transpose(L)`. +Iterating the decomposition produces the components `S.D`, `S.U` or `S.L` +as appropriate given `S.uplo`, and `S.p`. -If `rook` is `true`, rook pivoting is used. If `rook` is false, rook pivoting is not used. +If `rook` is `true`, rook pivoting is used. If `rook` is false, +rook pivoting is not used. -The following functions are available for `BunchKaufman` objects: [`size`](@ref), `\\`, [`inv`](@ref), [`issymmetric`](@ref), [`ishermitian`](@ref), [`getindex`](@ref). +The following functions are available for `BunchKaufman` objects: +[`size`](@ref), `\\`, [`inv`](@ref), [`issymmetric`](@ref), +[`ishermitian`](@ref), [`getindex`](@ref). -[^Bunch1977]: J R Bunch and L Kaufman, Some stable methods for calculating inertia and solving symmetric linear systems, Mathematics of Computation 31:137 (1977), 163-179. [url](http://www.ams.org/journals/mcom/1977-31-137/S0025-5718-1977-0428694-0/). +[^Bunch1977]: J R Bunch and L Kaufman, Some stable methods for calculating inertia +and solving symmetric linear systems, Mathematics of Computation 31:137 (1977), 163-179. +[url](http://www.ams.org/journals/mcom/1977-31-137/S0025-5718-1977-0428694-0/). # Examples ```jldoctest @@ -58,7 +77,7 @@ julia> A = [1 2; 2 3] 1 2 2 3 -julia> bkfact(A) +julia> S = bk(A) BunchKaufman{Float64,Array{Float64,2}} D factor: 2×2 Tridiagonal{Float64,Array{Float64,1}}: @@ -72,9 +91,14 @@ permutation: 2-element Array{Int64,1}: 1 2 + +julia> d, u, p = S; # destructuring via iteration + +julia> d == S.D && u == S.U && p == S.p +true ``` """ -bkfact(A::AbstractMatrix{T}, rook::Bool=false) where {T} = +bk(A::AbstractMatrix{T}, rook::Bool=false) where {T} = bkfact!(copy_oftype(A, typeof(sqrt(one(T)))), rook) convert(::Type{BunchKaufman{T}}, B::BunchKaufman{T}) where {T} = B @@ -134,7 +158,7 @@ julia> A = [1 2 3; 2 1 2; 3 2 1] 2 1 2 3 2 1 -julia> F = bkfact(Symmetric(A, :L)) +julia> F = bk(Symmetric(A, :L)) BunchKaufman{Float64,Array{Float64,2}} D factor: 3×3 Tridiagonal{Float64,Array{Float64,1}}: @@ -158,7 +182,7 @@ julia> F.L*F.D*F.L' - A[F.p, F.p] 0.0 0.0 0.0 0.0 0.0 0.0 -julia> F = bkfact(Symmetric(A)); +julia> F = bk(Symmetric(A)); julia> F.U*F.D*F.U' - F.P*A*F.P' 3×3 Array{Float64,2}: diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 58b3d59f9201c..9c4d1b5d38ab0 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -1113,7 +1113,7 @@ systems. For example: `A=factorize(A); x=A\\b; y=A\\C`. 
| Properties of `A` | type of factorization | |:---------------------------|:-----------------------------------------------| | Positive-definite | Cholesky (see [`cholfact`](@ref)) | -| Dense Symmetric/Hermitian | Bunch-Kaufman (see [`bkfact`](@ref)) | +| Dense Symmetric/Hermitian | Bunch-Kaufman (see [`bk`](@ref)) | | Sparse Symmetric/Hermitian | LDLt (see [`ldltfact`](@ref)) | | Triangular | Triangular | | Diagonal | Diagonal | diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 267f40dcd2084..79fd21c02f285 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -19,7 +19,8 @@ using Base: @deprecate, depwarn @deprecate isposdef!(A::StridedMatrix, UL::Symbol) isposdef!(Hermitian(A, UL)) # bkfact -import .LinearAlgebra: bkfact, bkfact! +export bkfact +import .LinearAlgebra: bkfact! function bkfact(A::StridedMatrix, uplo::Symbol, symmetric::Bool = issymmetric(A), rook::Bool = false) depwarn(string("`bkfact` with uplo and symmetric arguments is deprecated, ", "use `bkfact($(symmetric ? "Symmetric(" : "Hermitian(")A, :$uplo))` instead."), @@ -1307,3 +1308,7 @@ export qrfact @deprecate(qrfact(v::AbstractVector), qr(v)) @deprecate(qrfact(A::AbstractMatrix{T}) where T, qr(A)) @deprecate(qrfact(A::AbstractMatrix{T}, arg) where T, qr(A, arg)) + +# deprecate bkfact to bk +# bkfact exported in a deprecation above +@deprecate(bkfact(A::AbstractMatrix{T}, rook::Bool=false) where {T}, bk(A, rook)) diff --git a/stdlib/LinearAlgebra/src/symmetric.jl b/stdlib/LinearAlgebra/src/symmetric.jl index d5f5eb580596a..6174f9d4c5b99 100644 --- a/stdlib/LinearAlgebra/src/symmetric.jl +++ b/stdlib/LinearAlgebra/src/symmetric.jl @@ -440,7 +440,7 @@ end function factorize(A::HermOrSym{T}) where T TT = typeof(sqrt(one(T))) if TT <: BlasFloat - return bkfact(A) + return bk(A) else # fallback return lu(A) end diff --git a/stdlib/LinearAlgebra/test/bunchkaufman.jl b/stdlib/LinearAlgebra/test/bunchkaufman.jl index 78e4604fd496e..ea323fb6ae446 100644 --- a/stdlib/LinearAlgebra/test/bunchkaufman.jl +++ b/stdlib/LinearAlgebra/test/bunchkaufman.jl @@ -38,7 +38,7 @@ bimg = randn(n,2)/2 @test isa(factorize(asym), LinearAlgebra.BunchKaufman) @test isa(factorize(aher), LinearAlgebra.BunchKaufman) @testset "$uplo Bunch-Kaufman factor of indefinite matrix" for uplo in (:L, :U) - bc1 = bkfact(Hermitian(aher, uplo)) + bc1 = bk(Hermitian(aher, uplo)) @test LinearAlgebra.issuccess(bc1) @test logabsdet(bc1)[1] ≈ log(abs(det(bc1))) if eltya <: Real @@ -48,7 +48,7 @@ bimg = randn(n,2)/2 end @test inv(bc1)*aher ≈ Matrix(I, n, n) @testset for rook in (false, true) - @test inv(bkfact(Symmetric(transpose(a) + a, uplo), rook))*(transpose(a) + a) ≈ Matrix(I, n, n) + @test inv(bk(Symmetric(transpose(a) + a, uplo), rook))*(transpose(a) + a) ≈ Matrix(I, n, n) if eltya <: BlasFloat # test also bkfact! without explicit type tag # no bkfact! method for Int ... 
yet @@ -58,7 +58,7 @@ bimg = randn(n,2)/2 @test size(bc1, 1) == size(bc1.LD, 1) @test size(bc1, 2) == size(bc1.LD, 2) if eltya <: BlasReal - @test_throws ArgumentError bkfact(a) + @test_throws ArgumentError bk(a) end # Test extraction of factors if eltya <: Real @@ -66,7 +66,7 @@ bimg = randn(n,2)/2 @test getproperty(bc1, uplo)*bc1.D*getproperty(bc1, uplo)' ≈ bc1.P*aher*bc1.P' end - bc1 = bkfact(Symmetric(asym, uplo)) + bc1 = bk(Symmetric(asym, uplo)) @test getproperty(bc1, uplo)*bc1.D*transpose(getproperty(bc1, uplo)) ≈ asym[bc1.p, bc1.p] @test getproperty(bc1, uplo)*bc1.D*transpose(getproperty(bc1, uplo)) ≈ bc1.P*asym*transpose(bc1.P) @test_throws ErrorException bc1.Z @@ -81,13 +81,13 @@ bimg = randn(n,2)/2 ε = max(εa,εb) @testset "$uplo Bunch-Kaufman factor of indefinite matrix" for uplo in (:L, :U) - bc1 = bkfact(Hermitian(aher, uplo)) + bc1 = bk(Hermitian(aher, uplo)) @test aher*(bc1\b) ≈ b atol=1000ε end @testset "$uplo Bunch-Kaufman factors of a pos-def matrix" for uplo in (:U, :L) @testset "rook pivoting: $rook" for rook in (false, true) - bc2 = bkfact(Hermitian(apd, uplo), rook) + bc2 = bk(Hermitian(apd, uplo), rook) @test LinearAlgebra.issuccess(bc2) bks = split(sprint(show, "text/plain", bc2), "\n") @test bks[1] == summary(bc2) @@ -114,7 +114,7 @@ bimg = randn(n,2)/2 for As in (As, view(As, 1:n, 1:n)) @testset "$uplo Bunch-Kaufman factors of a singular matrix" for uplo in (:L, :U) @testset for rook in (false, true) - F = bkfact(issymmetric(As) ? Symmetric(As, uplo) : Hermitian(As, uplo), rook) + F = bk(issymmetric(As) ? Symmetric(As, uplo) : Hermitian(As, uplo), rook) @test !LinearAlgebra.issuccess(F) # test printing of this as well! bks = sprint(show, "text/plain", F) @@ -138,8 +138,8 @@ end @test F\v5 == F\v6[1:5] end -@test_throws DomainError logdet(bkfact([-1 -1; -1 1])) -@test logabsdet(bkfact([8 4; 4 2]))[1] == -Inf -@test isa(bkfact(Symmetric(ones(0,0))), BunchKaufman) # 0x0 matrix +@test_throws DomainError logdet(bk([-1 -1; -1 1])) +@test logabsdet(bk([8 4; 4 2]))[1] == -Inf +@test isa(bk(Symmetric(ones(0,0))), BunchKaufman) # 0x0 matrix end # module TestBunchKaufman From 1753a8b2cc34895319c5a737638d9de119c75767 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 12:50:34 -0700 Subject: [PATCH 07/21] Deprecate cholfact to chol. --- NEWS.md | 10 ++- stdlib/LinearAlgebra/docs/src/index.md | 1 - stdlib/LinearAlgebra/src/LinearAlgebra.jl | 7 +- stdlib/LinearAlgebra/src/cholesky.jl | 62 +++++++------- stdlib/LinearAlgebra/src/dense.jl | 6 +- stdlib/LinearAlgebra/src/deprecated.jl | 7 ++ stdlib/LinearAlgebra/src/factorization.jl | 2 +- stdlib/LinearAlgebra/src/uniformscaling.jl | 6 +- stdlib/LinearAlgebra/test/bunchkaufman.jl | 2 +- stdlib/LinearAlgebra/test/cholesky.jl | 52 ++++++------ stdlib/LinearAlgebra/test/triangular.jl | 4 +- stdlib/LinearAlgebra/test/uniformscaling.jl | 6 +- stdlib/SparseArrays/src/SparseArrays.jl | 2 +- stdlib/SparseArrays/src/linalg.jl | 5 +- stdlib/SparseArrays/test/sparse.jl | 3 +- stdlib/SuiteSparse/src/cholmod.jl | 18 ++--- stdlib/SuiteSparse/src/deprecated.jl | 12 +++ stdlib/SuiteSparse/test/cholmod.jl | 90 ++++++++++----------- 18 files changed, 156 insertions(+), 139 deletions(-) diff --git a/NEWS.md b/NEWS.md index 1700d3ea698b1..0479efc38042e 100644 --- a/NEWS.md +++ b/NEWS.md @@ -242,6 +242,10 @@ This section lists changes that do not have deprecation warnings. * `qr` methods now return decomposition objects such as `QR`, `QRPivoted`, and `QRCompactWY` rather than tuples of arrays ([#27159]). 
+ * `chol` methods now return decomposition objects such as `Cholesky`, + `CholeskyPivoted`, and `SuiteSparse.CHOLMOD.Factor` rather than + tuples of arrays or tuples of numbers or numbers ([#27159]). + * `countlines` now always counts the last non-empty line even if it does not end with EOL, matching the behavior of `eachline` and `readlines` ([#25845]). @@ -690,9 +694,9 @@ Deprecated or removed * The keyword `immutable` is fully deprecated to `struct`, and `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). - * `lufact`, `eigfact`, `schurfact`, `lqfact`, `qrfact`, and `bkfact` have - respectively been deprecated to `lu`, `eig`, `schur`, `lq`, `qr`, and `bk` - ([#27159]). + * `lufact`, `eigfact`, `schurfact`, `lqfact`, `qrfact`, `bkfact`, and `cholfact` + have respectively been deprecated to `lu`, `eig`, `schur`, `lq`, `qr`, `bk`, + and `chol` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 555f85da6f229..ce1891103a47b 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -314,7 +314,6 @@ LinearAlgebra.UniformScaling LinearAlgebra.lu LinearAlgebra.lufact! LinearAlgebra.chol -LinearAlgebra.cholfact LinearAlgebra.cholfact! LinearAlgebra.lowrankupdate LinearAlgebra.lowrankdowndate diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 32a5e6c275659..c5581f442beb8 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -67,7 +67,6 @@ export bkfact, bkfact!, chol, - cholfact, cholfact!, cond, condskeel, @@ -251,7 +250,7 @@ end Compute `A \\ B` in-place and store the result in `Y`, returning the result. The argument `A` should *not* be a matrix. Rather, instead of matrices it should be a -factorization object (e.g. produced by [`factorize`](@ref) or [`cholfact`](@ref)). +factorization object (e.g. produced by [`factorize`](@ref) or [`chol`](@ref)). The reason for this is that factorization itself is both expensive and typically allocates memory (although it can also be done in-place via, e.g., [`lufact!`](@ref)), and performance-critical situations requiring `ldiv!` usually also require fine-grained @@ -265,7 +264,7 @@ ldiv!(Y, A, B) Compute `A \\ B` in-place and overwriting `B` to store the result. The argument `A` should *not* be a matrix. Rather, instead of matrices it should be a -factorization object (e.g. produced by [`factorize`](@ref) or [`cholfact`](@ref)). +factorization object (e.g. produced by [`factorize`](@ref) or [`chol`](@ref)). The reason for this is that factorization itself is both expensive and typically allocates memory (although it can also be done in-place via, e.g., [`lufact!`](@ref)), and performance-critical situations requiring `ldiv!` usually also require fine-grained @@ -280,7 +279,7 @@ ldiv!(A, B) Compute `A / B` in-place and overwriting `A` to store the result. The argument `B` should *not* be a matrix. Rather, instead of matrices it should be a -factorization object (e.g. produced by [`factorize`](@ref) or [`cholfact`](@ref)). +factorization object (e.g. produced by [`factorize`](@ref) or [`chol`](@ref)). 
The reason for this is that factorization itself is both expensive and typically allocates memory (although it can also be done in-place via, e.g., [`lufact!`](@ref)), and performance-critical situations requiring `rdiv!` usually also require fine-grained diff --git a/stdlib/LinearAlgebra/src/cholesky.jl b/stdlib/LinearAlgebra/src/cholesky.jl index 0b4de8cc6e312..de5cbee8fcc14 100644 --- a/stdlib/LinearAlgebra/src/cholesky.jl +++ b/stdlib/LinearAlgebra/src/cholesky.jl @@ -4,14 +4,14 @@ # Cholesky Factorization # ########################## -# The dispatch structure in the chol!, chol, cholfact, and cholfact! methods is a bit +# The dispatch structure in the legacychol!, legacychol, chol, and cholfact! methods is a bit # complicated and some explanation is therefore provided in the following # # In the methods below, LAPACK is called when possible, i.e. StridedMatrices with Float32, # Float64, Complex{Float32}, and Complex{Float64} element types. For other element or -# matrix types, the unblocked Julia implementation in _chol! is used. For cholfact +# matrix types, the unblocked Julia implementation in _chol! is used. For chol # and cholfact! pivoting is supported through a Val(Bool) argument. A type argument is -# necessary for type stability since the output of cholfact and cholfact! is either +# necessary for type stability since the output of chol and cholfact! is either # Cholesky or PivotedCholesky. The latter is only # supported for the four LAPACK element types. For other types, e.g. BigFloats Val(true) will # give an error. It is required that the input is Hermitian (including real symmetric) either @@ -20,8 +20,8 @@ # The internal structure is as follows # - _chol! returns the factor and info without checking positive definiteness -# - chol/chol! returns the factor and checks for positive definiteness -# - cholfact/cholfact! returns Cholesky without checking positive definiteness +# - legacychol/legacychol! returns the factor and checks for positive definiteness +# - chol/cholfact! returns Cholesky without checking positive definiteness # FixMe? The dispatch below seems overly complicated. One simplification could be to # merge the two Cholesky types into one. It would remove the need for Val completely but @@ -53,7 +53,7 @@ function CholeskyPivoted(A::AbstractMatrix{T}, uplo::AbstractChar, piv::Vector{B end # make a copy that allow inplace Cholesky factorization -@inline choltype(A) = promote_type(typeof(chol(one(eltype(A)))), Float32) +@inline choltype(A) = promote_type(typeof(legacychol(one(eltype(A)))), Float32) @inline cholcopy(A) = copy_oftype(A, choltype(A)) # _chol!. Internal methods for calling unpivoted Cholesky @@ -132,13 +132,13 @@ function _chol!(x::Number, uplo) rx == abs(x) ? (rval, convert(BlasInt, 0)) : (rval, convert(BlasInt, 1)) end -# chol!. Destructive methods for computing Cholesky factor of real symmetric or Hermitian +# legacychol!. Destructive methods for computing Cholesky factor of real symmetric or Hermitian # matrix -function chol!(A::RealHermSymComplexHerm{<:Real,<:StridedMatrix}) +function legacychol!(A::RealHermSymComplexHerm{<:Real,<:StridedMatrix}) C, info = _chol!(A.uplo == 'U' ? A.data : LinearAlgebra.copytri!(A.data, 'L', true), UpperTriangular) @assertposdef C info end -function chol!(A::StridedMatrix) +function legacychol!(A::StridedMatrix) checksquare(A) C, info = _chol!(A) @assertposdef C info @@ -146,21 +146,21 @@ end -# chol. Non-destructive methods for computing Cholesky factor of a real symmetric or +# legacychol. 
Non-destructive methods for computing Cholesky factor of a real symmetric or # Hermitian matrix. Promotes elements to a type that is stable under square roots. -function chol(A::RealHermSymComplexHerm) +function legacychol(A::RealHermSymComplexHerm) AA = similar(A, choltype(A), size(A)) if A.uplo == 'U' copyto!(AA, A.data) else adjoint!(AA, A.data) end - chol!(Hermitian(AA, :U)) + legacychol!(Hermitian(AA, :U)) end ## for StridedMatrices, check that matrix is symmetric/Hermitian """ - chol(A) -> U + legacychol(A) -> U Compute the Cholesky factorization of a positive definite matrix `A` and return the [`UpperTriangular`](@ref) matrix `U` such that `A = U'U`. @@ -172,7 +172,7 @@ julia> A = [1. 2.; 2. 50.] 1.0 2.0 2.0 50.0 -julia> U = chol(A) +julia> U = legacychol(A) 2×2 UpperTriangular{Float64,Array{Float64,2}}: 1.0 2.0 ⋅ 6.78233 @@ -183,21 +183,21 @@ julia> U'U 2.0 50.0 ``` """ -chol(A::AbstractMatrix) = chol!(cholcopy(A)) +legacychol(A::AbstractMatrix) = legacychol!(cholcopy(A)) ## Numbers """ - chol(x::Number) -> y + legacychol(x::Number) -> y Compute the square root of a non-negative number `x`. # Examples ```jldoctest -julia> chol(16) +julia> legacychol(16) 4.0 ``` """ -chol(x::Number, args...) = ((C, info) = _chol!(x, nothing); @assertposdef C info) +legacychol(x::Number, args...) = ((C, info) = _chol!(x, nothing); @assertposdef C info) @@ -218,7 +218,7 @@ end """ cholfact!(A, Val(false)) -> Cholesky -The same as [`cholfact`](@ref), but saves space by overwriting the input `A`, +The same as [`chol`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. An [`InexactError`](@ref) exception is thrown if the factorization produces a number not representable by the element type of `A`, e.g. for integer types. @@ -263,7 +263,7 @@ cholfact!(A::RealHermSymComplexHerm{<:Real}, ::Val{true}; tol = 0.0) = """ cholfact!(A, Val(true); tol = 0.0) -> CholeskyPivoted -The same as [`cholfact`](@ref), but saves space by overwriting the input `A`, +The same as [`chol`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. An [`InexactError`](@ref) exception is thrown if the factorization produces a number not representable by the element type of `A`, e.g. for integer types. @@ -278,11 +278,11 @@ function cholfact!(A::StridedMatrix, ::Val{true}; tol = 0.0) end end -# cholfact. Non-destructive methods for computing Cholesky factorization of real symmetric +# chol. Non-destructive methods for computing Cholesky factorization of real symmetric # or Hermitian matrix ## No pivoting (default) """ - cholfact(A, Val(false)) -> Cholesky + chol(A, Val(false)) -> Cholesky Compute the Cholesky factorization of a dense symmetric positive definite matrix `A` and return a `Cholesky` factorization. The matrix `A` can either be a [`Symmetric`](@ref) or [`Hermitian`](@ref) @@ -299,7 +299,7 @@ julia> A = [4. 12. -16.; 12. 37. -43.; -16. -43. 98.] 
12.0 37.0 -43.0 -16.0 -43.0 98.0 -julia> C = cholfact(A) +julia> C = chol(A) Cholesky{Float64,Array{Float64,2}} U factor: 3×3 UpperTriangular{Float64,Array{Float64,2}}: @@ -323,13 +323,13 @@ julia> C.L * C.U == A true ``` """ -cholfact(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, +chol(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, ::Val{false}=Val(false)) = cholfact!(cholcopy(A)) ## With pivoting """ - cholfact(A, Val(true); tol = 0.0) -> CholeskyPivoted + chol(A, Val(true); tol = 0.0) -> CholeskyPivoted Compute the pivoted Cholesky factorization of a dense symmetric positive semi-definite matrix `A` and return a `CholeskyPivoted` factorization. The matrix `A` can either be a [`Symmetric`](@ref) @@ -340,11 +340,11 @@ The following functions are available for `PivotedCholesky` objects: The argument `tol` determines the tolerance for determining the rank. For negative values, the tolerance is the machine precision. """ -cholfact(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, +chol(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, ::Val{true}; tol = 0.0) = cholfact!(cholcopy(A), Val(true); tol = tol) ## Number -function cholfact(x::Number, uplo::Symbol=:U) +function chol(x::Number, uplo::Symbol=:U) C, info = _chol!(x, uplo) xf = fill(C, 1, 1) Cholesky(xf, uplo, info) @@ -557,7 +557,7 @@ rank(C::CholeskyPivoted) = C.rank lowrankupdate!(C::Cholesky, v::StridedVector) -> CC::Cholesky Update a Cholesky factorization `C` with the vector `v`. If `A = C.U'C.U` then -`CC = cholfact(C.U'C.U + v*v')` but the computation of `CC` only uses `O(n^2)` +`CC = chol(C.U'C.U + v*v')` but the computation of `CC` only uses `O(n^2)` operations. The input factorization `C` is updated in place such that on exit `C == CC`. The vector `v` is destroyed during the computation. """ @@ -603,7 +603,7 @@ end lowrankdowndate!(C::Cholesky, v::StridedVector) -> CC::Cholesky Downdate a Cholesky factorization `C` with the vector `v`. If `A = C.U'C.U` then -`CC = cholfact(C.U'C.U - v*v')` but the computation of `CC` only uses `O(n^2)` +`CC = chol(C.U'C.U - v*v')` but the computation of `CC` only uses `O(n^2)` operations. The input factorization `C` is updated in place such that on exit `C == CC`. The vector `v` is destroyed during the computation. """ @@ -656,7 +656,7 @@ end lowrankupdate(C::Cholesky, v::StridedVector) -> CC::Cholesky Update a Cholesky factorization `C` with the vector `v`. If `A = C.U'C.U` -then `CC = cholfact(C.U'C.U + v*v')` but the computation of `CC` only uses +then `CC = chol(C.U'C.U + v*v')` but the computation of `CC` only uses `O(n^2)` operations. """ lowrankupdate(C::Cholesky, v::StridedVector) = lowrankupdate!(copy(C), copy(v)) @@ -665,7 +665,7 @@ lowrankupdate(C::Cholesky, v::StridedVector) = lowrankupdate!(copy(C), copy(v)) lowrankdowndate(C::Cholesky, v::StridedVector) -> CC::Cholesky Downdate a Cholesky factorization `C` with the vector `v`. If `A = C.U'C.U` -then `CC = cholfact(C.U'C.U - v*v')` but the computation of `CC` only uses +then `CC = chol(C.U'C.U - v*v')` but the computation of `CC` only uses `O(n^2)` operations. 
""" lowrankdowndate(C::Cholesky, v::StridedVector) = lowrankdowndate!(copy(C), copy(v)) diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 9c4d1b5d38ab0..8c98116c6cba8 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -109,7 +109,7 @@ julia> isposdef(A) true ``` """ -isposdef(A::AbstractMatrix) = ishermitian(A) && isposdef(cholfact(Hermitian(A))) +isposdef(A::AbstractMatrix) = ishermitian(A) && isposdef(chol(Hermitian(A))) isposdef(x::Number) = imag(x)==0 && real(x) > 0 # the definition of strides for Array{T,N} is tuple() if N = 0, otherwise it is @@ -1112,7 +1112,7 @@ systems. For example: `A=factorize(A); x=A\\b; y=A\\C`. | Properties of `A` | type of factorization | |:---------------------------|:-----------------------------------------------| -| Positive-definite | Cholesky (see [`cholfact`](@ref)) | +| Positive-definite | Cholesky (see [`chol`](@ref)) | | Dense Symmetric/Hermitian | Bunch-Kaufman (see [`bk`](@ref)) | | Sparse Symmetric/Hermitian | LDLt (see [`ldltfact`](@ref)) | | Triangular | Triangular | @@ -1208,7 +1208,7 @@ function factorize(A::StridedMatrix{T}) where T return UpperTriangular(A) end if herm - cf = cholfact(A) + cf = chol(A) if cf.info == 0 return cf else diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 79fd21c02f285..4092e6f9dead5 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -7,6 +7,7 @@ using Base: @deprecate, depwarn @deprecate cond(F::LinearAlgebra.LU, p::Integer) cond(convert(AbstractArray, F), p) # PR #22188 +export cholfact @deprecate cholfact!(A::StridedMatrix, uplo::Symbol, ::Type{Val{false}}) cholfact!(Hermitian(A, uplo), Val(false)) @deprecate cholfact!(A::StridedMatrix, uplo::Symbol) cholfact!(Hermitian(A, uplo)) @deprecate cholfact(A::StridedMatrix, uplo::Symbol, ::Type{Val{false}}) cholfact(Hermitian(A, uplo), Val(false)) @@ -1312,3 +1313,9 @@ export qrfact # deprecate bkfact to bk # bkfact exported in a deprecation above @deprecate(bkfact(A::AbstractMatrix{T}, rook::Bool=false) where {T}, bk(A, rook)) + +# deprecate cholfact to chol +# cholfact exported in a deprecation above +@deprecate(cholfact(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, ::Val{false}=Val(false)), chol(A, Val(false))) +@deprecate(cholfact(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, ::Val{true}; tol = 0.0), chol(A, Val(true); tol=tol)) +@deprecate(cholfact(x::Number, uplo::Symbol=:U), chol(x, uplo)) diff --git a/stdlib/LinearAlgebra/src/factorization.jl b/stdlib/LinearAlgebra/src/factorization.jl index b0674ae64f683..4c5d3227d1aad 100644 --- a/stdlib/LinearAlgebra/src/factorization.jl +++ b/stdlib/LinearAlgebra/src/factorization.jl @@ -22,7 +22,7 @@ end Test that a factorization of a matrix succeeded. 
```jldoctest -julia> F = cholfact([1 0; 0 1]); +julia> F = chol([1 0; 0 1]); julia> LinearAlgebra.issuccess(F) true diff --git a/stdlib/LinearAlgebra/src/uniformscaling.jl b/stdlib/LinearAlgebra/src/uniformscaling.jl index 9cfed3941c575..0b0da69b09284 100644 --- a/stdlib/LinearAlgebra/src/uniformscaling.jl +++ b/stdlib/LinearAlgebra/src/uniformscaling.jl @@ -360,10 +360,10 @@ function _chol!(J::UniformScaling, uplo) UniformScaling(c), info end -chol!(J::UniformScaling, uplo) = ((J, info) = _chol!(J, uplo); @assertposdef J info) +legacychol!(J::UniformScaling, uplo) = ((J, info) = _chol!(J, uplo); @assertposdef J info) """ - chol(J::UniformScaling) -> C + legacychol(J::UniformScaling) -> C Compute the square root of a non-negative UniformScaling `J`. @@ -374,7 +374,7 @@ UniformScaling{Float64} 4.0*I ``` """ -chol(J::UniformScaling, args...) = ((C, info) = _chol!(J, nothing); @assertposdef C info) +legacychol(J::UniformScaling, args...) = ((C, info) = _chol!(J, nothing); @assertposdef C info) ## Matrix construction from UniformScaling diff --git a/stdlib/LinearAlgebra/test/bunchkaufman.jl b/stdlib/LinearAlgebra/test/bunchkaufman.jl index ea323fb6ae446..0be07848bb14d 100644 --- a/stdlib/LinearAlgebra/test/bunchkaufman.jl +++ b/stdlib/LinearAlgebra/test/bunchkaufman.jl @@ -132,7 +132,7 @@ end @testset "test example due to @timholy in PR 15354" begin A = rand(6,5); A = complex(A'*A) # to avoid calling the real-lhs-complex-rhs method - F = cholfact(A); + F = chol(A); v6 = rand(ComplexF64, 6) v5 = view(v6, 1:5) @test F\v5 == F\v6[1:5] diff --git a/stdlib/LinearAlgebra/test/cholesky.jl b/stdlib/LinearAlgebra/test/cholesky.jl index 08bacab8cf66e..845dc6550a100 100644 --- a/stdlib/LinearAlgebra/test/cholesky.jl +++ b/stdlib/LinearAlgebra/test/cholesky.jl @@ -18,9 +18,9 @@ function unary_ops_tests(a, ca, tol; n=size(a, 1)) end function factor_recreation_tests(a_U, a_L) - c_U = cholfact(a_U) - c_L = cholfact(a_L) - cl = chol(a_L) + c_U = chol(a_U) + c_L = chol(a_L) + cl = chol(a_L).U ls = c_L.L @test Array(c_U) ≈ Array(c_L) ≈ a_U @test ls*ls' ≈ a_U @@ -55,7 +55,6 @@ end # Test of symmetric pos. def. 
strided matrix apd = a'*a - @inferred cholfact(apd) @inferred chol(apd) capd = factorize(apd) r = capd.U @@ -65,9 +64,8 @@ end @testset "throw for non-square input" begin A = rand(eltya, 2, 3) + @test_throws DimensionMismatch LinearAlgebra.legacychol!(A) @test_throws DimensionMismatch chol(A) - @test_throws DimensionMismatch LinearAlgebra.chol!(A) - @test_throws DimensionMismatch cholfact(A) @test_throws DimensionMismatch cholfact!(A) end @@ -88,16 +86,16 @@ end @inferred(logdet(capd)) apos = apd[1,1] # test chol(x::Number), needs x>0 - @test all(x -> x ≈ √apos, cholfact(apos).factors) - @test_throws PosDefException chol(-one(eltya)) + @test all(x -> x ≈ √apos, chol(apos).factors) + @test_throws PosDefException LinearAlgebra.legacychol(-one(eltya)) - # Test cholfact with Symmetric/Hermitian upper/lower + # Test chol with Symmetric/Hermitian upper/lower apds = Symmetric(apd) apdsL = Symmetric(apd, :L) apdh = Hermitian(apd) apdhL = Hermitian(apd, :L) if eltya <: Real - capds = cholfact(apds) + capds = chol(apds) unary_ops_tests(apds, capds, ε*κ*n) if eltya <: BlasReal capds = cholfact!(copy(apds)) @@ -106,7 +104,7 @@ end ulstring = sprint((t, s) -> show(t, "text/plain", s), capds.UL) @test sprint((t, s) -> show(t, "text/plain", s), capds) == "$(typeof(capds))\nU factor:\n$ulstring" else - capdh = cholfact(apdh) + capdh = chol(apdh) unary_ops_tests(apdh, capdh, ε*κ*n) capdh = cholfact!(copy(apdh)) unary_ops_tests(apdh, capdh, ε*κ*n) @@ -118,7 +116,7 @@ end # test chol of 2x2 Strang matrix S = Matrix{eltya}(SymTridiagonal([2, 2], [-1])) - @test Matrix(chol(S)) ≈ [2 -1; 0 sqrt(eltya(3))] / sqrt(eltya(2)) + @test Matrix(chol(S).UL) ≈ [2 -1; 0 sqrt(eltya(3))] / sqrt(eltya(2)) # test extraction of factor and re-creating original matrix if eltya <: Real @@ -129,9 +127,9 @@ end #pivoted upper Cholesky if eltya != BigFloat - cz = cholfact(Hermitian(zeros(eltya,n,n)), Val(true)) + cz = chol(Hermitian(zeros(eltya,n,n)), Val(true)) @test_throws LinearAlgebra.RankDeficientException LinearAlgebra.chkfullrank(cz) - cpapd = cholfact(apdh, Val(true)) + cpapd = chol(apdh, Val(true)) unary_ops_tests(apdh, cpapd, ε*κ*n) @test rank(cpapd) == n @test all(diff(diag(real(cpapd.factors))).<=0.) # diagonal should be non-increasing @@ -155,18 +153,18 @@ end @test norm(a*(capd\(a'*b)) - b,1)/norm(b,1) <= ε*κ*n # Ad hoc, revisit if eltya != BigFloat && eltyb != BigFloat - lapd = cholfact(apdhL) + lapd = chol(apdhL) @test norm(apd * (lapd\b) - b)/norm(b) <= ε*κ*n @test norm(apd * (lapd\b[1:n]) - b[1:n])/norm(b[1:n]) <= ε*κ*n end if eltya != BigFloat && eltyb != BigFloat # Note! 
Need to implement pivoted Cholesky decomposition in julia - cpapd = cholfact(apdh, Val(true)) + cpapd = chol(apdh, Val(true)) @test norm(apd * (cpapd\b) - b)/norm(b) <= ε*κ*n # Ad hoc, revisit @test norm(apd * (cpapd\b[1:n]) - b[1:n])/norm(b[1:n]) <= ε*κ*n - lpapd = cholfact(apdhL, Val(true)) + lpapd = chol(apdhL, Val(true)) @test norm(apd * (lpapd\b) - b)/norm(b) <= ε*κ*n # Ad hoc, revisit @test norm(apd * (lpapd\b[1:n]) - b[1:n])/norm(b[1:n]) <= ε*κ*n @@ -176,7 +174,7 @@ end if eltya <: BlasFloat @testset "throw for non positive definite matrix" begin A = eltya[1 2; 2 1]; B = eltya[1, 1] - C = cholfact(A) + C = chol(A) @test !isposdef(C) @test !LinearAlgebra.issuccess(C) Cstr = sprint((t, s) -> show(t, "text/plain", s), C) @@ -194,8 +192,8 @@ end A = randn(5,5) end A = convert(Matrix{eltya}, A'A) - @test Matrix(cholfact(A).L) ≈ Matrix(invoke(LinearAlgebra._chol!, Tuple{AbstractMatrix, Type{LowerTriangular}}, copy(A), LowerTriangular)[1]) - @test Matrix(cholfact(A).U) ≈ Matrix(invoke(LinearAlgebra._chol!, Tuple{AbstractMatrix, Type{UpperTriangular}}, copy(A), UpperTriangular)[1]) + @test Matrix(chol(A).L) ≈ Matrix(invoke(LinearAlgebra._chol!, Tuple{AbstractMatrix, Type{LowerTriangular}}, copy(A), LowerTriangular)[1]) + @test Matrix(chol(A).U) ≈ Matrix(invoke(LinearAlgebra._chol!, Tuple{AbstractMatrix, Type{UpperTriangular}}, copy(A), UpperTriangular)[1]) end end end @@ -219,8 +217,8 @@ end AcA = A'*A BcB = AcA + v*v' BcB = (BcB + BcB')/2 - F = cholfact(Hermitian(AcA, uplo)) - G = cholfact(Hermitian(BcB, uplo)) + F = chol(Hermitian(AcA, uplo)) + G = chol(Hermitian(BcB, uplo)) @test Base.getproperty(LinearAlgebra.lowrankupdate(F, v), uplo) ≈ Base.getproperty(G, uplo) @test_throws DimensionMismatch LinearAlgebra.lowrankupdate(F, Vector{eltype(v)}(undef,length(v)+1)) @test Base.getproperty(LinearAlgebra.lowrankdowndate(G, v), uplo) ≈ Base.getproperty(F, uplo) @@ -228,7 +226,7 @@ end end end -@testset "issue #13243, unexpected nans in complex cholfact" begin +@testset "issue #13243, unexpected nans in complex chol" begin apd = [5.8525753f0 + 0.0f0im -0.79540455f0 + 0.7066077f0im 0.98274714f0 + 1.3824869f0im 2.619998f0 + 1.8532984f0im -1.8306153f0 - 1.2336911f0im 0.32275113f0 + 0.015575029f0im 2.1968813f0 + 1.0640624f0im 0.27894387f0 + 0.97911835f0im 3.0476584f0 + 0.18548489f0im 0.3842994f0 + 0.7050991f0im -0.79540455f0 - 0.7066077f0im 8.313246f0 + 0.0f0im -1.8076122f0 - 0.8882447f0im 0.47806996f0 + 0.48494184f0im 0.5096429f0 - 0.5395974f0im -0.7285097f0 - 0.10360408f0im -1.1760061f0 - 2.7146957f0im -0.4271084f0 + 0.042899966f0im -1.7228563f0 + 2.8335886f0im 1.8942566f0 + 0.6389735f0im 0.98274714f0 - 1.3824869f0im -1.8076122f0 + 0.8882447f0im 9.367975f0 + 0.0f0im -0.1838578f0 + 0.6468568f0im -1.8338387f0 + 0.7064959f0im 0.041852742f0 - 0.6556877f0im 2.5673025f0 + 1.9732997f0im -1.1148382f0 - 0.15693812f0im 2.4704504f0 - 1.0389464f0im 1.0858271f0 - 1.298006f0im @@ -249,7 +247,7 @@ end 0.25336108035924787 + 0.975317836492159im 0.0628393808469436 - 0.1253397353973715im 0.11192755545114 - 0.1603741874112385im 0.8439562576196216 + 1.0850814110398734im -1.0568488936791578 - 0.06025820467086475im 0.12696236014017806 - 0.09853584666755086im] - cholfact(Hermitian(apd, :L), Val(true)) \ b + chol(Hermitian(apd, :L), Val(true)) \ b r = factorize(apd).U E = abs.(apd - r'*r) ε = eps(abs(float(one(ComplexF32)))) @@ -263,10 +261,10 @@ end R = randn(5, 5) C = complex.(R, R) for A in (R, C) - @test !LinearAlgebra.issuccess(cholfact(A)) + @test !LinearAlgebra.issuccess(chol(A)) @test 
!LinearAlgebra.issuccess(cholfact!(copy(A))) - @test_throws PosDefException chol(A) - @test_throws PosDefException LinearAlgebra.chol!(copy(A)) + @test_throws PosDefException LinearAlgebra.legacychol(A) + @test_throws PosDefException LinearAlgebra.legacychol!(copy(A)) end end diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index 884d94418d1c0..01ead848d5241 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -26,7 +26,7 @@ for elty1 in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFlo (UnitLowerTriangular, :L)) # Construct test matrix - A1 = t1(elty1 == Int ? rand(1:7, n, n) : convert(Matrix{elty1}, (elty1 <: Complex ? complex.(randn(n, n), randn(n, n)) : randn(n, n)) |> t -> chol(t't) |> t -> uplo1 == :U ? t : copy(t'))) + A1 = t1(elty1 == Int ? rand(1:7, n, n) : convert(Matrix{elty1}, (elty1 <: Complex ? complex.(randn(n, n), randn(n, n)) : randn(n, n)) |> t -> chol(t't) |> t -> uplo1 == :U ? t.UL : copy(t.UL'))) debug && println("elty1: $elty1, A1: $t1") @@ -279,7 +279,7 @@ for elty1 in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFlo debug && println("elty1: $elty1, A1: $t1, elty2: $elty2") - A2 = t2(elty2 == Int ? rand(1:7, n, n) : convert(Matrix{elty2}, (elty2 <: Complex ? complex.(randn(n, n), randn(n, n)) : randn(n, n)) |> t -> chol(t't) |> t -> uplo2 == :U ? t : copy(t'))) + A2 = t2(elty2 == Int ? rand(1:7, n, n) : convert(Matrix{elty2}, (elty2 <: Complex ? complex.(randn(n, n), randn(n, n)) : randn(n, n)) |> t -> chol(t't) |> t -> uplo2 == :U ? t.UL : copy(t.UL'))) # Convert if elty1 <: Real && !(elty2 <: Integer) diff --git a/stdlib/LinearAlgebra/test/uniformscaling.jl b/stdlib/LinearAlgebra/test/uniformscaling.jl index 8b51683a856ba..5bedde8e28f9d 100644 --- a/stdlib/LinearAlgebra/test/uniformscaling.jl +++ b/stdlib/LinearAlgebra/test/uniformscaling.jl @@ -192,11 +192,11 @@ end end end -@testset "chol" begin +@testset "legacychol" begin for T in (Float64, ComplexF32, BigFloat, Int) λ = T(4) - @test chol(λ*I) ≈ √λ*I - @test_throws LinearAlgebra.PosDefException chol(-λ*I) + @test LinearAlgebra.legacychol(λ*I) ≈ √λ*I + @test_throws LinearAlgebra.PosDefException LinearAlgebra.legacychol(-λ*I) end end diff --git a/stdlib/SparseArrays/src/SparseArrays.jl b/stdlib/SparseArrays/src/SparseArrays.jl index 477598677a1b2..bcd29acbfc76d 100644 --- a/stdlib/SparseArrays/src/SparseArrays.jl +++ b/stdlib/SparseArrays/src/SparseArrays.jl @@ -12,7 +12,7 @@ using Base.Sort: Forward using LinearAlgebra import Base: +, -, *, \, /, &, |, xor, == -import LinearAlgebra: mul!, ldiv!, rdiv!, chol, adjoint!, diag, dot, eig, +import LinearAlgebra: mul!, ldiv!, rdiv!, adjoint!, diag, dot, eig, issymmetric, istril, istriu, lu, tr, transpose!, tril!, triu!, vecnorm, cond, diagm, factorize, ishermitian, norm, lmul!, rmul!, tril, triu diff --git a/stdlib/SparseArrays/src/linalg.jl b/stdlib/SparseArrays/src/linalg.jl index fbd3e5b4ce3eb..269f1e71a59c9 100644 --- a/stdlib/SparseArrays/src/linalg.jl +++ b/stdlib/SparseArrays/src/linalg.jl @@ -990,7 +990,7 @@ function factorize(A::SparseMatrixCSC) end # function factorize(A::Symmetric{Float64,SparseMatrixCSC{Float64,Ti}}) where Ti -# F = cholfact(A) +# F = chol(A) # if LinearAlgebra.issuccess(F) # return F # else @@ -999,7 +999,7 @@ end # end # end function factorize(A::LinearAlgebra.RealHermSymComplexHerm{Float64,<:SparseMatrixCSC}) - F = cholfact(A) + F = chol(A) if LinearAlgebra.issuccess(F) return F else @@ -1008,7 +1008,6 @@ 
function factorize(A::LinearAlgebra.RealHermSymComplexHerm{Float64,<:SparseMatri end end -chol(A::SparseMatrixCSC) = error("Use cholfact() instead of chol() for sparse matrices.") eig(A::SparseMatrixCSC) = error("Use IterativeEigensolvers.eigs() instead of eig() for sparse matrices.") function Base.cov(X::SparseMatrixCSC; dims::Int=1, corrected::Bool=true) diff --git a/stdlib/SparseArrays/test/sparse.jl b/stdlib/SparseArrays/test/sparse.jl index 2107faae2b23e..2e6cd39d076b9 100644 --- a/stdlib/SparseArrays/test/sparse.jl +++ b/stdlib/SparseArrays/test/sparse.jl @@ -1321,7 +1321,7 @@ end if Base.USE_GPL_LIBS a = SparseMatrixCSC(2, 2, [1, 3, 5], [1, 2, 1, 2], [1.0, 0.0, 0.0, 1.0]) @test lu(a)\[2.0, 3.0] ≈ [2.0, 3.0] - @test cholfact(a)\[2.0, 3.0] ≈ [2.0, 3.0] + @test chol(a)\[2.0, 3.0] ≈ [2.0, 3.0] end end @@ -1779,7 +1779,6 @@ end @test isa(factorize(tril(A)), LowerTriangular{Float64, SparseMatrixCSC{Float64, Int}}) C, b = A[:, 1:4], fill(1., size(A, 1)) @test !Base.USE_GPL_LIBS || factorize(C)\b ≈ Array(C)\b - @test_throws ErrorException chol(A) @test_throws ErrorException eig(A) @test_throws ErrorException inv(A) end diff --git a/stdlib/SuiteSparse/src/cholmod.jl b/stdlib/SuiteSparse/src/cholmod.jl index 1255f420b8add..39b2dfeb7d947 100644 --- a/stdlib/SuiteSparse/src/cholmod.jl +++ b/stdlib/SuiteSparse/src/cholmod.jl @@ -7,7 +7,7 @@ import Base: (*), convert, copy, eltype, getindex, getproperty, show, size, using LinearAlgebra import LinearAlgebra: (\), - cholfact, cholfact!, det, diag, ishermitian, isposdef, + chol, cholfact!, det, diag, ishermitian, isposdef, issuccess, issymmetric, ldltfact, ldltfact!, logdet using SparseArrays @@ -1385,7 +1385,7 @@ factorization `F`. `A` must be a [`SparseMatrixCSC`](@ref) or a [`Symmetric`](@r [`Hermitian`](@ref) view of a `SparseMatrixCSC`. Note that even if `A` doesn't have the type tag, it must still be symmetric or Hermitian. -See also [`cholfact`](@ref). +See also [`chol`](@ref). !!! note This method uses the CHOLMOD library from SuiteSparse, which only supports @@ -1401,7 +1401,7 @@ cholfact!(F::Factor, A::Union{SparseMatrixCSC{T}, shift = 0.0) where {T<:Real} = cholfact!(F, Sparse(A); shift = shift) -function cholfact(A::Sparse; shift::Real=0.0, +function chol(A::Sparse; shift::Real=0.0, perm::AbstractVector{SuiteSparse_long}=SuiteSparse_long[]) cm = defaults(common_struct) @@ -1417,14 +1417,14 @@ function cholfact(A::Sparse; shift::Real=0.0, end """ - cholfact(A; shift = 0.0, perm = Int[]) -> CHOLMOD.Factor + chol(A; shift = 0.0, perm = Int[]) -> CHOLMOD.Factor Compute the Cholesky factorization of a sparse positive definite matrix `A`. `A` must be a [`SparseMatrixCSC`](@ref) or a [`Symmetric`](@ref)/[`Hermitian`](@ref) view of a `SparseMatrixCSC`. Note that even if `A` doesn't have the type tag, it must still be symmetric or Hermitian. A fill-reducing permutation is used. -`F = cholfact(A)` is most frequently used to solve systems of equations with `F\\b`, +`F = chol(A)` is most frequently used to solve systems of equations with `F\\b`, but also the methods [`diag`](@ref), [`det`](@ref), and [`logdet`](@ref) are defined for `F`. You can also extract individual factors from `F`, using `F.L`. @@ -1449,11 +1449,11 @@ it should be a permutation of `1:size(A,1)` giving the ordering to use Many other functions from CHOLMOD are wrapped but not exported from the `Base.SparseArrays.CHOLMOD` module. 
""" -cholfact(A::Union{SparseMatrixCSC{T}, SparseMatrixCSC{Complex{T}}, +chol(A::Union{SparseMatrixCSC{T}, SparseMatrixCSC{Complex{T}}, Symmetric{T,SparseMatrixCSC{T,SuiteSparse_long}}, Hermitian{Complex{T},SparseMatrixCSC{Complex{T},SuiteSparse_long}}, Hermitian{T,SparseMatrixCSC{T,SuiteSparse_long}}}; - kws...) where {T<:Real} = cholfact(Sparse(A); kws...) + kws...) where {T<:Real} = chol(Sparse(A); kws...) function ldltfact!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv @@ -1712,7 +1712,7 @@ const RealHermSymComplexHermF64SSL = Union{ Hermitian{Float64,SparseMatrixCSC{Float64,SuiteSparse_long}}, Hermitian{Complex{Float64},SparseMatrixCSC{Complex{Float64},SuiteSparse_long}}} function \(A::RealHermSymComplexHermF64SSL, B::StridedVecOrMat) - F = cholfact(A) + F = chol(A) if issuccess(F) return \(F, B) else @@ -1726,7 +1726,7 @@ function \(A::RealHermSymComplexHermF64SSL, B::StridedVecOrMat) end function \(adjA::Adjoint{<:Any,<:RealHermSymComplexHermF64SSL}, B::StridedVecOrMat) A = adjA.parent - F = cholfact(A) + F = chol(A) if issuccess(F) return \(adjoint(F), B) else diff --git a/stdlib/SuiteSparse/src/deprecated.jl b/stdlib/SuiteSparse/src/deprecated.jl index 2576e419ded98..56b766b0b6a00 100644 --- a/stdlib/SuiteSparse/src/deprecated.jl +++ b/stdlib/SuiteSparse/src/deprecated.jl @@ -62,3 +62,15 @@ end @deprecate(qrfact(A::SparseMatrixCSC{Tv}; tol = _default_tol(A)) where {Tv<:Union{ComplexF64,Float64}}, qr(A; tol=tol)) @deprecate(qrfact(A::SparseMatrixCSC; tol = _default_tol(A)), qr(A; tol=tol)) end + +# deprecate cholfact to chol +@eval SuiteSparse.CHOLMOD begin + import LinearAlgebra: cholfact + @deprecate(cholfact(A::Sparse; shift::Real=0.0, perm::AbstractVector{SuiteSparse_long}=SuiteSparse_long[]), chol(A; shift=shift, perm=perm)) + @deprecate(cholfact(A::Union{SparseMatrixCSC{T}, SparseMatrixCSC{Complex{T}}, + Symmetric{T,SparseMatrixCSC{T,SuiteSparse_long}}, + Hermitian{Complex{T},SparseMatrixCSC{Complex{T},SuiteSparse_long}}, + Hermitian{T,SparseMatrixCSC{T,SuiteSparse_long}}}; + kws...) 
where {T<:Real}, + chol(A; kws...)) +end diff --git a/stdlib/SuiteSparse/test/cholmod.jl b/stdlib/SuiteSparse/test/cholmod.jl index 3dac1c92eb255..34d8764dce64e 100644 --- a/stdlib/SuiteSparse/test/cholmod.jl +++ b/stdlib/SuiteSparse/test/cholmod.jl @@ -117,12 +117,12 @@ srand(123) @test size(chmal) == size(A) @test size(chmal, 1) == size(A, 1) - chma = cholfact(A) # LL' form + chma = chol(A) # LL' form @test CHOLMOD.isvalid(chma) @test unsafe_load(pointer(chma)).is_ll == 1 # check that it is in fact an LLt @test chma\b ≈ x @test nnz(chma) == 489 - @test nnz(cholfact(A, perm=1:size(A,1))) > nnz(chma) + @test nnz(chol(A, perm=1:size(A,1))) > nnz(chma) @test size(chma) == size(A) chmal = CHOLMOD.FactorComponent(chma, :L) @test size(chmal) == size(A) @@ -153,7 +153,7 @@ end 2.249,-1.0,2.279,1.4,-1.0,1.0,-1.0,1.0,1.0,1.0], 0) afiro2 = CHOLMOD.aat(afiro, CHOLMOD.SuiteSparse_long[0:50;], CHOLMOD.SuiteSparse_long(1)) CHOLMOD.change_stype!(afiro2, -1) - chmaf = cholfact(afiro2) + chmaf = chol(afiro2) y = afiro'*fill(1., size(afiro,1)) sol = chmaf\(afiro*y) # least squares solution @test CHOLMOD.isvalid(sol) @@ -370,23 +370,23 @@ end end # Factor - @test_throws ArgumentError cholfact(A1) - @test_throws ArgumentError cholfact(A1) - @test_throws ArgumentError cholfact(A1, shift=1.0) + @test_throws ArgumentError chol(A1) + @test_throws ArgumentError chol(A1) + @test_throws ArgumentError chol(A1, shift=1.0) @test_throws ArgumentError ldltfact(A1) @test_throws ArgumentError ldltfact(A1, shift=1.0) C = A1 + copy(adjoint(A1)) λmaxC = eigmax(Array(C)) b = fill(1., size(A1, 1)) - @test_throws LinearAlgebra.PosDefException cholfact(C - 2λmaxC*I)\b - @test_throws LinearAlgebra.PosDefException cholfact(C, shift=-2λmaxC)\b + @test_throws LinearAlgebra.PosDefException chol(C - 2λmaxC*I)\b + @test_throws LinearAlgebra.PosDefException chol(C, shift=-2λmaxC)\b @test_throws ArgumentError ldltfact(C - C[1,1]*I)\b @test_throws ArgumentError ldltfact(C, shift=-real(C[1,1]))\b - @test !isposdef(cholfact(C - 2λmaxC*I)) - @test !isposdef(cholfact(C, shift=-2λmaxC)) + @test !isposdef(chol(C - 2λmaxC*I)) + @test !isposdef(chol(C, shift=-2λmaxC)) @test !LinearAlgebra.issuccess(ldltfact(C - C[1,1]*I)) @test !LinearAlgebra.issuccess(ldltfact(C, shift=-real(C[1,1]))) - F = cholfact(A1pd) + F = chol(A1pd) tmp = IOBuffer() show(tmp, F) @test tmp.size > 0 @@ -404,7 +404,7 @@ end let # to test supernodal, we must use a larger matrix Ftmp = sprandn(100, 100, 0.1) Ftmp = Ftmp'Ftmp + I - @test logdet(cholfact(Ftmp)) ≈ logdet(Array(Ftmp)) + @test logdet(chol(Ftmp)) ≈ logdet(Array(Ftmp)) end @test logdet(ldltfact(A1pd)) ≈ logdet(Array(A1pd)) @test isposdef(A1pd) @@ -413,9 +413,9 @@ end if elty <: Real @test CHOLMOD.issymmetric(Sparse(A1pd, 0)) - @test CHOLMOD.Sparse(cholfact(Symmetric(A1pd, :L))) == CHOLMOD.Sparse(cholfact(A1pd)) - F1 = CHOLMOD.Sparse(cholfact(Symmetric(A1pd, :L), shift=2)) - F2 = CHOLMOD.Sparse(cholfact(A1pd, shift=2)) + @test CHOLMOD.Sparse(chol(Symmetric(A1pd, :L))) == CHOLMOD.Sparse(chol(A1pd)) + F1 = CHOLMOD.Sparse(chol(Symmetric(A1pd, :L), shift=2)) + F2 = CHOLMOD.Sparse(chol(A1pd, shift=2)) @test F1 == F2 @test CHOLMOD.Sparse(ldltfact(Symmetric(A1pd, :L))) == CHOLMOD.Sparse(ldltfact(A1pd)) F1 = CHOLMOD.Sparse(ldltfact(Symmetric(A1pd, :L), shift=2)) @@ -424,9 +424,9 @@ end else @test !CHOLMOD.issymmetric(Sparse(A1pd, 0)) @test CHOLMOD.ishermitian(Sparse(A1pd, 0)) - @test CHOLMOD.Sparse(cholfact(Hermitian(A1pd, :L))) == CHOLMOD.Sparse(cholfact(A1pd)) - F1 = CHOLMOD.Sparse(cholfact(Hermitian(A1pd, :L), shift=2)) - 
F2 = CHOLMOD.Sparse(cholfact(A1pd, shift=2)) + @test CHOLMOD.Sparse(chol(Hermitian(A1pd, :L))) == CHOLMOD.Sparse(chol(A1pd)) + F1 = CHOLMOD.Sparse(chol(Hermitian(A1pd, :L), shift=2)) + F2 = CHOLMOD.Sparse(chol(A1pd, shift=2)) @test F1 == F2 @test CHOLMOD.Sparse(ldltfact(Hermitian(A1pd, :L))) == CHOLMOD.Sparse(ldltfact(A1pd)) F1 = CHOLMOD.Sparse(ldltfact(Hermitian(A1pd, :L), shift=2)) @@ -435,7 +435,7 @@ end end ### cholfact!/ldltfact! - F = cholfact(A1pd) + F = chol(A1pd) CHOLMOD.change_factor!(elty, false, false, true, true, F) @test unsafe_load(pointer(F)).is_ll == 0 CHOLMOD.change_factor!(elty, true, false, true, true, F) @@ -444,7 +444,7 @@ end @test size(F, 3) == 1 @test_throws ArgumentError size(F, 0) - F = cholfact(A1pdSparse, shift=2) + F = chol(A1pdSparse, shift=2) @test isa(CHOLMOD.Sparse(F), CHOLMOD.Sparse{elty}) @test CHOLMOD.Sparse(cholfact!(copy(F), A1pd, shift=2.0)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality @@ -504,8 +504,8 @@ end p = [2,3,1] p_inv = [3,1,2] - @testset "cholfact, no permutation" begin - Fs = cholfact(As, perm=[1:3;]) + @testset "chol, no permutation" begin + Fs = chol(As, perm=[1:3;]) @test Fs.p == [1:3;] @test sparse(Fs.L) ≈ Lf @test sparse(Fs) ≈ As @@ -527,11 +527,11 @@ end @test_throws CHOLMOD.CHOLMODException Fs.DUPt end - @testset "cholfact, with permutation" begin - Fs = cholfact(As, perm=p) + @testset "chol, with permutation" begin + Fs = chol(As, perm=p) @test Fs.p == p Afp = Af[p,p] - Lfp = cholfact(Afp).L + Lfp = chol(Afp).L @test sparse(Fs.L) ≈ Lfp @test sparse(Fs) ≈ As b = rand(3) @@ -617,19 +617,19 @@ end end @testset "Element promotion and type inference" begin - @inferred cholfact(As)\fill(1, size(As, 1)) + @inferred chol(As)\fill(1, size(As, 1)) @inferred ldltfact(As)\fill(1, size(As, 1)) end end @testset "Issue 11745 - row and column pointers were not sorted in sparse(Factor)" begin A = Float64[10 1 1 1; 1 10 0 0; 1 0 10 0; 1 0 0 10] - @test sparse(cholfact(sparse(A))) ≈ A + @test sparse(chol(sparse(A))) ≈ A end GC.gc() @testset "Issue 11747 - Wrong show method defined for FactorComponent" begin - v = cholfact(sparse(Float64[ 10 1 1 1; 1 10 0 0; 1 0 10 0; 1 0 0 10])).L + v = chol(sparse(Float64[ 10 1 1 1; 1 10 0 0; 1 0 10 0; 1 0 0 10])).L for s in (sprint(show, MIME("text/plain"), v), sprint(show, v)) @test occursin("method: simplicial", s) @test !occursin("#undef", s) @@ -637,7 +637,7 @@ GC.gc() end @testset "Issue 14076" begin - @test cholfact(sparse([1,2,3,4], [1,2,3,4], Float32[1,4,16,64]))\[1,4,16,64] == fill(1, 4) + @test chol(sparse([1,2,3,4], [1,2,3,4], Float32[1,4,16,64]))\[1,4,16,64] == fill(1, 4) end @testset "Issue 14134" begin @@ -650,7 +650,7 @@ end @test_throws ArgumentError size(Anew) @test_throws ArgumentError Anew[1] @test_throws ArgumentError Anew[2,1] - F = cholfact(A) + F = chol(A) serialize(b, F) seekstart(b) Fnew = deserialize(b) @@ -669,17 +669,17 @@ end @testset "Further issue with promotion #14894" begin x = fill(1., 5) - @test cholfact(sparse(Float16(1)I, 5, 5))\x == x - @test cholfact(Symmetric(sparse(Float16(1)I, 5, 5)))\x == x - @test cholfact(Hermitian(sparse(Complex{Float16}(1)I, 5, 5)))\x == x - @test_throws MethodError cholfact(sparse(BigFloat(1)I, 5, 5)) - @test_throws MethodError cholfact(Symmetric(sparse(BigFloat(1)I, 5, 5))) - @test_throws MethodError cholfact(Hermitian(sparse(Complex{BigFloat}(1)I, 5, 5))) + @test chol(sparse(Float16(1)I, 5, 5))\x == x + @test chol(Symmetric(sparse(Float16(1)I, 5, 5)))\x == x + @test 
chol(Hermitian(sparse(Complex{Float16}(1)I, 5, 5)))\x == x + @test_throws MethodError chol(sparse(BigFloat(1)I, 5, 5)) + @test_throws MethodError chol(Symmetric(sparse(BigFloat(1)I, 5, 5))) + @test_throws MethodError chol(Hermitian(sparse(Complex{BigFloat}(1)I, 5, 5))) end @testset "test \\ for Factor and StridedVecOrMat" begin x = rand(5) - A = cholfact(sparse(Diagonal(x.\1))) + A = chol(sparse(Diagonal(x.\1))) @test A\view(fill(1.,10),1:2:10) ≈ x @test A\view(Matrix(1.0I, 5, 5), :, :) ≈ Matrix(Diagonal(x)) end @@ -687,7 +687,7 @@ end @testset "Real factorization and complex rhs" begin A = sprandn(5, 5, 0.4) |> t -> t't + I B = complex.(randn(5, 2), randn(5, 2)) - @test cholfact(A)\B ≈ A\B + @test chol(A)\B ≈ A\B end @testset "Make sure that ldltfact performs an LDLt (Issue #19032)" begin @@ -699,7 +699,7 @@ end s = unsafe_load(pointer(F)) @test s.is_super == 0 @test F\b ≈ fill(1., m+n) - F2 = cholfact(M) + F2 = chol(M) @test !LinearAlgebra.issuccess(F2) ldltfact!(F2, M) @test LinearAlgebra.issuccess(F2) @@ -708,8 +708,8 @@ end @testset "Test that imaginary parts in Hermitian{T,SparseMatrixCSC{T}} are ignored" begin A = sparse([1,2,3,4,1], [1,2,3,4,2], [complex(2.0,1),2,2,2,1]) - Fs = cholfact(Hermitian(A)) - Fd = cholfact(Hermitian(Array(A))) + Fs = chol(Hermitian(A)) + Fd = chol(Hermitian(Array(A))) @test sparse(Fs) ≈ Hermitian(A) @test Fs\fill(1., 4) ≈ Fd\fill(1., 4) end @@ -768,8 +768,8 @@ end 1.02371, -0.502384, 1.10686, 0.262229, -1.6935, 0.525239]) AtA = A'*A C0 = [1., 2., 0, 0, 0] - # Test both cholfact and LDLt with and without automatic permutations - for F in (cholfact(AtA), cholfact(AtA, perm=1:5), ldltfact(AtA), ldltfact(AtA, perm=1:5)) + # Test both chol and LDLt with and without automatic permutations + for F in (chol(AtA), chol(AtA, perm=1:5), ldltfact(AtA), ldltfact(AtA, perm=1:5)) local F x0 = F\(b = fill(1., 5)) #Test both sparse/dense and vectors/matrices @@ -805,9 +805,9 @@ end @testset "Issue #22335" begin local A, F A = sparse(1.0I, 3, 3) - @test LinearAlgebra.issuccess(cholfact(A)) + @test LinearAlgebra.issuccess(chol(A)) A[3, 3] = -1 - F = cholfact(A) + F = chol(A) @test !LinearAlgebra.issuccess(F) @test LinearAlgebra.issuccess(ldltfact!(F, A)) A[3, 3] = 1 From 5f1e0bc4f019a6a1a79d449ac84d2c88c9a2d882 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 13:58:21 -0700 Subject: [PATCH 08/21] Deprecate ldltfact to ldlt. --- NEWS.md | 6 +-- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/dense.jl | 4 +- stdlib/LinearAlgebra/src/deprecated.jl | 4 ++ stdlib/LinearAlgebra/src/ldlt.jl | 12 +++--- stdlib/LinearAlgebra/src/tridiag.jl | 2 +- stdlib/SuiteSparse/src/cholmod.jl | 14 +++--- stdlib/SuiteSparse/src/deprecated.jl | 12 ++++++ stdlib/SuiteSparse/test/cholmod.jl | 52 +++++++++++------------ 10 files changed, 63 insertions(+), 47 deletions(-) diff --git a/NEWS.md b/NEWS.md index 0479efc38042e..ff865a548e852 100644 --- a/NEWS.md +++ b/NEWS.md @@ -694,9 +694,9 @@ Deprecated or removed * The keyword `immutable` is fully deprecated to `struct`, and `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). - * `lufact`, `eigfact`, `schurfact`, `lqfact`, `qrfact`, `bkfact`, and `cholfact` - have respectively been deprecated to `lu`, `eig`, `schur`, `lq`, `qr`, `bk`, - and `chol` ([#27159]). 
+ * `lufact`, `eigfact`, `schurfact`, `lqfact`, `qrfact`, `bkfact`, `cholfact`, + and `ldltfact` have respectively been deprecated to `lu`, `eig`, `schur`, + `lq`, `qr`, `bk`, `chol`, and `ldlt` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index ce1891103a47b..37dc3db22715c 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -319,7 +319,7 @@ LinearAlgebra.lowrankupdate LinearAlgebra.lowrankdowndate LinearAlgebra.lowrankupdate! LinearAlgebra.lowrankdowndate! -LinearAlgebra.ldltfact +LinearAlgebra.ldlt LinearAlgebra.ldltfact! LinearAlgebra.qr LinearAlgebra.qrfact! diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index c5581f442beb8..a3bac6f8f6b22 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -102,7 +102,7 @@ export kron, ldiv!, ldltfact!, - ldltfact, + ldlt, linreg, logabsdet, logdet, diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 8c98116c6cba8..c318fbe3407f0 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -1114,12 +1114,12 @@ systems. For example: `A=factorize(A); x=A\\b; y=A\\C`. |:---------------------------|:-----------------------------------------------| | Positive-definite | Cholesky (see [`chol`](@ref)) | | Dense Symmetric/Hermitian | Bunch-Kaufman (see [`bk`](@ref)) | -| Sparse Symmetric/Hermitian | LDLt (see [`ldltfact`](@ref)) | +| Sparse Symmetric/Hermitian | LDLt (see [`ldlt`](@ref)) | | Triangular | Triangular | | Diagonal | Diagonal | | Bidiagonal | Bidiagonal | | Tridiagonal | LU (see [`lu`](@ref)) | -| Symmetric real tridiagonal | LDLt (see [`ldltfact`](@ref)) | +| Symmetric real tridiagonal | LDLt (see [`ldlt`](@ref)) | | General square | LU (see [`lu`](@ref)) | | General non-square | QR (see [`qr`](@ref)) | diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 4092e6f9dead5..19bec5e6fca6a 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1319,3 +1319,7 @@ export qrfact @deprecate(cholfact(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, ::Val{false}=Val(false)), chol(A, Val(false))) @deprecate(cholfact(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, ::Val{true}; tol = 0.0), chol(A, Val(true); tol=tol)) @deprecate(cholfact(x::Number, uplo::Symbol=:U), chol(x, uplo)) + +# deprecate ldltfact to ldlt +export ldltfact +@deprecate(ldltfact(M::SymTridiagonal{T}) where T, ldlt(M)) diff --git a/stdlib/LinearAlgebra/src/ldlt.jl b/stdlib/LinearAlgebra/src/ldlt.jl index 7db241411102f..86a9ca0c1105c 100644 --- a/stdlib/LinearAlgebra/src/ldlt.jl +++ b/stdlib/LinearAlgebra/src/ldlt.jl @@ -19,7 +19,7 @@ Factorization{T}(F::LDLt{S,U}) where {T,S,U} = LDLt{T,U}(F) """ ldltfact!(S::SymTridiagonal) -> LDLt -Same as [`ldltfact`](@ref), but saves space by overwriting the input `S`, instead of creating a copy. +Same as [`ldlt`](@ref), but saves space by overwriting the input `S`, instead of creating a copy. 
# Examples ```jldoctest @@ -53,11 +53,11 @@ function ldltfact!(S::SymTridiagonal{T,V}) where {T<:Real,V} end """ - ldltfact(S::SymTridiagonal) -> LDLt + ldlt(S::SymTridiagonal) -> LDLt Compute an `LDLt` factorization of the real symmetric tridiagonal matrix `S` such that `S = L*Diagonal(d)*L'` where `L` is a unit lower triangular matrix and `d` is a vector. The main use of an `LDLt` -factorization `F = ldltfact(S)` is to solve the linear system of equations `Sx = b` with `F\\b`. +factorization `F = ldlt(S)` is to solve the linear system of equations `Sx = b` with `F\\b`. # Examples ```jldoctest @@ -67,7 +67,7 @@ julia> S = SymTridiagonal([3., 4., 5.], [1., 2.]) 1.0 4.0 2.0 ⋅ 2.0 5.0 -julia> ldltS = ldltfact(S); +julia> ldltS = ldlt(S); julia> b = [6., 7., 8.]; @@ -84,12 +84,12 @@ julia> S \\ b 1.3488372093023255 ``` """ -function ldltfact(M::SymTridiagonal{T}) where T +function ldlt(M::SymTridiagonal{T}) where T S = typeof(zero(T)/one(T)) return S == T ? ldltfact!(copy(M)) : ldltfact!(SymTridiagonal{S}(M)) end -factorize(S::SymTridiagonal) = ldltfact(S) +factorize(S::SymTridiagonal) = ldlt(S) function ldiv!(S::LDLt{T,M}, B::AbstractVecOrMat{T}) where {T,M<:SymTridiagonal{T}} n, nrhs = size(B, 1), size(B, 2) diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl index 2f1c0f5f974ba..00dcf1fc3d701 100644 --- a/stdlib/LinearAlgebra/src/tridiag.jl +++ b/stdlib/LinearAlgebra/src/tridiag.jl @@ -181,7 +181,7 @@ function mul!(C::StridedVecOrMat, S::SymTridiagonal, B::StridedVecOrMat) return C end -(\)(T::SymTridiagonal, B::StridedVecOrMat) = ldltfact(T)\B +(\)(T::SymTridiagonal, B::StridedVecOrMat) = ldlt(T)\B eigfact!(A::SymTridiagonal{<:BlasReal}) = Eigen(LAPACK.stegr!('V', A.dv, A.ev)...) eig(A::SymTridiagonal{T}) where T = eigfact!(copy_oftype(A, eigtype(T))) diff --git a/stdlib/SuiteSparse/src/cholmod.jl b/stdlib/SuiteSparse/src/cholmod.jl index 39b2dfeb7d947..813c4abcf23ad 100644 --- a/stdlib/SuiteSparse/src/cholmod.jl +++ b/stdlib/SuiteSparse/src/cholmod.jl @@ -8,7 +8,7 @@ import Base: (*), convert, copy, eltype, getindex, getproperty, show, size, using LinearAlgebra import LinearAlgebra: (\), chol, cholfact!, det, diag, ishermitian, isposdef, - issuccess, issymmetric, ldltfact, ldltfact!, logdet + issuccess, issymmetric, ldlt, ldltfact!, logdet using SparseArrays import Libdl @@ -1477,7 +1477,7 @@ Compute the ``LDL'`` factorization of `A`, reusing the symbolic factorization `F view of a `SparseMatrixCSC`. Note that even if `A` doesn't have the type tag, it must still be symmetric or Hermitian. -See also [`ldltfact`](@ref). +See also [`ldlt`](@ref). !!! note This method uses the CHOLMOD library from SuiteSparse, which only supports @@ -1493,7 +1493,7 @@ ldltfact!(F::Factor, A::Union{SparseMatrixCSC{T}, shift = 0.0) where {T<:Real} = ldltfact!(F, Sparse(A), shift = shift) -function ldltfact(A::Sparse; shift::Real=0.0, +function ldlt(A::Sparse; shift::Real=0.0, perm::AbstractVector{SuiteSparse_long}=SuiteSparse_long[]) cm = defaults(common_struct) @@ -1514,13 +1514,13 @@ function ldltfact(A::Sparse; shift::Real=0.0, end """ - ldltfact(A; shift = 0.0, perm=Int[]) -> CHOLMOD.Factor + ldlt(A; shift = 0.0, perm=Int[]) -> CHOLMOD.Factor Compute the ``LDL'`` factorization of a sparse matrix `A`. `A` must be a [`SparseMatrixCSC`](@ref) or a [`Symmetric`](@ref)/[`Hermitian`](@ref) view of a `SparseMatrixCSC`. Note that even if `A` doesn't have the type tag, it must still be symmetric or Hermitian. -A fill-reducing permutation is used. 
`F = ldltfact(A)` is most frequently +A fill-reducing permutation is used. `F = ldlt(A)` is most frequently used to solve systems of equations `A*x = b` with `F\\b`. The returned factorization object `F` also supports the methods [`diag`](@ref), [`det`](@ref), [`logdet`](@ref), and [`inv`](@ref). @@ -1547,11 +1547,11 @@ it should be a permutation of `1:size(A,1)` giving the ordering to use Many other functions from CHOLMOD are wrapped but not exported from the `Base.SparseArrays.CHOLMOD` module. """ -ldltfact(A::Union{SparseMatrixCSC{T},SparseMatrixCSC{Complex{T}}, +ldlt(A::Union{SparseMatrixCSC{T},SparseMatrixCSC{Complex{T}}, Symmetric{T,SparseMatrixCSC{T,SuiteSparse_long}}, Hermitian{Complex{T},SparseMatrixCSC{Complex{T},SuiteSparse_long}}, Hermitian{T,SparseMatrixCSC{T,SuiteSparse_long}}}; - kws...) where {T<:Real} = ldltfact(Sparse(A); kws...) + kws...) where {T<:Real} = ldlt(Sparse(A); kws...) ## Rank updates diff --git a/stdlib/SuiteSparse/src/deprecated.jl b/stdlib/SuiteSparse/src/deprecated.jl index 56b766b0b6a00..3ac4d093af125 100644 --- a/stdlib/SuiteSparse/src/deprecated.jl +++ b/stdlib/SuiteSparse/src/deprecated.jl @@ -74,3 +74,15 @@ end kws...) where {T<:Real}, chol(A; kws...)) end + +# deprecate ldltfact to ldlt +@eval SuiteSparse.CHOLMOD begin + import LinearAlgebra: ldltfact + @deprecate(ldltfact(A::Sparse; shift::Real=0.0, perm::AbstractVector{SuiteSparse_long}=SuiteSparse_long[]), ldlt(A; shift=shift, perm=perm)) + @deprecate(ldltfact(A::Union{SparseMatrixCSC{T},SparseMatrixCSC{Complex{T}}, + Symmetric{T,SparseMatrixCSC{T,SuiteSparse_long}}, + Hermitian{Complex{T},SparseMatrixCSC{Complex{T},SuiteSparse_long}}, + Hermitian{T,SparseMatrixCSC{T,SuiteSparse_long}}}; + kws...) where {T<:Real}, + ldlt(A; kws...)) +end diff --git a/stdlib/SuiteSparse/test/cholmod.jl b/stdlib/SuiteSparse/test/cholmod.jl index 34d8764dce64e..846fac5aae161 100644 --- a/stdlib/SuiteSparse/test/cholmod.jl +++ b/stdlib/SuiteSparse/test/cholmod.jl @@ -107,11 +107,11 @@ srand(123) x = fill(1., n) b = A*x - chma = ldltfact(A) # LDL' form + chma = ldlt(A) # LDL' form @test CHOLMOD.isvalid(chma) @test unsafe_load(pointer(chma)).is_ll == 0 # check that it is in fact an LDLt @test chma\b ≈ x - @test nnz(ldltfact(A, perm=1:size(A,1))) > nnz(chma) + @test nnz(ldlt(A, perm=1:size(A,1))) > nnz(chma) @test size(chma) == size(A) chmal = CHOLMOD.FactorComponent(chma, :L) @test size(chmal) == size(A) @@ -373,19 +373,19 @@ end @test_throws ArgumentError chol(A1) @test_throws ArgumentError chol(A1) @test_throws ArgumentError chol(A1, shift=1.0) - @test_throws ArgumentError ldltfact(A1) - @test_throws ArgumentError ldltfact(A1, shift=1.0) + @test_throws ArgumentError ldlt(A1) + @test_throws ArgumentError ldlt(A1, shift=1.0) C = A1 + copy(adjoint(A1)) λmaxC = eigmax(Array(C)) b = fill(1., size(A1, 1)) @test_throws LinearAlgebra.PosDefException chol(C - 2λmaxC*I)\b @test_throws LinearAlgebra.PosDefException chol(C, shift=-2λmaxC)\b - @test_throws ArgumentError ldltfact(C - C[1,1]*I)\b - @test_throws ArgumentError ldltfact(C, shift=-real(C[1,1]))\b + @test_throws ArgumentError ldlt(C - C[1,1]*I)\b + @test_throws ArgumentError ldlt(C, shift=-real(C[1,1]))\b @test !isposdef(chol(C - 2λmaxC*I)) @test !isposdef(chol(C, shift=-2λmaxC)) - @test !LinearAlgebra.issuccess(ldltfact(C - C[1,1]*I)) - @test !LinearAlgebra.issuccess(ldltfact(C, shift=-real(C[1,1]))) + @test !LinearAlgebra.issuccess(ldlt(C - C[1,1]*I)) + @test !LinearAlgebra.issuccess(ldlt(C, shift=-real(C[1,1]))) F = chol(A1pd) tmp = IOBuffer() show(tmp, F) @@ -406,7 
+406,7 @@ end Ftmp = Ftmp'Ftmp + I @test logdet(chol(Ftmp)) ≈ logdet(Array(Ftmp)) end - @test logdet(ldltfact(A1pd)) ≈ logdet(Array(A1pd)) + @test logdet(ldlt(A1pd)) ≈ logdet(Array(A1pd)) @test isposdef(A1pd) @test !isposdef(A1) @test !isposdef(A1 + copy(A1') |> t -> t - 2eigmax(Array(t))*I) @@ -417,9 +417,9 @@ end F1 = CHOLMOD.Sparse(chol(Symmetric(A1pd, :L), shift=2)) F2 = CHOLMOD.Sparse(chol(A1pd, shift=2)) @test F1 == F2 - @test CHOLMOD.Sparse(ldltfact(Symmetric(A1pd, :L))) == CHOLMOD.Sparse(ldltfact(A1pd)) - F1 = CHOLMOD.Sparse(ldltfact(Symmetric(A1pd, :L), shift=2)) - F2 = CHOLMOD.Sparse(ldltfact(A1pd, shift=2)) + @test CHOLMOD.Sparse(ldlt(Symmetric(A1pd, :L))) == CHOLMOD.Sparse(ldlt(A1pd)) + F1 = CHOLMOD.Sparse(ldlt(Symmetric(A1pd, :L), shift=2)) + F2 = CHOLMOD.Sparse(ldlt(A1pd, shift=2)) @test F1 == F2 else @test !CHOLMOD.issymmetric(Sparse(A1pd, 0)) @@ -428,9 +428,9 @@ end F1 = CHOLMOD.Sparse(chol(Hermitian(A1pd, :L), shift=2)) F2 = CHOLMOD.Sparse(chol(A1pd, shift=2)) @test F1 == F2 - @test CHOLMOD.Sparse(ldltfact(Hermitian(A1pd, :L))) == CHOLMOD.Sparse(ldltfact(A1pd)) - F1 = CHOLMOD.Sparse(ldltfact(Hermitian(A1pd, :L), shift=2)) - F2 = CHOLMOD.Sparse(ldltfact(A1pd, shift=2)) + @test CHOLMOD.Sparse(ldlt(Hermitian(A1pd, :L))) == CHOLMOD.Sparse(ldlt(A1pd)) + F1 = CHOLMOD.Sparse(ldlt(Hermitian(A1pd, :L), shift=2)) + F2 = CHOLMOD.Sparse(ldlt(A1pd, shift=2)) @test F1 == F2 end @@ -448,11 +448,11 @@ end @test isa(CHOLMOD.Sparse(F), CHOLMOD.Sparse{elty}) @test CHOLMOD.Sparse(cholfact!(copy(F), A1pd, shift=2.0)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality - F = ldltfact(A1pd) + F = ldlt(A1pd) @test isa(CHOLMOD.Sparse(F), CHOLMOD.Sparse{elty}) @test CHOLMOD.Sparse(ldltfact!(copy(F), A1pd)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality - F = ldltfact(A1pdSparse, shift=2) + F = ldlt(A1pdSparse, shift=2) @test isa(CHOLMOD.Sparse(F), CHOLMOD.Sparse{elty}) @test CHOLMOD.Sparse(ldltfact!(copy(F), A1pd, shift=2.0)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality @@ -554,8 +554,8 @@ end @test_throws CHOLMOD.CHOLMODException Fs.DUPt end - @testset "ldltfact, no permutation" begin - Fs = ldltfact(As, perm=[1:3;]) + @testset "ldlt, no permutation" begin + Fs = ldlt(As, perm=[1:3;]) @test Fs.p == [1:3;] @test sparse(Fs.LD) ≈ LDf @test sparse(Fs) ≈ As @@ -583,13 +583,13 @@ end @test Fs.DUP\b ≈ L_f'\(D_f\b) end - @testset "ldltfact, with permutation" begin - Fs = ldltfact(As, perm=p) + @testset "ldlt, with permutation" begin + Fs = ldlt(As, perm=p) @test Fs.p == p @test sparse(Fs) ≈ As b = rand(3) Asp = As[p,p] - LDp = sparse(ldltfact(Asp, perm=[1,2,3]).LD) + LDp = sparse(ldlt(Asp, perm=[1,2,3]).LD) # LDp = sparse(Fs.LD) Lp, dp = SuiteSparse.CHOLMOD.getLd!(copy(LDp)) Dp = sparse(Diagonal(dp)) @@ -618,7 +618,7 @@ end @testset "Element promotion and type inference" begin @inferred chol(As)\fill(1, size(As, 1)) - @inferred ldltfact(As)\fill(1, size(As, 1)) + @inferred ldlt(As)\fill(1, size(As, 1)) end end @@ -690,12 +690,12 @@ end @test chol(A)\B ≈ A\B end -@testset "Make sure that ldltfact performs an LDLt (Issue #19032)" begin +@testset "Make sure that ldlt performs an LDLt (Issue #19032)" begin m, n = 400, 500 A = sprandn(m, n, .2) M = [I copy(A'); A -I] b = M * fill(1., m+n) - F = ldltfact(M) + F = ldlt(M) s = unsafe_load(pointer(F)) @test s.is_super == 0 @test F\b ≈ fill(1., m+n) @@ -769,7 +769,7 @@ end AtA = A'*A C0 = 
[1., 2., 0, 0, 0] # Test both chol and LDLt with and without automatic permutations - for F in (chol(AtA), chol(AtA, perm=1:5), ldltfact(AtA), ldltfact(AtA, perm=1:5)) + for F in (chol(AtA), chol(AtA, perm=1:5), ldlt(AtA), ldlt(AtA, perm=1:5)) local F x0 = F\(b = fill(1., 5)) #Test both sparse/dense and vectors/matrices From 24817dea337450afcef5a9edb99e3d1ad23a18ef Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 15:18:12 -0700 Subject: [PATCH 09/21] Deprecate hessfact to hess. --- NEWS.md | 4 ++-- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 5 +++++ stdlib/LinearAlgebra/src/hessenberg.jl | 22 ++++++++++++++++------ stdlib/LinearAlgebra/test/dense.jl | 2 +- stdlib/LinearAlgebra/test/givens.jl | 2 +- stdlib/LinearAlgebra/test/hessenberg.jl | 2 +- 8 files changed, 28 insertions(+), 13 deletions(-) diff --git a/NEWS.md b/NEWS.md index ff865a548e852..95ea715792aab 100644 --- a/NEWS.md +++ b/NEWS.md @@ -695,8 +695,8 @@ Deprecated or removed `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). * `lufact`, `eigfact`, `schurfact`, `lqfact`, `qrfact`, `bkfact`, `cholfact`, - and `ldltfact` have respectively been deprecated to `lu`, `eig`, `schur`, - `lq`, `qr`, `bk`, `chol`, and `ldlt` ([#27159]). + `ldltfact`, and `hessfact` have respectively been deprecated to `lu`, `eig`, + `schur`, `lq`, `qr`, `bk`, `chol`, `ldlt`, and `hess` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 37dc3db22715c..6ba904341a25f 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -337,7 +337,7 @@ LinearAlgebra.eigmax LinearAlgebra.eigmin LinearAlgebra.eigvecs LinearAlgebra.eigfact! -LinearAlgebra.hessfact +LinearAlgebra.hess LinearAlgebra.hessfact! LinearAlgebra.schurfact! LinearAlgebra.schur diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index a3bac6f8f6b22..1fb9b1daf1570 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -89,7 +89,7 @@ export eigvecs, factorize, givens, - hessfact, + hess, hessfact!, isdiag, ishermitian, diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 19bec5e6fca6a..7378e5201d9ff 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1323,3 +1323,8 @@ export qrfact # deprecate ldltfact to ldlt export ldltfact @deprecate(ldltfact(M::SymTridiagonal{T}) where T, ldlt(M)) + +# deprecate hessfact to hess +export hessfact +@deprecate(hessfact(A::StridedMatrix{<:BlasFloat}), hess(A)) +@deprecate(hessfact(A::StridedMatrix{T}) where T, hess(A)) diff --git a/stdlib/LinearAlgebra/src/hessenberg.jl b/stdlib/LinearAlgebra/src/hessenberg.jl index d1014c2199bde..df22f2f234a01 100644 --- a/stdlib/LinearAlgebra/src/hessenberg.jl +++ b/stdlib/LinearAlgebra/src/hessenberg.jl @@ -7,22 +7,25 @@ struct Hessenberg{T,S<:AbstractMatrix} <: Factorization{T} new(factors, τ) end Hessenberg(factors::AbstractMatrix{T}, τ::Vector{T}) where {T} = Hessenberg{T,typeof(factors)}(factors, τ) - Hessenberg(A::StridedMatrix) = Hessenberg(LAPACK.gehrd!(A)...) 
+# iteration for destructuring into components +Base.iterate(S::Hessenberg) = (S.Q, Val(:H)) +Base.iterate(S::Hessenberg, ::Val{:H}) = (S.H, Val(:done)) +Base.iterate(S::Hessenberg, ::Val{:done}) = nothing """ hessfact!(A) -> Hessenberg -`hessfact!` is the same as [`hessfact`](@ref), but saves space by overwriting +`hessfact!` is the same as [`hess`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. """ hessfact!(A::StridedMatrix{<:BlasFloat}) = Hessenberg(A) -hessfact(A::StridedMatrix{<:BlasFloat}) = hessfact!(copy(A)) +hess(A::StridedMatrix{<:BlasFloat}) = hessfact!(copy(A)) """ - hessfact(A) -> Hessenberg + hess(A) -> Hessenberg Compute the Hessenberg decomposition of `A` and return a `Hessenberg` object. If `F` is the factorization object, the unitary matrix can be accessed with `F.Q` and the Hessenberg @@ -30,6 +33,8 @@ matrix with `F.H`. When `Q` is extracted, the resulting type is the `HessenbergQ and may be converted to a regular matrix with [`convert(Array, _)`](@ref) (or `Array(_)` for short). +Iterating the decomposition produces the factors `F.Q` and `F.H`. + # Examples ```jldoctest julia> A = [4. 9. 7.; 4. 4. 1.; 4. 3. 2.] @@ -38,16 +43,21 @@ julia> A = [4. 9. 7.; 4. 4. 1.; 4. 3. 2.] 4.0 4.0 1.0 4.0 3.0 2.0 -julia> F = hessfact(A); +julia> F = hess(A); julia> F.Q * F.H * F.Q' 3×3 Array{Float64,2}: 4.0 9.0 7.0 4.0 4.0 1.0 4.0 3.0 2.0 + +julia> q, h = F; # destructuring via iteration + +julia> q == F.Q && h == F.H +true ``` """ -hessfact(A::StridedMatrix{T}) where T = +hess(A::StridedMatrix{T}) where T = hessfact!(copy_oftype(A, eigtype(T))) struct HessenbergQ{T,S<:AbstractMatrix} <: AbstractMatrix{T} diff --git a/stdlib/LinearAlgebra/test/dense.jl b/stdlib/LinearAlgebra/test/dense.jl index 08ac111b94ee8..a703a28f4a7e4 100644 --- a/stdlib/LinearAlgebra/test/dense.jl +++ b/stdlib/LinearAlgebra/test/dense.jl @@ -382,7 +382,7 @@ end @test exp(A5) ≈ eA5 # Hessenberg - @test hessfact(A1).H ≈ convert(Matrix{elty}, + @test hess(A1).H ≈ convert(Matrix{elty}, [4.000000000000000 -1.414213562373094 -1.414213562373095 -1.414213562373095 4.999999999999996 -0.000000000000000 0 -0.000000000000002 3.000000000000000]) diff --git a/stdlib/LinearAlgebra/test/givens.jl b/stdlib/LinearAlgebra/test/givens.jl index 752b0aac0ab29..6fad41db20dcd 100644 --- a/stdlib/LinearAlgebra/test/givens.jl +++ b/stdlib/LinearAlgebra/test/givens.jl @@ -37,7 +37,7 @@ using LinearAlgebra: rmul!, lmul! G, _ = givens(one(elty),zero(elty),11,12) @test_throws DimensionMismatch lmul!(G, A) @test_throws DimensionMismatch rmul!(A, adjoint(G)) - @test abs.(A) ≈ abs.(hessfact(Ac).H) + @test abs.(A) ≈ abs.(hess(Ac).H) @test norm(R*Matrix{elty}(I, 10, 10)) ≈ one(elty) I10 = Matrix{elty}(I, 10, 10) diff --git a/stdlib/LinearAlgebra/test/hessenberg.jl b/stdlib/LinearAlgebra/test/hessenberg.jl index b649511a8a1df..785763af12836 100644 --- a/stdlib/LinearAlgebra/test/hessenberg.jl +++ b/stdlib/LinearAlgebra/test/hessenberg.jl @@ -18,7 +18,7 @@ let n = 10 Areal) if eltya != BigFloat - H = hessfact(A) + H = hess(A) @test size(H.Q, 1) == size(A, 1) @test size(H.Q, 2) == size(A, 2) @test size(H.Q) == size(A) From 297a97fb40e22dec6844c749a877c6606a9b6ff0 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 16:27:25 -0700 Subject: [PATCH 10/21] Deprecate lufact! to lu!. 
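For reviewers, a minimal sketch of the renamed in-place entry point (the matrix values are illustrative and mirror the `lu!` docstring below; `lu!` overwrites its argument with the factors):

```julia
using LinearAlgebra

A = [4.0 3.0; 6.0 3.0]
F = lu!(A)      # factorize in place; A now holds the combined L/U factors
b = [1.0, 2.0]
x = F \ b       # reuse the factorization to solve A*x = b
```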
--- NEWS.md | 2 ++ stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 8 +++---- stdlib/LinearAlgebra/src/deprecated.jl | 8 +++++++ stdlib/LinearAlgebra/src/lu.jl | 26 +++++++++++------------ stdlib/LinearAlgebra/src/tridiag.jl | 2 +- 6 files changed, 29 insertions(+), 19 deletions(-) diff --git a/NEWS.md b/NEWS.md index 95ea715792aab..ede1277fde973 100644 --- a/NEWS.md +++ b/NEWS.md @@ -698,6 +698,8 @@ Deprecated or removed `ldltfact`, and `hessfact` have respectively been deprecated to `lu`, `eig`, `schur`, `lq`, `qr`, `bk`, `chol`, `ldlt`, and `hess` ([#27159]). + * `lufact!` has been deprecated to `lu!` ([#27159]). + * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. Instead, reshape the array or add trailing indices so the dimensionality and number of indices diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 6ba904341a25f..dc34bdfcdfbc0 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -312,7 +312,7 @@ LinearAlgebra.LowerTriangular LinearAlgebra.UpperTriangular LinearAlgebra.UniformScaling LinearAlgebra.lu -LinearAlgebra.lufact! +LinearAlgebra.lu! LinearAlgebra.chol LinearAlgebra.cholfact! LinearAlgebra.lowrankupdate diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 1fb9b1daf1570..2dce791dd05fe 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -111,7 +111,7 @@ export lowrankupdate, lowrankupdate!, lu, - lufact!, + lu!, lyap, mul!, lmul!, @@ -252,7 +252,7 @@ Compute `A \\ B` in-place and store the result in `Y`, returning the result. The argument `A` should *not* be a matrix. Rather, instead of matrices it should be a factorization object (e.g. produced by [`factorize`](@ref) or [`chol`](@ref)). The reason for this is that factorization itself is both expensive and typically allocates memory -(although it can also be done in-place via, e.g., [`lufact!`](@ref)), +(although it can also be done in-place via, e.g., [`lu!`](@ref)), and performance-critical situations requiring `ldiv!` usually also require fine-grained control over the factorization of `A`. """ @@ -266,7 +266,7 @@ Compute `A \\ B` in-place and overwriting `B` to store the result. The argument `A` should *not* be a matrix. Rather, instead of matrices it should be a factorization object (e.g. produced by [`factorize`](@ref) or [`chol`](@ref)). The reason for this is that factorization itself is both expensive and typically allocates memory -(although it can also be done in-place via, e.g., [`lufact!`](@ref)), +(although it can also be done in-place via, e.g., [`lu!`](@ref)), and performance-critical situations requiring `ldiv!` usually also require fine-grained control over the factorization of `A`. """ @@ -281,7 +281,7 @@ Compute `A / B` in-place and overwriting `A` to store the result. The argument `B` should *not* be a matrix. Rather, instead of matrices it should be a factorization object (e.g. produced by [`factorize`](@ref) or [`chol`](@ref)). 
The reason for this is that factorization itself is both expensive and typically allocates memory -(although it can also be done in-place via, e.g., [`lufact!`](@ref)), +(although it can also be done in-place via, e.g., [`lu!`](@ref)), and performance-critical situations requiring `rdiv!` usually also require fine-grained control over the factorization of `B`. """ diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 7378e5201d9ff..383ee33b204b0 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -35,6 +35,7 @@ function bkfact!(A::StridedMatrix, uplo::Symbol, symmetric::Bool = issymmetric(A return bkfact!(symmetric ? Symmetric(A, uplo) : Hermitian(A, uplo), rook) end +export lufact! @deprecate sqrtm(A::UpperTriangular{T},::Type{Val{realmatrix}}) where {T,realmatrix} sqrtm(A, Val(realmatrix)) @deprecate lufact(A::AbstractMatrix, ::Type{Val{false}}) lufact(A, Val(false)) @deprecate lufact(A::AbstractMatrix, ::Type{Val{true}}) lufact(A, Val(true)) @@ -1328,3 +1329,10 @@ export ldltfact export hessfact @deprecate(hessfact(A::StridedMatrix{<:BlasFloat}), hess(A)) @deprecate(hessfact(A::StridedMatrix{T}) where T, hess(A)) + +# deprecate lufact! to lu! +# lufact! exported in a deprecation above +@deprecate(lufact!(A::StridedMatrix{T}, pivot::Union{Val{false}, Val{true}} = Val(true)) where T<:BlasFloat, lufact!(A, pivot)) +@deprecate(lufact!(A::HermOrSym, pivot::Union{Val{false}, Val{true}} = Val(true)), lu!(A, pivot)) +@deprecate(lufact!(A::StridedMatrix, pivot::Union{Val{false}, Val{true}} = Val(true)), lu!(A, pivot)) +@deprecate(lufact!(A::Tridiagonal{T,V}, pivot::Union{Val{false}, Val{true}} = Val(true)) where {T,V}, lu!(A, pivot)) diff --git a/stdlib/LinearAlgebra/src/lu.jl b/stdlib/LinearAlgebra/src/lu.jl index cfdbee76376a4..f041130230290 100644 --- a/stdlib/LinearAlgebra/src/lu.jl +++ b/stdlib/LinearAlgebra/src/lu.jl @@ -29,22 +29,22 @@ adjoint(F::LU) = Adjoint(F) transpose(F::LU) = Transpose(F) # StridedMatrix -function lufact!(A::StridedMatrix{T}, pivot::Union{Val{false}, Val{true}} = Val(true)) where T<:BlasFloat +function lu!(A::StridedMatrix{T}, pivot::Union{Val{false}, Val{true}} = Val(true)) where T<:BlasFloat if pivot === Val(false) return generic_lufact!(A, pivot) end lpt = LAPACK.getrf!(A) return LU{T,typeof(A)}(lpt[1], lpt[2], lpt[3]) end -function lufact!(A::HermOrSym, pivot::Union{Val{false}, Val{true}} = Val(true)) +function lu!(A::HermOrSym, pivot::Union{Val{false}, Val{true}} = Val(true)) copytri!(A.data, A.uplo, isa(A, Hermitian)) - lufact!(A.data, pivot) + lu!(A.data, pivot) end """ - lufact!(A, pivot=Val(true)) -> LU + lu!(A, pivot=Val(true)) -> LU -`lufact!` is the same as [`lu`](@ref), but saves space by overwriting the +`lu!` is the same as [`lu`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. An [`InexactError`](@ref) exception is thrown if the factorization produces a number not representable by the element type of `A`, e.g. for integer types. @@ -56,7 +56,7 @@ julia> A = [4. 3.; 6. 3.] 4.0 3.0 6.0 3.0 -julia> F = lufact!(A) +julia> F = lu!(A) LU{Float64,Array{Float64,2}} L factor: 2×2 Array{Float64,2}: @@ -72,13 +72,13 @@ julia> iA = [4 3; 6 3] 4 3 6 3 -julia> lufact!(iA) +julia> lu!(iA) ERROR: InexactError: Int64(Int64, 0.6666666666666666) Stacktrace: [...] 
``` """ -lufact!(A::StridedMatrix, pivot::Union{Val{false}, Val{true}} = Val(true)) = generic_lufact!(A, pivot) +lu!(A::StridedMatrix, pivot::Union{Val{false}, Val{true}} = Val(true)) = generic_lufact!(A, pivot) function generic_lufact!(A::StridedMatrix{T}, ::Val{Pivot} = Val(true)) where {T,Pivot} m, n = size(A) minmn = min(m,n) @@ -130,7 +130,7 @@ end # floating point types doesn't have to be promoted for LU, but should default to pivoting function lu(A::Union{AbstractMatrix{T}, AbstractMatrix{Complex{T}}}, pivot::Union{Val{false}, Val{true}} = Val(true)) where {T<:AbstractFloat} - lufact!(copy(A), pivot) + lu!(copy(A), pivot) end # for all other types we must promote to a type which is stable under division @@ -202,20 +202,20 @@ function lu(A::AbstractMatrix{T}, pivot::Union{Val{false}, Val{true}}) where T S = typeof(zero(T)/one(T)) AA = similar(A, S) copyto!(AA, A) - lufact!(AA, pivot) + lu!(AA, pivot) end # We can't assume an ordered field so we first try without pivoting function lu(A::AbstractMatrix{T}) where T S = typeof(zero(T)/one(T)) AA = similar(A, S) copyto!(AA, A) - F = lufact!(AA, Val(false)) + F = lu!(AA, Val(false)) if issuccess(F) return F else AA = similar(A, S) copyto!(AA, A) - return lufact!(AA, Val(true)) + return lu!(AA, Val(true)) end end @@ -385,7 +385,7 @@ end # Tridiagonal # See dgttrf.f -function lufact!(A::Tridiagonal{T,V}, pivot::Union{Val{false}, Val{true}} = Val(true)) where {T,V} +function lu!(A::Tridiagonal{T,V}, pivot::Union{Val{false}, Val{true}} = Val(true)) where {T,V} n = size(A, 1) info = 0 ipiv = Vector{BlasInt}(undef, n) diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl index 00dcf1fc3d701..fd74f283b3740 100644 --- a/stdlib/LinearAlgebra/src/tridiag.jl +++ b/stdlib/LinearAlgebra/src/tridiag.jl @@ -399,7 +399,7 @@ struct Tridiagonal{T,V<:AbstractVector{T}} <: AbstractMatrix{T} end new{T,V}(dl, d, du) end - # constructor used in lufact! + # constructor used in lu! function Tridiagonal{T,V}(dl::V, d::V, du::V, du2::V) where {T,V<:AbstractVector{T}} new{T,V}(dl, d, du, du2) end From 65adc1ac4870ab178733992fdb7aa4418fe258d2 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 16:38:06 -0700 Subject: [PATCH 11/21] Deprecate eigfact! to eig!. --- NEWS.md | 3 ++- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 15 +++++++++++++++ stdlib/LinearAlgebra/src/eigen.jl | 22 +++++++++++----------- stdlib/LinearAlgebra/src/symmetric.jl | 16 ++++++++-------- stdlib/LinearAlgebra/src/tridiag.jl | 12 ++++++------ 7 files changed, 44 insertions(+), 28 deletions(-) diff --git a/NEWS.md b/NEWS.md index ede1277fde973..19a8e0e9458db 100644 --- a/NEWS.md +++ b/NEWS.md @@ -698,7 +698,8 @@ Deprecated or removed `ldltfact`, and `hessfact` have respectively been deprecated to `lu`, `eig`, `schur`, `lq`, `qr`, `bk`, `chol`, `ldlt`, and `hess` ([#27159]). - * `lufact!` has been deprecated to `lu!` ([#27159]). + * `lufact!` and `eigfact!` have respectively been deprecated to `lu!` + and `eig!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. 
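As a brief, non-normative illustration of the `eig!` rename noted above (the symmetric matrix is a made-up example; `eig!` overwrites its argument, so a copy is passed here):

```julia
using LinearAlgebra

A = [2.0 1.0; 1.0 2.0]
F = eig!(copy(A))   # in-place variant of eig; returns an Eigen object
F.values            # eigenvalues, here approximately [1.0, 3.0]
F.vectors           # corresponding eigenvectors, one per column
```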
diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index dc34bdfcdfbc0..dfc981430503a 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -336,7 +336,7 @@ LinearAlgebra.eigvals! LinearAlgebra.eigmax LinearAlgebra.eigmin LinearAlgebra.eigvecs -LinearAlgebra.eigfact! +LinearAlgebra.eig! LinearAlgebra.hess LinearAlgebra.hessfact! LinearAlgebra.schurfact! diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 2dce791dd05fe..6669c209a3a66 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -81,7 +81,7 @@ export diagm, dot, eig, - eigfact!, + eig!, eigmax, eigmin, eigvals, diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 383ee33b204b0..b5e2d6a015b05 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1336,3 +1336,18 @@ export hessfact @deprecate(lufact!(A::HermOrSym, pivot::Union{Val{false}, Val{true}} = Val(true)), lu!(A, pivot)) @deprecate(lufact!(A::StridedMatrix, pivot::Union{Val{false}, Val{true}} = Val(true)), lu!(A, pivot)) @deprecate(lufact!(A::Tridiagonal{T,V}, pivot::Union{Val{false}, Val{true}} = Val(true)) where {T,V}, lu!(A, pivot)) + +# deprecate eigfact! to eig! +export eigfact! +@deprecate(eigfact!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T<:BlasReal, eig!(A; permute=permute, scale=scale)) +@deprecate(eigfact!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T<:BlasComplex, eig!(A; permute=permute, scale=scale)) +@deprecate(eigfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasReal, eig!(A, B)) +@deprecate(eigfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasComplex, eig!(A, B)) +@deprecate(eigfact!(A::SymTridiagonal{<:BlasReal}), eig!(A)) +@deprecate(eigfact!(A::SymTridiagonal{<:BlasReal}, irange::UnitRange), eig!(A, irange)) +@deprecate(eigfact!(A::SymTridiagonal{<:BlasReal}, vl::Real, vu::Real), eig!(A, vl, vu)) +@deprecate(eigfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}), eig!(A)) +@deprecate(eigfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, irange::UnitRange), eig!(A, irange)) +@deprecate(eigfact!(A::RealHermSymComplexHerm{T,<:StridedMatrix}, vl::Real, vh::Real) where {T<:BlasReal}, eig!(A, vl, vh)) +@deprecate(eigfact!(A::HermOrSym{T,S}, B::HermOrSym{T,S}) where {T<:BlasReal,S<:StridedMatrix}, eig!(A, B)) +@deprecate(eigfact!(A::Hermitian{T,S}, B::Hermitian{T,S}) where {T<:BlasComplex,S<:StridedMatrix}, eig!(A, B)) diff --git a/stdlib/LinearAlgebra/src/eigen.jl b/stdlib/LinearAlgebra/src/eigen.jl index 7cb421a25b503..bf25e134ade10 100644 --- a/stdlib/LinearAlgebra/src/eigen.jl +++ b/stdlib/LinearAlgebra/src/eigen.jl @@ -35,15 +35,15 @@ end isposdef(A::Union{Eigen,GeneralizedEigen}) = isreal(A.values) && all(x -> x > 0, A.values) """ - eigfact!(A, [B]) + eig!(A, [B]) Same as [`eig`](@ref), but saves space by overwriting the input `A` (and `B`), instead of creating a copy. """ -function eigfact!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T<:BlasReal +function eig!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T<:BlasReal n = size(A, 2) n == 0 && return Eigen(zeros(T, 0), zeros(T, 0, 0)) - issymmetric(A) && return eigfact!(Symmetric(A)) + issymmetric(A) && return eig!(Symmetric(A)) A, WR, WI, VL, VR, _ = LAPACK.geevx!(permute ? (scale ? 'B' : 'P') : (scale ? 
'S' : 'N'), 'N', 'V', 'N', A) iszero(WI) && return Eigen(WR, VR) evec = zeros(Complex{T}, n, n) @@ -63,10 +63,10 @@ function eigfact!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) whe return Eigen(complex.(WR, WI), evec) end -function eigfact!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T<:BlasComplex +function eig!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T<:BlasComplex n = size(A, 2) n == 0 && return Eigen(zeros(T, 0), zeros(T, 0, 0)) - ishermitian(A) && return eigfact!(Hermitian(A)) + ishermitian(A) && return eig!(Hermitian(A)) return Eigen(LAPACK.geevx!(permute ? (scale ? 'B' : 'P') : (scale ? 'S' : 'N'), 'N', 'V', 'N', A)[[2,4]]...) end @@ -122,7 +122,7 @@ true function eig(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T AA = copy_oftype(A, eigtype(T)) isdiag(AA) && return eig(Diagonal(AA), permute = permute, scale = scale) - return eigfact!(AA, permute = permute, scale = scale) + return eig!(AA, permute = permute, scale = scale) end eig(x::Number) = Eigen([x], fill(one(x), 1, 1)) @@ -308,8 +308,8 @@ inv(A::Eigen) = A.vectors * inv(Diagonal(A.values)) / A.vectors det(A::Eigen) = prod(A.values) # Generalized eigenproblem -function eigfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasReal - issymmetric(A) && isposdef(B) && return eigfact!(Symmetric(A), Symmetric(B)) +function eig!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasReal + issymmetric(A) && isposdef(B) && return eig!(Symmetric(A), Symmetric(B)) n = size(A, 1) alphar, alphai, beta, _, vr = LAPACK.ggev!('N', 'V', A, B) iszero(alphai) && return GeneralizedEigen(alphar ./ beta, vr) @@ -331,8 +331,8 @@ function eigfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasReal return GeneralizedEigen(complex.(alphar, alphai)./beta, vecs) end -function eigfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasComplex - ishermitian(A) && isposdef(B) && return eigfact!(Hermitian(A), Hermitian(B)) +function eig!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasComplex + ishermitian(A) && isposdef(B) && return eig!(Hermitian(A), Hermitian(B)) alpha, beta, _, vr = LAPACK.ggev!('N', 'V', A, B) return GeneralizedEigen(alpha./beta, vr) end @@ -379,7 +379,7 @@ true """ function eig(A::AbstractMatrix{TA}, B::AbstractMatrix{TB}) where {TA,TB} S = promote_type(eigtype(TA),TB) - return eigfact!(copy_oftype(A, S), copy_oftype(B, S)) + return eig!(copy_oftype(A, S), copy_oftype(B, S)) end eig(A::Number, B::Number) = eig(fill(A,1,1), fill(B,1,1)) diff --git a/stdlib/LinearAlgebra/src/symmetric.jl b/stdlib/LinearAlgebra/src/symmetric.jl index 6174f9d4c5b99..8890dc0952154 100644 --- a/stdlib/LinearAlgebra/src/symmetric.jl +++ b/stdlib/LinearAlgebra/src/symmetric.jl @@ -474,15 +474,15 @@ end inv(A::Hermitian{<:Any,<:StridedMatrix}) = Hermitian(_inv(A), Symbol(A.uplo)) inv(A::Symmetric{<:Any,<:StridedMatrix}) = Symmetric(_inv(A), Symbol(A.uplo)) -eigfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}) = Eigen(LAPACK.syevr!('V', 'A', A.uplo, A.data, 0.0, 0.0, 0, 0, -1.0)...) +eig!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}) = Eigen(LAPACK.syevr!('V', 'A', A.uplo, A.data, 0.0, 0.0, 0, 0, -1.0)...) function eig(A::RealHermSymComplexHerm) T = eltype(A) S = eigtype(T) - eigfact!(S != T ? convert(AbstractMatrix{S}, A) : copy(A)) + eig!(S != T ? 
convert(AbstractMatrix{S}, A) : copy(A)) end -eigfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, irange::UnitRange) = Eigen(LAPACK.syevr!('V', 'I', A.uplo, A.data, 0.0, 0.0, irange.start, irange.stop, -1.0)...) +eig!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, irange::UnitRange) = Eigen(LAPACK.syevr!('V', 'I', A.uplo, A.data, 0.0, 0.0, irange.start, irange.stop, -1.0)...) """ eig(A::Union{SymTridiagonal, Hermitian, Symmetric}, irange::UnitRange) -> Eigen @@ -504,10 +504,10 @@ The `UnitRange` `irange` specifies indices of the sorted eigenvalues to search f function eig(A::RealHermSymComplexHerm, irange::UnitRange) T = eltype(A) S = eigtype(T) - eigfact!(S != T ? convert(AbstractMatrix{S}, A) : copy(A), irange) + eig!(S != T ? convert(AbstractMatrix{S}, A) : copy(A), irange) end -eigfact!(A::RealHermSymComplexHerm{T,<:StridedMatrix}, vl::Real, vh::Real) where {T<:BlasReal} = +eig!(A::RealHermSymComplexHerm{T,<:StridedMatrix}, vl::Real, vh::Real) where {T<:BlasReal} = Eigen(LAPACK.syevr!('V', 'V', A.uplo, A.data, convert(T, vl), convert(T, vh), 0, 0, -1.0)...) """ @@ -530,7 +530,7 @@ The following functions are available for `Eigen` objects: [`inv`](@ref), [`det` function eig(A::RealHermSymComplexHerm, vl::Real, vh::Real) T = eltype(A) S = eigtype(T) - eigfact!(S != T ? convert(AbstractMatrix{S}, A) : copy(A), vl, vh) + eig!(S != T ? convert(AbstractMatrix{S}, A) : copy(A), vl, vh) end eigvals!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}) = @@ -624,11 +624,11 @@ end eigmax(A::RealHermSymComplexHerm{<:Real,<:StridedMatrix}) = eigvals(A, size(A, 1):size(A, 1))[1] eigmin(A::RealHermSymComplexHerm{<:Real,<:StridedMatrix}) = eigvals(A, 1:1)[1] -function eigfact!(A::HermOrSym{T,S}, B::HermOrSym{T,S}) where {T<:BlasReal,S<:StridedMatrix} +function eig!(A::HermOrSym{T,S}, B::HermOrSym{T,S}) where {T<:BlasReal,S<:StridedMatrix} vals, vecs, _ = LAPACK.sygvd!(1, 'V', A.uplo, A.data, B.uplo == A.uplo ? B.data : copy(B.data')) GeneralizedEigen(vals, vecs) end -function eigfact!(A::Hermitian{T,S}, B::Hermitian{T,S}) where {T<:BlasComplex,S<:StridedMatrix} +function eig!(A::Hermitian{T,S}, B::Hermitian{T,S}) where {T<:BlasComplex,S<:StridedMatrix} vals, vecs, _ = LAPACK.sygvd!(1, 'V', A.uplo, A.data, B.uplo == A.uplo ? B.data : copy(B.data')) GeneralizedEigen(vals, vecs) end diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl index fd74f283b3740..b47c2b9f71998 100644 --- a/stdlib/LinearAlgebra/src/tridiag.jl +++ b/stdlib/LinearAlgebra/src/tridiag.jl @@ -183,18 +183,18 @@ end (\)(T::SymTridiagonal, B::StridedVecOrMat) = ldlt(T)\B -eigfact!(A::SymTridiagonal{<:BlasReal}) = Eigen(LAPACK.stegr!('V', A.dv, A.ev)...) -eig(A::SymTridiagonal{T}) where T = eigfact!(copy_oftype(A, eigtype(T))) +eig!(A::SymTridiagonal{<:BlasReal}) = Eigen(LAPACK.stegr!('V', A.dv, A.ev)...) +eig(A::SymTridiagonal{T}) where T = eig!(copy_oftype(A, eigtype(T))) -eigfact!(A::SymTridiagonal{<:BlasReal}, irange::UnitRange) = +eig!(A::SymTridiagonal{<:BlasReal}, irange::UnitRange) = Eigen(LAPACK.stegr!('V', 'I', A.dv, A.ev, 0.0, 0.0, irange.start, irange.stop)...) eig(A::SymTridiagonal{T}, irange::UnitRange) where T = - eigfact!(copy_oftype(A, eigtype(T)), irange) + eig!(copy_oftype(A, eigtype(T)), irange) -eigfact!(A::SymTridiagonal{<:BlasReal}, vl::Real, vu::Real) = +eig!(A::SymTridiagonal{<:BlasReal}, vl::Real, vu::Real) = Eigen(LAPACK.stegr!('V', 'V', A.dv, A.ev, vl, vu, 0, 0)...) 
eig(A::SymTridiagonal{T}, vl::Real, vu::Real) where T = - eigfact!(copy_oftype(A, eigtype(T)), vl, vu) + eig!(copy_oftype(A, eigtype(T)), vl, vu) eigvals!(A::SymTridiagonal{<:BlasReal}) = LAPACK.stev!('N', A.dv, A.ev)[1] eigvals(A::SymTridiagonal{T}) where T = eigvals!(copy_oftype(A, eigtype(T))) From 51a8a2c550b1125658b0d6dda99f50584d0d4ad5 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 16:43:31 -0700 Subject: [PATCH 12/21] Deprecate schurfact! to schur!. --- NEWS.md | 4 ++-- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 5 +++++ stdlib/LinearAlgebra/src/schur.jl | 18 +++++++++--------- 5 files changed, 18 insertions(+), 13 deletions(-) diff --git a/NEWS.md b/NEWS.md index 19a8e0e9458db..b69b24e2ec013 100644 --- a/NEWS.md +++ b/NEWS.md @@ -698,8 +698,8 @@ Deprecated or removed `ldltfact`, and `hessfact` have respectively been deprecated to `lu`, `eig`, `schur`, `lq`, `qr`, `bk`, `chol`, `ldlt`, and `hess` ([#27159]). - * `lufact!` and `eigfact!` have respectively been deprecated to `lu!` - and `eig!` ([#27159]). + * `lufact!`, `eigfact!`, and `schurfact!` have respectively been deprecated to + `lu!`, `eig!`, and `schur!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index dfc981430503a..73ddbe1901a15 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -339,7 +339,7 @@ LinearAlgebra.eigvecs LinearAlgebra.eig! LinearAlgebra.hess LinearAlgebra.hessfact! -LinearAlgebra.schurfact! +LinearAlgebra.schur! LinearAlgebra.schur LinearAlgebra.ordschur LinearAlgebra.ordschur! diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 6669c209a3a66..e3c9ff0a37267 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -130,7 +130,7 @@ export rank, rdiv!, schur, - schurfact!, + schur!, svd, svdfact!, svdfact, diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index b5e2d6a015b05..71de9fc979d8b 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1351,3 +1351,8 @@ export eigfact! @deprecate(eigfact!(A::RealHermSymComplexHerm{T,<:StridedMatrix}, vl::Real, vh::Real) where {T<:BlasReal}, eig!(A, vl, vh)) @deprecate(eigfact!(A::HermOrSym{T,S}, B::HermOrSym{T,S}) where {T<:BlasReal,S<:StridedMatrix}, eig!(A, B)) @deprecate(eigfact!(A::Hermitian{T,S}, B::Hermitian{T,S}) where {T<:BlasComplex,S<:StridedMatrix}, eig!(A, B)) + +# deprecate schurfact! to schur! +export schurfact! +@deprecate(schurfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat}, schur!(A, B)) +@deprecate(schurfact!(A::StridedMatrix{<:BlasFloat}), schur!(A)) diff --git a/stdlib/LinearAlgebra/src/schur.jl b/stdlib/LinearAlgebra/src/schur.jl index 51abb54e50a0b..f7e435cd4edbe 100644 --- a/stdlib/LinearAlgebra/src/schur.jl +++ b/stdlib/LinearAlgebra/src/schur.jl @@ -24,7 +24,7 @@ Base.iterate(S::Schur, ::Val{:done}) = nothing end """ - schurfact!(A::StridedMatrix) -> F::Schur + schur!(A::StridedMatrix) -> F::Schur Same as [`schur`](@ref) but uses the input argument `A` as workspace. @@ -35,7 +35,7 @@ julia> A = [5. 7.; -2. -4.] 
5.0 7.0 -2.0 -4.0 -julia> F = schurfact!(A) +julia> F = schur!(A) Schur{Float64,Array{Float64,2}} T factor: 2×2 Array{Float64,2}: @@ -56,7 +56,7 @@ julia> A 0.0 -2.0 ``` """ -schurfact!(A::StridedMatrix{<:BlasFloat}) = Schur(LinearAlgebra.LAPACK.gees!('V', A)...) +schur!(A::StridedMatrix{<:BlasFloat}) = Schur(LinearAlgebra.LAPACK.gees!('V', A)...) """ schur(A::StridedMatrix) -> F::Schur @@ -101,8 +101,8 @@ julia> t == F.T && z == F.Z && vals == F.values true ``` """ -schur(A::StridedMatrix{<:BlasFloat}) = schurfact!(copy(A)) -schur(A::StridedMatrix{T}) where T = schurfact!(copy_oftype(A, eigtype(T))) +schur(A::StridedMatrix{<:BlasFloat}) = schur!(copy(A)) +schur(A::StridedMatrix{T}) where T = schur!(copy_oftype(A, eigtype(T))) schur(A::Symmetric) = schur(copyto!(similar(parent(A)), A)) schur(A::Hermitian) = schur(copyto!(similar(parent(A)), A)) @@ -216,11 +216,11 @@ Base.iterate(S::GeneralizedSchur, ::Val{:done}) = nothing end """ - schurfact!(A::StridedMatrix, B::StridedMatrix) -> F::GeneralizedSchur + schur!(A::StridedMatrix, B::StridedMatrix) -> F::GeneralizedSchur Same as [`schur`](@ref) but uses the input matrices `A` and `B` as workspace. """ -schurfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = +schur!(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = GeneralizedSchur(LinearAlgebra.LAPACK.gges!('V', 'V', A, B)...) """ @@ -236,10 +236,10 @@ generalized eigenvalues of `A` and `B` can be obtained with `F.α./F.β`. Iterating the decomposition produces the components `F.S`, `F.T`, `F.Q`, `F.Z`, `F.α`, and `F.β`. """ -schur(A::StridedMatrix{T},B::StridedMatrix{T}) where {T<:BlasFloat} = schurfact!(copy(A),copy(B)) +schur(A::StridedMatrix{T},B::StridedMatrix{T}) where {T<:BlasFloat} = schur!(copy(A),copy(B)) function schur(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB} S = promote_type(eigtype(TA), TB) - return schurfact!(copy_oftype(A, S), copy_oftype(B, S)) + return schur!(copy_oftype(A, S), copy_oftype(B, S)) end """ From 2b35b7847bfea41c0b96b242a0e77ffa3b331bda Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 16:48:49 -0700 Subject: [PATCH 13/21] Deprecate lqfact! to lq!. --- NEWS.md | 4 ++-- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 4 ++++ stdlib/LinearAlgebra/src/lq.jl | 6 +++--- 5 files changed, 11 insertions(+), 7 deletions(-) diff --git a/NEWS.md b/NEWS.md index b69b24e2ec013..6f476f834d3aa 100644 --- a/NEWS.md +++ b/NEWS.md @@ -698,8 +698,8 @@ Deprecated or removed `ldltfact`, and `hessfact` have respectively been deprecated to `lu`, `eig`, `schur`, `lq`, `qr`, `bk`, `chol`, `ldlt`, and `hess` ([#27159]). - * `lufact!`, `eigfact!`, and `schurfact!` have respectively been deprecated to - `lu!`, `eig!`, and `schur!` ([#27159]). + * `lufact!`, `eigfact!`, `schurfact!`, and `lqfact!` have respectively + been deprecated to `lu!`, `eig!`, `schur!`, and `lq!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 73ddbe1901a15..b31355a0f7f5f 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -326,7 +326,7 @@ LinearAlgebra.qrfact! LinearAlgebra.QR LinearAlgebra.QRCompactWY LinearAlgebra.QRPivoted -LinearAlgebra.lqfact! +LinearAlgebra.lq! 
LinearAlgebra.lq LinearAlgebra.bk LinearAlgebra.bkfact! diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index e3c9ff0a37267..7d29d853f601b 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -126,7 +126,7 @@ export qr, qrfact!, lq, - lqfact!, + lq!, rank, rdiv!, schur, diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 71de9fc979d8b..617669cfd1cd3 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1356,3 +1356,7 @@ export eigfact! export schurfact! @deprecate(schurfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat}, schur!(A, B)) @deprecate(schurfact!(A::StridedMatrix{<:BlasFloat}), schur!(A)) + +# deprecate lqfact! to lq! +export lqfact! +@deprecate(lqfact!(A::StridedMatrix{<:BlasFloat}), lq!(A)) diff --git a/stdlib/LinearAlgebra/src/lq.jl b/stdlib/LinearAlgebra/src/lq.jl index 87a33b5b19f17..10ca990f027cb 100644 --- a/stdlib/LinearAlgebra/src/lq.jl +++ b/stdlib/LinearAlgebra/src/lq.jl @@ -30,12 +30,12 @@ LQPackedQ(factors::AbstractMatrix{T}, τ::Vector{T}) where {T} = LQPackedQ{T,typ """ - lqfact!(A) -> LQ + lq!(A) -> LQ Compute the LQ factorization of `A`, using the input matrix as a workspace. See also [`lq`](@ref). """ -lqfact!(A::StridedMatrix{<:BlasFloat}) = LQ(LAPACK.gelqf!(A)...) +lq!(A::StridedMatrix{<:BlasFloat}) = LQ(LAPACK.gelqf!(A)...) """ lq(A) -> S::LQ @@ -70,7 +70,7 @@ julia> l == S.L && q == S.Q true ``` """ -lq(A::StridedMatrix{<:BlasFloat}) = lqfact!(copy(A)) +lq(A::StridedMatrix{<:BlasFloat}) = lq!(copy(A)) lq(x::Number) = lq(fill(x,1,1)) copy(A::LQ) = LQ(copy(A.factors), copy(A.τ)) From b7076f9d1e2daedc8e36b75fd9276f88d59851ba Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 16:57:55 -0700 Subject: [PATCH 14/21] Deprecate qrfact! to qr!. --- NEWS.md | 4 ++-- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 9 +++++++++ stdlib/LinearAlgebra/src/qr.jl | 24 +++++++++++------------ stdlib/LinearAlgebra/test/qr.jl | 2 +- 6 files changed, 26 insertions(+), 17 deletions(-) diff --git a/NEWS.md b/NEWS.md index 6f476f834d3aa..155712671ff7a 100644 --- a/NEWS.md +++ b/NEWS.md @@ -698,8 +698,8 @@ Deprecated or removed `ldltfact`, and `hessfact` have respectively been deprecated to `lu`, `eig`, `schur`, `lq`, `qr`, `bk`, `chol`, `ldlt`, and `hess` ([#27159]). - * `lufact!`, `eigfact!`, `schurfact!`, and `lqfact!` have respectively - been deprecated to `lu!`, `eig!`, `schur!`, and `lq!` ([#27159]). + * `lufact!`, `eigfact!`, `schurfact!`, `lqfact!`, and `qrfact!` have + respectively been deprecated to `lu!`, `eig!`, `schur!`, `lq!`, and `qr!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index b31355a0f7f5f..42dfad367047f 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -322,7 +322,7 @@ LinearAlgebra.lowrankdowndate! LinearAlgebra.ldlt LinearAlgebra.ldltfact! LinearAlgebra.qr -LinearAlgebra.qrfact! +LinearAlgebra.qr! 
LinearAlgebra.QR LinearAlgebra.QRCompactWY LinearAlgebra.QRPivoted diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 7d29d853f601b..56fd5b4453ab8 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -124,7 +124,7 @@ export ordschur, pinv, qr, - qrfact!, + qr!, lq, lq!, rank, diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 617669cfd1cd3..c9c000beffff0 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1360,3 +1360,12 @@ export schurfact! # deprecate lqfact! to lq! export lqfact! @deprecate(lqfact!(A::StridedMatrix{<:BlasFloat}), lq!(A)) + +# deprecate qrfact! to qr! +export qrfact! +@deprecate(qrfact!(A::StridedMatrix{<:BlasFloat}, ::Val{false}), qr!(A, Val(false))) +@deprecate(qrfact!(A::StridedMatrix{<:BlasFloat}, ::Val{true}), qr!(A, Val(true))) +@deprecate(qrfact!(A::StridedMatrix{<:BlasFloat}), qr!(A)) +@deprecate(qrfact!(A::StridedMatrix, ::Val{false}), qr!(A, Val(false))) +@deprecate(qrfact!(A::StridedMatrix, ::Val{true}), qr!(A, Val(true))) +@deprecate(qrfact!(A::StridedMatrix), qr!(A)) diff --git a/stdlib/LinearAlgebra/src/qr.jl b/stdlib/LinearAlgebra/src/qr.jl index 9374a3fa594c4..90e5ae7d21c91 100644 --- a/stdlib/LinearAlgebra/src/qr.jl +++ b/stdlib/LinearAlgebra/src/qr.jl @@ -240,16 +240,16 @@ function qrfactPivotedUnblocked!(A::StridedMatrix) end # LAPACK version -qrfact!(A::StridedMatrix{<:BlasFloat}, ::Val{false}) = QRCompactWY(LAPACK.geqrt!(A, min(min(size(A)...), 36))...) -qrfact!(A::StridedMatrix{<:BlasFloat}, ::Val{true}) = QRPivoted(LAPACK.geqp3!(A)...) -qrfact!(A::StridedMatrix{<:BlasFloat}) = qrfact!(A, Val(false)) +qr!(A::StridedMatrix{<:BlasFloat}, ::Val{false}) = QRCompactWY(LAPACK.geqrt!(A, min(min(size(A)...), 36))...) +qr!(A::StridedMatrix{<:BlasFloat}, ::Val{true}) = QRPivoted(LAPACK.geqp3!(A)...) +qr!(A::StridedMatrix{<:BlasFloat}) = qr!(A, Val(false)) # Generic fallbacks """ - qrfact!(A, pivot=Val(false)) + qr!(A, pivot=Val(false)) -`qrfact!` is the same as [`qr`](@ref) when `A` is a subtype of +`qr!` is the same as [`qr`](@ref) when `A` is a subtype of `StridedMatrix`, but saves space by overwriting the input `A`, instead of creating a copy. An [`InexactError`](@ref) exception is thrown if the factorization produces a number not representable by the element type of `A`, e.g. for integer types. @@ -261,7 +261,7 @@ julia> a = [1. 2.; 3. 4.] 1.0 2.0 3.0 4.0 -julia> qrfact!(a) +julia> qr!(a) LinearAlgebra.QRCompactWY{Float64,Array{Float64,2}} Q factor: 2×2 LinearAlgebra.QRCompactWYQ{Float64,Array{Float64,2}}: @@ -277,15 +277,15 @@ julia> a = [1 2; 3 4] 1 2 3 4 -julia> qrfact!(a) +julia> qr!(a) ERROR: InexactError: Int64(Int64, -3.1622776601683795) Stacktrace: [...] 
``` """ -qrfact!(A::StridedMatrix, ::Val{false}) = qrfactUnblocked!(A) -qrfact!(A::StridedMatrix, ::Val{true}) = qrfactPivotedUnblocked!(A) -qrfact!(A::StridedMatrix) = qrfact!(A, Val(false)) +qr!(A::StridedMatrix, ::Val{false}) = qrfactUnblocked!(A) +qr!(A::StridedMatrix, ::Val{true}) = qrfactPivotedUnblocked!(A) +qr!(A::StridedMatrix) = qr!(A, Val(false)) _qreltype(::Type{T}) where T = typeof(zero(T)/sqrt(abs2(one(T)))) @@ -358,12 +358,12 @@ true function qr(A::AbstractMatrix{T}, arg) where T AA = similar(A, _qreltype(T), size(A)) copyto!(AA, A) - return qrfact!(AA, arg) + return qr!(AA, arg) end function qr(A::AbstractMatrix{T}) where T AA = similar(A, _qreltype(T), size(A)) copyto!(AA, A) - return qrfact!(AA) + return qr!(AA) end qr(x::Number) = qr(fill(x,1,1)) qr(v::AbstractVector) = qr(reshape(v, (length(v), 1))) diff --git a/stdlib/LinearAlgebra/test/qr.jl b/stdlib/LinearAlgebra/test/qr.jl index ebfa56888671c..2f9f0d763cbc3 100644 --- a/stdlib/LinearAlgebra/test/qr.jl +++ b/stdlib/LinearAlgebra/test/qr.jl @@ -65,7 +65,7 @@ rectangularQ(Q::LinearAlgebra.AbstractQ) = convert(Array, Q) if eltya != Int @test Matrix{eltyb}(I, a_1, a_1)*q ≈ convert(AbstractMatrix{tab}, q) ac = copy(a) - @test qrfact!(a[:, 1:5])\b == qrfact!(view(ac, :, 1:5))\b + @test qr!(a[:, 1:5])\b == qr!(view(ac, :, 1:5))\b end qrstring = sprint((t, s) -> show(t, "text/plain", s), qra) rstring = sprint((t, s) -> show(t, "text/plain", s), r) From cade6df8a3bb95cd2452dff6f3c43ee5666dd931 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 17:10:45 -0700 Subject: [PATCH 15/21] Deprecate cholfact! to chol!. --- NEWS.md | 5 ++-- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/cholesky.jl | 34 +++++++++++------------ stdlib/LinearAlgebra/src/dense.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 10 +++++++ stdlib/LinearAlgebra/test/cholesky.jl | 16 +++++------ stdlib/LinearAlgebra/test/dense.jl | 2 +- stdlib/SuiteSparse/src/cholmod.jl | 12 ++++---- stdlib/SuiteSparse/src/deprecated.jl | 13 +++++++++ stdlib/SuiteSparse/test/cholmod.jl | 6 ++-- 11 files changed, 64 insertions(+), 40 deletions(-) diff --git a/NEWS.md b/NEWS.md index 155712671ff7a..d851dd629c50a 100644 --- a/NEWS.md +++ b/NEWS.md @@ -698,8 +698,9 @@ Deprecated or removed `ldltfact`, and `hessfact` have respectively been deprecated to `lu`, `eig`, `schur`, `lq`, `qr`, `bk`, `chol`, `ldlt`, and `hess` ([#27159]). - * `lufact!`, `eigfact!`, `schurfact!`, `lqfact!`, and `qrfact!` have - respectively been deprecated to `lu!`, `eig!`, `schur!`, `lq!`, and `qr!` ([#27159]). + * `lufact!`, `eigfact!`, `schurfact!`, `lqfact!`, `qrfact!`, and `cholfact!` + have respectively been deprecated to `lu!`, `eig!`, `schur!`, `lq!`, `qr!`, + and `chol!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 42dfad367047f..8c83fe82ac6d7 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -314,7 +314,7 @@ LinearAlgebra.UniformScaling LinearAlgebra.lu LinearAlgebra.lu! LinearAlgebra.chol -LinearAlgebra.cholfact! +LinearAlgebra.chol! LinearAlgebra.lowrankupdate LinearAlgebra.lowrankdowndate LinearAlgebra.lowrankupdate! 
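For context, a minimal sketch of the in-place Cholesky entry point this patch renames (the positive-definite matrix is illustrative only; `chol!` overwrites the wrapped data and returns a `Cholesky` object):

```julia
using LinearAlgebra

A = [4.0 2.0; 2.0 3.0]
F = chol!(Hermitian(copy(A)))   # in-place Cholesky of the upper triangle
F.L * F.L' ≈ A                  # true: the factor reconstructs A
```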
diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 56fd5b4453ab8..1dd75d07aef4a 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -67,7 +67,7 @@ export bkfact, bkfact!, chol, - cholfact!, + chol!, cond, condskeel, copyto!, diff --git a/stdlib/LinearAlgebra/src/cholesky.jl b/stdlib/LinearAlgebra/src/cholesky.jl index de5cbee8fcc14..94df28e946cf2 100644 --- a/stdlib/LinearAlgebra/src/cholesky.jl +++ b/stdlib/LinearAlgebra/src/cholesky.jl @@ -4,14 +4,14 @@ # Cholesky Factorization # ########################## -# The dispatch structure in the legacychol!, legacychol, chol, and cholfact! methods is a bit +# The dispatch structure in the legacychol!, legacychol, chol, and chol! methods is a bit # complicated and some explanation is therefore provided in the following # # In the methods below, LAPACK is called when possible, i.e. StridedMatrices with Float32, # Float64, Complex{Float32}, and Complex{Float64} element types. For other element or # matrix types, the unblocked Julia implementation in _chol! is used. For chol -# and cholfact! pivoting is supported through a Val(Bool) argument. A type argument is -# necessary for type stability since the output of chol and cholfact! is either +# and chol! pivoting is supported through a Val(Bool) argument. A type argument is +# necessary for type stability since the output of chol and chol! is either # Cholesky or PivotedCholesky. The latter is only # supported for the four LAPACK element types. For other types, e.g. BigFloats Val(true) will # give an error. It is required that the input is Hermitian (including real symmetric) either @@ -21,7 +21,7 @@ # The internal structure is as follows # - _chol! returns the factor and info without checking positive definiteness # - legacychol/legacychol! returns the factor and checks for positive definiteness -# - chol/cholfact! returns Cholesky without checking positive definiteness +# - chol/chol! returns Cholesky without checking positive definiteness # FixMe? The dispatch below seems overly complicated. One simplification could be to # merge the two Cholesky types into one. It would remove the need for Val completely but @@ -201,10 +201,10 @@ legacychol(x::Number, args...) = ((C, info) = _chol!(x, nothing); @assertposdef -# cholfact!. Destructive methods for computing Cholesky factorization of real symmetric +# chol!. Destructive methods for computing Cholesky factorization of real symmetric # or Hermitian matrix ## No pivoting (default) -function cholfact!(A::RealHermSymComplexHerm, ::Val{false}=Val(false)) +function chol!(A::RealHermSymComplexHerm, ::Val{false}=Val(false)) if A.uplo == 'U' CU, info = _chol!(A.data, UpperTriangular) Cholesky(CU.data, 'U', info) @@ -216,7 +216,7 @@ end ### for StridedMatrices, check that matrix is symmetric/Hermitian """ - cholfact!(A, Val(false)) -> Cholesky + chol!(A, Val(false)) -> Cholesky The same as [`chol`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. An [`InexactError`](@ref) exception is thrown if @@ -230,25 +230,25 @@ julia> A = [1 2; 2 50] 1 2 2 50 -julia> cholfact!(A) +julia> chol!(A) ERROR: InexactError: Int64(Int64, 6.782329983125268) Stacktrace: [...] 
``` """ -function cholfact!(A::StridedMatrix, ::Val{false}=Val(false)) +function chol!(A::StridedMatrix, ::Val{false}=Val(false)) checksquare(A) if !ishermitian(A) # return with info = -1 if not Hermitian return Cholesky(A, 'U', convert(BlasInt, -1)) else - return cholfact!(Hermitian(A), Val(false)) + return chol!(Hermitian(A), Val(false)) end end ## With pivoting ### BLAS/LAPACK element types -function cholfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, +function chol!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, ::Val{true}; tol = 0.0) AA, piv, rank, info = LAPACK.pstrf!(A.uplo, A.data, tol) return CholeskyPivoted{eltype(AA),typeof(AA)}(AA, A.uplo, piv, rank, tol, info) @@ -256,25 +256,25 @@ end ### Non BLAS/LAPACK element types (generic). Since generic fallback for pivoted Cholesky ### is not implemented yet we throw an error -cholfact!(A::RealHermSymComplexHerm{<:Real}, ::Val{true}; tol = 0.0) = +chol!(A::RealHermSymComplexHerm{<:Real}, ::Val{true}; tol = 0.0) = throw(ArgumentError("generic pivoted Cholesky factorization is not implemented yet")) ### for StridedMatrices, check that matrix is symmetric/Hermitian """ - cholfact!(A, Val(true); tol = 0.0) -> CholeskyPivoted + chol!(A, Val(true); tol = 0.0) -> CholeskyPivoted The same as [`chol`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. An [`InexactError`](@ref) exception is thrown if the factorization produces a number not representable by the element type of `A`, e.g. for integer types. """ -function cholfact!(A::StridedMatrix, ::Val{true}; tol = 0.0) +function chol!(A::StridedMatrix, ::Val{true}; tol = 0.0) checksquare(A) if !ishermitian(A) # return with info = -1 if not Hermitian return CholeskyPivoted(A, 'U', Vector{BlasInt}(),convert(BlasInt, 1), tol, convert(BlasInt, -1)) else - return cholfact!(Hermitian(A), Val(true); tol = tol) + return chol!(Hermitian(A), Val(true); tol = tol) end end @@ -324,7 +324,7 @@ true ``` """ chol(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, - ::Val{false}=Val(false)) = cholfact!(cholcopy(A)) + ::Val{false}=Val(false)) = chol!(cholcopy(A)) ## With pivoting @@ -341,7 +341,7 @@ The argument `tol` determines the tolerance for determining the rank. For negative values, the tolerance is the machine precision. """ chol(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, - ::Val{true}; tol = 0.0) = cholfact!(cholcopy(A), Val(true); tol = tol) + ::Val{true}; tol = 0.0) = chol!(cholcopy(A), Val(true); tol = tol) ## Number function chol(x::Number, uplo::Symbol=:U) diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index c318fbe3407f0..55ae21dfdbe28 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -89,7 +89,7 @@ julia> A 2.0 6.78233 ``` """ -isposdef!(A::AbstractMatrix) = ishermitian(A) && isposdef(cholfact!(Hermitian(A))) +isposdef!(A::AbstractMatrix) = ishermitian(A) && isposdef(chol!(Hermitian(A))) """ isposdef(A) -> Bool diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index c9c000beffff0..d8b63f2c8f417 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1369,3 +1369,13 @@ export qrfact! @deprecate(qrfact!(A::StridedMatrix, ::Val{false}), qr!(A, Val(false))) @deprecate(qrfact!(A::StridedMatrix, ::Val{true}), qr!(A, Val(true))) @deprecate(qrfact!(A::StridedMatrix), qr!(A)) + +# deprecate cholfact! to chol! +export cholfact! 
+@deprecate(cholfact!(A::RealHermSymComplexHerm, ::Val{false}=Val(false)), chol!(A, Val(false))) +@deprecate(cholfact!(A::StridedMatrix, ::Val{false}=Val(false)), chol!(A, Val(false))) +@deprecate(cholfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, ::Val{true}; tol = 0.0), chol!(A, Val(true); tol=tol)) +@deprecate(cholfact!(A::RealHermSymComplexHerm{<:Real}, ::Val{true}; tol = 0.0), chol!(A, Val(true); tol=tol)) +@deprecate(cholfact!(A::StridedMatrix, ::Val{true}; tol = 0.0), chol!(A, Val(true); tol=tol)) + + diff --git a/stdlib/LinearAlgebra/test/cholesky.jl b/stdlib/LinearAlgebra/test/cholesky.jl index 845dc6550a100..246f7ae79c579 100644 --- a/stdlib/LinearAlgebra/test/cholesky.jl +++ b/stdlib/LinearAlgebra/test/cholesky.jl @@ -66,7 +66,7 @@ end A = rand(eltya, 2, 3) @test_throws DimensionMismatch LinearAlgebra.legacychol!(A) @test_throws DimensionMismatch chol(A) - @test_throws DimensionMismatch cholfact!(A) + @test_throws DimensionMismatch chol!(A) end #Test error bound on reconstruction of matrix: LAWNS 14, Lemma 2.1 @@ -98,7 +98,7 @@ end capds = chol(apds) unary_ops_tests(apds, capds, ε*κ*n) if eltya <: BlasReal - capds = cholfact!(copy(apds)) + capds = chol!(copy(apds)) unary_ops_tests(apds, capds, ε*κ*n) end ulstring = sprint((t, s) -> show(t, "text/plain", s), capds.UL) @@ -106,9 +106,9 @@ end else capdh = chol(apdh) unary_ops_tests(apdh, capdh, ε*κ*n) - capdh = cholfact!(copy(apdh)) + capdh = chol!(copy(apdh)) unary_ops_tests(apdh, capdh, ε*κ*n) - capdh = cholfact!(copy(apd)) + capdh = chol!(copy(apd)) unary_ops_tests(apd, capdh, ε*κ*n) ulstring = sprint((t, s) -> show(t, "text/plain", s), capdh.UL) @test sprint((t, s) -> show(t, "text/plain", s), capdh) == "$(typeof(capdh))\nU factor:\n$ulstring" @@ -184,8 +184,8 @@ end @test_throws PosDefException logdet(C) end - # Test generic cholfact! - @testset "generic cholfact!" begin + # Test generic chol! + @testset "generic chol!" begin if eltya <: Complex A = complex.(randn(5,5), randn(5,5)) else @@ -262,14 +262,14 @@ end C = complex.(R, R) for A in (R, C) @test !LinearAlgebra.issuccess(chol(A)) - @test !LinearAlgebra.issuccess(cholfact!(copy(A))) + @test !LinearAlgebra.issuccess(chol!(copy(A))) @test_throws PosDefException LinearAlgebra.legacychol(A) @test_throws PosDefException LinearAlgebra.legacychol!(copy(A)) end end @testset "fail for non-BLAS element types" begin - @test_throws ArgumentError cholfact!(Hermitian(rand(Float16, 5,5)), Val(true)) + @test_throws ArgumentError chol!(Hermitian(rand(Float16, 5,5)), Val(true)) end end # module TestCholesky diff --git a/stdlib/LinearAlgebra/test/dense.jl b/stdlib/LinearAlgebra/test/dense.jl index a703a28f4a7e4..a7ddfd9381442 100644 --- a/stdlib/LinearAlgebra/test/dense.jl +++ b/stdlib/LinearAlgebra/test/dense.jl @@ -47,7 +47,7 @@ bimg = randn(n,2)/2 @testset "Positive definiteness" begin @test !isposdef(ainit) @test isposdef(apd) - if eltya != Int # cannot perform cholfact! for Matrix{Int} + if eltya != Int # cannot perform chol! 
for Matrix{Int} @test !isposdef!(copy(ainit)) @test isposdef!(copy(apd)) end diff --git a/stdlib/SuiteSparse/src/cholmod.jl b/stdlib/SuiteSparse/src/cholmod.jl index 813c4abcf23ad..28d1f807c1b3c 100644 --- a/stdlib/SuiteSparse/src/cholmod.jl +++ b/stdlib/SuiteSparse/src/cholmod.jl @@ -7,7 +7,7 @@ import Base: (*), convert, copy, eltype, getindex, getproperty, show, size, using LinearAlgebra import LinearAlgebra: (\), - chol, cholfact!, det, diag, ishermitian, isposdef, + chol, chol!, det, diag, ishermitian, isposdef, issuccess, issymmetric, ldlt, ldltfact!, logdet using SparseArrays @@ -1367,7 +1367,7 @@ function fact_(A::Sparse{<:VTypes}, cm::Array{UInt8}; return F end -function cholfact!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv +function chol!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv # Makes it an LLt unsafe_store!(common_final_ll[], 1) @@ -1378,7 +1378,7 @@ function cholfact!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv end """ - cholfact!(F::Factor, A; shift = 0.0) -> CHOLMOD.Factor + chol!(F::Factor, A; shift = 0.0) -> CHOLMOD.Factor Compute the Cholesky (``LL'``) factorization of `A`, reusing the symbolic factorization `F`. `A` must be a [`SparseMatrixCSC`](@ref) or a [`Symmetric`](@ref)/ @@ -1393,13 +1393,13 @@ See also [`chol`](@ref). be converted to `SparseMatrixCSC{Float64}` or `SparseMatrixCSC{ComplexF64}` as appropriate. """ -cholfact!(F::Factor, A::Union{SparseMatrixCSC{T}, +chol!(F::Factor, A::Union{SparseMatrixCSC{T}, SparseMatrixCSC{Complex{T}}, Symmetric{T,SparseMatrixCSC{T,SuiteSparse_long}}, Hermitian{Complex{T},SparseMatrixCSC{Complex{T},SuiteSparse_long}}, Hermitian{T,SparseMatrixCSC{T,SuiteSparse_long}}}; shift = 0.0) where {T<:Real} = - cholfact!(F, Sparse(A); shift = shift) + chol!(F, Sparse(A); shift = shift) function chol(A::Sparse; shift::Real=0.0, perm::AbstractVector{SuiteSparse_long}=SuiteSparse_long[]) @@ -1411,7 +1411,7 @@ function chol(A::Sparse; shift::Real=0.0, F = fact_(A, cm; perm = perm) # Compute the numerical factorization - cholfact!(F, A; shift = shift) + chol!(F, A; shift = shift) return F end diff --git a/stdlib/SuiteSparse/src/deprecated.jl b/stdlib/SuiteSparse/src/deprecated.jl index 3ac4d093af125..7b3f9326a3218 100644 --- a/stdlib/SuiteSparse/src/deprecated.jl +++ b/stdlib/SuiteSparse/src/deprecated.jl @@ -86,3 +86,16 @@ end kws...) where {T<:Real}, ldlt(A; kws...)) end + +# deprecate cholfact! to chol! +@eval SuiteSparse.CHOLMOD begin + import LinearAlgebra: cholfact! + @deprecate(cholfact!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv, chol!(F, A; shift=shift)) + @deprecate(cholfact!(F::Factor, A::Union{SparseMatrixCSC{T}, + SparseMatrixCSC{Complex{T}}, + Symmetric{T,SparseMatrixCSC{T,SuiteSparse_long}}, + Hermitian{Complex{T},SparseMatrixCSC{Complex{T},SuiteSparse_long}}, + Hermitian{T,SparseMatrixCSC{T,SuiteSparse_long}}}; + shift = 0.0) where {T<:Real}, + chol!(F, A; shift=shift)) +end diff --git a/stdlib/SuiteSparse/test/cholmod.jl b/stdlib/SuiteSparse/test/cholmod.jl index 846fac5aae161..28fb41ff3cacd 100644 --- a/stdlib/SuiteSparse/test/cholmod.jl +++ b/stdlib/SuiteSparse/test/cholmod.jl @@ -434,19 +434,19 @@ end @test F1 == F2 end - ### cholfact!/ldltfact! + ### chol!/ldltfact! 
F = chol(A1pd) CHOLMOD.change_factor!(elty, false, false, true, true, F) @test unsafe_load(pointer(F)).is_ll == 0 CHOLMOD.change_factor!(elty, true, false, true, true, F) - @test CHOLMOD.Sparse(cholfact!(copy(F), A1pd)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality + @test CHOLMOD.Sparse(chol!(copy(F), A1pd)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality @test size(F, 2) == 5 @test size(F, 3) == 1 @test_throws ArgumentError size(F, 0) F = chol(A1pdSparse, shift=2) @test isa(CHOLMOD.Sparse(F), CHOLMOD.Sparse{elty}) - @test CHOLMOD.Sparse(cholfact!(copy(F), A1pd, shift=2.0)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality + @test CHOLMOD.Sparse(chol!(copy(F), A1pd, shift=2.0)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality F = ldlt(A1pd) @test isa(CHOLMOD.Sparse(F), CHOLMOD.Sparse{elty}) From 64ad69e10992412f5d71fcd2f533bd86526adc2d Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 17:20:54 -0700 Subject: [PATCH 16/21] Deprecate bkfact! to bk!. --- NEWS.md | 6 +++--- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 3 +-- stdlib/LinearAlgebra/src/bunchkaufman.jl | 16 ++++++++-------- stdlib/LinearAlgebra/src/deprecated.jl | 9 ++++++--- stdlib/LinearAlgebra/test/bunchkaufman.jl | 6 +++--- 6 files changed, 22 insertions(+), 20 deletions(-) diff --git a/NEWS.md b/NEWS.md index d851dd629c50a..3165abc7d762b 100644 --- a/NEWS.md +++ b/NEWS.md @@ -698,9 +698,9 @@ Deprecated or removed `ldltfact`, and `hessfact` have respectively been deprecated to `lu`, `eig`, `schur`, `lq`, `qr`, `bk`, `chol`, `ldlt`, and `hess` ([#27159]). - * `lufact!`, `eigfact!`, `schurfact!`, `lqfact!`, `qrfact!`, and `cholfact!` - have respectively been deprecated to `lu!`, `eig!`, `schur!`, `lq!`, `qr!`, - and `chol!` ([#27159]). + * `lufact!`, `eigfact!`, `schurfact!`, `lqfact!`, `qrfact!`, `cholfact!`, and + `bkfact!` have respectively been deprecated to `lu!`, `eig!`, `schur!`, `lq!`, + `qr!`, `chol!`, and `bk!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 8c83fe82ac6d7..929c90f0cdee3 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -329,7 +329,7 @@ LinearAlgebra.QRPivoted LinearAlgebra.lq! LinearAlgebra.lq LinearAlgebra.bk -LinearAlgebra.bkfact! +LinearAlgebra.bk! LinearAlgebra.eig LinearAlgebra.eigvals LinearAlgebra.eigvals! 
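Likewise, a brief sketch of the renamed in-place Bunch-Kaufman entry point (illustrative only; the matrix and right-hand side are made up for the example):

```julia
using LinearAlgebra

A = [4.0 1.0; 1.0 -3.0]            # symmetric but indefinite example matrix
F = bk!(Symmetric(copy(A)))        # in-place Bunch-Kaufman, previously bkfact!
x = F \ [1.0, 2.0]                 # solve A*x = b using the factorization
A * x ≈ [1.0, 2.0]
```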
diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 1dd75d07aef4a..58987e29e186f 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -64,8 +64,7 @@ export axpy!, axpby!, bk, - bkfact, - bkfact!, + bk!, chol, chol!, cond, diff --git a/stdlib/LinearAlgebra/src/bunchkaufman.jl b/stdlib/LinearAlgebra/src/bunchkaufman.jl index cbd3fdd8ed272..e482dea915750 100644 --- a/stdlib/LinearAlgebra/src/bunchkaufman.jl +++ b/stdlib/LinearAlgebra/src/bunchkaufman.jl @@ -24,24 +24,24 @@ Base.iterate(S::BunchKaufman, ::Val{:done}) = nothing """ - bkfact!(A, rook::Bool=false) -> BunchKaufman + bk!(A, rook::Bool=false) -> BunchKaufman -`bkfact!` is the same as [`bk`](@ref), but saves space by overwriting the +`bk!` is the same as [`bk`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. """ -function bkfact!(A::RealHermSymComplexSym{T,S} where {T<:BlasReal,S<:StridedMatrix}, rook::Bool = false) +function bk!(A::RealHermSymComplexSym{T,S} where {T<:BlasReal,S<:StridedMatrix}, rook::Bool = false) LD, ipiv, info = rook ? LAPACK.sytrf_rook!(A.uplo, A.data) : LAPACK.sytrf!(A.uplo, A.data) BunchKaufman(LD, ipiv, A.uplo, true, rook, info) end -function bkfact!(A::Hermitian{T,S} where {T<:BlasComplex,S<:StridedMatrix{T}}, rook::Bool = false) +function bk!(A::Hermitian{T,S} where {T<:BlasComplex,S<:StridedMatrix{T}}, rook::Bool = false) LD, ipiv, info = rook ? LAPACK.hetrf_rook!(A.uplo, A.data) : LAPACK.hetrf!(A.uplo, A.data) BunchKaufman(LD, ipiv, A.uplo, false, rook, info) end -function bkfact!(A::StridedMatrix{<:BlasFloat}, rook::Bool = false) +function bk!(A::StridedMatrix{<:BlasFloat}, rook::Bool = false) if ishermitian(A) - return bkfact!(Hermitian(A), rook) + return bk!(Hermitian(A), rook) elseif issymmetric(A) - return bkfact!(Symmetric(A), rook) + return bk!(Symmetric(A), rook) else throw(ArgumentError("Bunch-Kaufman decomposition is only valid for symmetric or Hermitian matrices")) end @@ -99,7 +99,7 @@ true ``` """ bk(A::AbstractMatrix{T}, rook::Bool=false) where {T} = - bkfact!(copy_oftype(A, typeof(sqrt(one(T)))), rook) + bk!(copy_oftype(A, typeof(sqrt(one(T)))), rook) convert(::Type{BunchKaufman{T}}, B::BunchKaufman{T}) where {T} = B convert(::Type{BunchKaufman{T}}, B::BunchKaufman) where {T} = diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index d8b63f2c8f417..7ded1dc78ed2e 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -20,8 +20,7 @@ export cholfact @deprecate isposdef!(A::StridedMatrix, UL::Symbol) isposdef!(Hermitian(A, UL)) # bkfact -export bkfact -import .LinearAlgebra: bkfact! +export bkfact, bkfact! function bkfact(A::StridedMatrix, uplo::Symbol, symmetric::Bool = issymmetric(A), rook::Bool = false) depwarn(string("`bkfact` with uplo and symmetric arguments is deprecated, ", "use `bkfact($(symmetric ? "Symmetric(" : "Hermitian(")A, :$uplo))` instead."), @@ -1378,4 +1377,8 @@ export cholfact! @deprecate(cholfact!(A::RealHermSymComplexHerm{<:Real}, ::Val{true}; tol = 0.0), chol!(A, Val(true); tol=tol)) @deprecate(cholfact!(A::StridedMatrix, ::Val{true}; tol = 0.0), chol!(A, Val(true); tol=tol)) - +# deprecate bkfact! to bk! +export bkfact! 
+@deprecate(bkfact!(A::RealHermSymComplexSym{T,S} where {T<:BlasReal,S<:StridedMatrix}, rook::Bool = false), bk!(A, rook)) +@deprecate(bkfact!(A::Hermitian{T,S} where {T<:BlasComplex,S<:StridedMatrix{T}}, rook::Bool = false), bk!(A, rook)) +@deprecate(bkfact!(A::StridedMatrix{<:BlasFloat}, rook::Bool = false), bk!(A, rook)) diff --git a/stdlib/LinearAlgebra/test/bunchkaufman.jl b/stdlib/LinearAlgebra/test/bunchkaufman.jl index 0be07848bb14d..ca7ba964bff80 100644 --- a/stdlib/LinearAlgebra/test/bunchkaufman.jl +++ b/stdlib/LinearAlgebra/test/bunchkaufman.jl @@ -50,9 +50,9 @@ bimg = randn(n,2)/2 @testset for rook in (false, true) @test inv(bk(Symmetric(transpose(a) + a, uplo), rook))*(transpose(a) + a) ≈ Matrix(I, n, n) if eltya <: BlasFloat - # test also bkfact! without explicit type tag - # no bkfact! method for Int ... yet - @test inv(bkfact!(transpose(a) + a, rook))*(transpose(a) + a) ≈ Matrix(I, n, n) + # test also bk! without explicit type tag + # no bk! method for Int ... yet + @test inv(bk!(transpose(a) + a, rook))*(transpose(a) + a) ≈ Matrix(I, n, n) end @test size(bc1) == size(bc1.LD) @test size(bc1, 1) == size(bc1.LD, 1) From cc5643b72fb61ca679939d61e6501d8fc505887a Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 17:31:12 -0700 Subject: [PATCH 17/21] Deprecate ldltfact! to ldlt!. --- NEWS.md | 6 +++--- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/dense.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 4 ++++ stdlib/LinearAlgebra/src/ldlt.jl | 8 ++++---- stdlib/SparseArrays/src/linalg.jl | 4 ++-- stdlib/SuiteSparse/src/cholmod.jl | 16 ++++++++-------- stdlib/SuiteSparse/src/deprecated.jl | 13 +++++++++++++ stdlib/SuiteSparse/test/cholmod.jl | 10 +++++----- 10 files changed, 42 insertions(+), 25 deletions(-) diff --git a/NEWS.md b/NEWS.md index 3165abc7d762b..92f555ee39abc 100644 --- a/NEWS.md +++ b/NEWS.md @@ -698,9 +698,9 @@ Deprecated or removed `ldltfact`, and `hessfact` have respectively been deprecated to `lu`, `eig`, `schur`, `lq`, `qr`, `bk`, `chol`, `ldlt`, and `hess` ([#27159]). - * `lufact!`, `eigfact!`, `schurfact!`, `lqfact!`, `qrfact!`, `cholfact!`, and - `bkfact!` have respectively been deprecated to `lu!`, `eig!`, `schur!`, `lq!`, - `qr!`, `chol!`, and `bk!` ([#27159]). + * `lufact!`, `eigfact!`, `schurfact!`, `lqfact!`, `qrfact!`, `cholfact!`, + `bkfact!`, and `ldltfact!` have respectively been deprecated to `lu!`, + `eig!`, `schur!`, `lq!`, `qr!`, `chol!`, `bk!`, and `ldlt!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 929c90f0cdee3..1fb85e9997475 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -320,7 +320,7 @@ LinearAlgebra.lowrankdowndate LinearAlgebra.lowrankupdate! LinearAlgebra.lowrankdowndate! LinearAlgebra.ldlt -LinearAlgebra.ldltfact! +LinearAlgebra.ldlt! LinearAlgebra.qr LinearAlgebra.qr! 
LinearAlgebra.QR diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 58987e29e186f..b7b560193f435 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -100,7 +100,7 @@ export istriu, kron, ldiv!, - ldltfact!, + ldlt!, ldlt, linreg, logabsdet, diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 55ae21dfdbe28..5b899359459ea 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -1198,7 +1198,7 @@ function factorize(A::StridedMatrix{T}) where T if utri1 if (herm & (T <: Complex)) | sym try - return ldltfact!(SymTridiagonal(diag(A), diag(A, -1))) + return ldlt!(SymTridiagonal(diag(A), diag(A, -1))) end end return lu(Tridiagonal(diag(A, -1), diag(A), diag(A, 1))) diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 7ded1dc78ed2e..1d32793948046 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1382,3 +1382,7 @@ export bkfact! @deprecate(bkfact!(A::RealHermSymComplexSym{T,S} where {T<:BlasReal,S<:StridedMatrix}, rook::Bool = false), bk!(A, rook)) @deprecate(bkfact!(A::Hermitian{T,S} where {T<:BlasComplex,S<:StridedMatrix{T}}, rook::Bool = false), bk!(A, rook)) @deprecate(bkfact!(A::StridedMatrix{<:BlasFloat}, rook::Bool = false), bk!(A, rook)) + +# deprecate ldltfact! to ldlt! +export ldltfact! +@deprecate(ldltfact!(S::SymTridiagonal{T,V}) where {T<:Real,V}, ldlt!(S)) diff --git a/stdlib/LinearAlgebra/src/ldlt.jl b/stdlib/LinearAlgebra/src/ldlt.jl index 86a9ca0c1105c..c97e4910014e4 100644 --- a/stdlib/LinearAlgebra/src/ldlt.jl +++ b/stdlib/LinearAlgebra/src/ldlt.jl @@ -17,7 +17,7 @@ Factorization{T}(F::LDLt{S,U}) where {T,S,U} = LDLt{T,U}(F) # SymTridiagonal """ - ldltfact!(S::SymTridiagonal) -> LDLt + ldlt!(S::SymTridiagonal) -> LDLt Same as [`ldlt`](@ref), but saves space by overwriting the input `S`, instead of creating a copy. @@ -29,7 +29,7 @@ julia> S = SymTridiagonal([3., 4., 5.], [1., 2.]) 1.0 4.0 2.0 ⋅ 2.0 5.0 -julia> ldltS = ldltfact!(S); +julia> ldltS = ldlt!(S); julia> ldltS === S false @@ -41,7 +41,7 @@ julia> S ⋅ 0.545455 3.90909 ``` """ -function ldltfact!(S::SymTridiagonal{T,V}) where {T<:Real,V} +function ldlt!(S::SymTridiagonal{T,V}) where {T<:Real,V} n = size(S,1) d = S.dv e = S.ev @@ -86,7 +86,7 @@ julia> S \\ b """ function ldlt(M::SymTridiagonal{T}) where T S = typeof(zero(T)/one(T)) - return S == T ? ldltfact!(copy(M)) : ldltfact!(SymTridiagonal{S}(M)) + return S == T ? 
ldlt!(copy(M)) : ldlt!(SymTridiagonal{S}(M)) end factorize(S::SymTridiagonal) = ldlt(S) diff --git a/stdlib/SparseArrays/src/linalg.jl b/stdlib/SparseArrays/src/linalg.jl index 269f1e71a59c9..8e6142a38aaa4 100644 --- a/stdlib/SparseArrays/src/linalg.jl +++ b/stdlib/SparseArrays/src/linalg.jl @@ -994,7 +994,7 @@ end # if LinearAlgebra.issuccess(F) # return F # else -# ldltfact!(F, A) +# ldlt!(F, A) # return F # end # end @@ -1003,7 +1003,7 @@ function factorize(A::LinearAlgebra.RealHermSymComplexHerm{Float64,<:SparseMatri if LinearAlgebra.issuccess(F) return F else - ldltfact!(F, A) + ldlt!(F, A) return F end end diff --git a/stdlib/SuiteSparse/src/cholmod.jl b/stdlib/SuiteSparse/src/cholmod.jl index 28d1f807c1b3c..8ef80ed14204b 100644 --- a/stdlib/SuiteSparse/src/cholmod.jl +++ b/stdlib/SuiteSparse/src/cholmod.jl @@ -8,7 +8,7 @@ import Base: (*), convert, copy, eltype, getindex, getproperty, show, size, using LinearAlgebra import LinearAlgebra: (\), chol, chol!, det, diag, ishermitian, isposdef, - issuccess, issymmetric, ldlt, ldltfact!, logdet + issuccess, issymmetric, ldlt, ldlt!, logdet using SparseArrays import Libdl @@ -1456,7 +1456,7 @@ chol(A::Union{SparseMatrixCSC{T}, SparseMatrixCSC{Complex{T}}, kws...) where {T<:Real} = chol(Sparse(A); kws...) -function ldltfact!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv +function ldlt!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv cm = defaults(common_struct) set_print_level(cm, 0) @@ -1470,7 +1470,7 @@ function ldltfact!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv end """ - ldltfact!(F::Factor, A; shift = 0.0) -> CHOLMOD.Factor + ldlt!(F::Factor, A; shift = 0.0) -> CHOLMOD.Factor Compute the ``LDL'`` factorization of `A`, reusing the symbolic factorization `F`. `A` must be a [`SparseMatrixCSC`](@ref) or a [`Symmetric`](@ref)/[`Hermitian`](@ref) @@ -1485,13 +1485,13 @@ See also [`ldlt`](@ref). be converted to `SparseMatrixCSC{Float64}` or `SparseMatrixCSC{ComplexF64}` as appropriate. """ -ldltfact!(F::Factor, A::Union{SparseMatrixCSC{T}, +ldlt!(F::Factor, A::Union{SparseMatrixCSC{T}, SparseMatrixCSC{Complex{T}}, Symmetric{T,SparseMatrixCSC{T,SuiteSparse_long}}, Hermitian{Complex{T},SparseMatrixCSC{Complex{T},SuiteSparse_long}}, Hermitian{T,SparseMatrixCSC{T,SuiteSparse_long}}}; shift = 0.0) where {T<:Real} = - ldltfact!(F, Sparse(A), shift = shift) + ldlt!(F, Sparse(A), shift = shift) function ldlt(A::Sparse; shift::Real=0.0, perm::AbstractVector{SuiteSparse_long}=SuiteSparse_long[]) @@ -1508,7 +1508,7 @@ function ldlt(A::Sparse; shift::Real=0.0, F = fact_(A, cm; perm = perm) # Compute the numerical factorization - ldltfact!(F, A; shift = shift) + ldlt!(F, A; shift = shift) return F end @@ -1716,7 +1716,7 @@ function \(A::RealHermSymComplexHermF64SSL, B::StridedVecOrMat) if issuccess(F) return \(F, B) else - ldltfact!(F, A) + ldlt!(F, A) if issuccess(F) return \(F, B) else @@ -1730,7 +1730,7 @@ function \(adjA::Adjoint{<:Any,<:RealHermSymComplexHermF64SSL}, B::StridedVecOrM if issuccess(F) return \(adjoint(F), B) else - ldltfact!(F, A) + ldlt!(F, A) if issuccess(F) return \(adjoint(F), B) else diff --git a/stdlib/SuiteSparse/src/deprecated.jl b/stdlib/SuiteSparse/src/deprecated.jl index 7b3f9326a3218..1f6b7ad65ae78 100644 --- a/stdlib/SuiteSparse/src/deprecated.jl +++ b/stdlib/SuiteSparse/src/deprecated.jl @@ -99,3 +99,16 @@ end shift = 0.0) where {T<:Real}, chol!(F, A; shift=shift)) end + +# deprecate ldltfact! to ldlt! +@eval SuiteSparse.CHOLMOD begin + import LinearAlgebra: ldltfact! 
+ @deprecate(ldltfact!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv, ldlt!(F, A; shift=shift)) + @deprecate(ldltfact!(F::Factor, A::Union{SparseMatrixCSC{T}, + SparseMatrixCSC{Complex{T}}, + Symmetric{T,SparseMatrixCSC{T,SuiteSparse_long}}, + Hermitian{Complex{T},SparseMatrixCSC{Complex{T},SuiteSparse_long}}, + Hermitian{T,SparseMatrixCSC{T,SuiteSparse_long}}}; + shift = 0.0) where {T<:Real}, + ldlt!(F, A; shift=shift)) +end diff --git a/stdlib/SuiteSparse/test/cholmod.jl b/stdlib/SuiteSparse/test/cholmod.jl index 28fb41ff3cacd..92bce2088524a 100644 --- a/stdlib/SuiteSparse/test/cholmod.jl +++ b/stdlib/SuiteSparse/test/cholmod.jl @@ -434,7 +434,7 @@ end @test F1 == F2 end - ### chol!/ldltfact! + ### chol!/ldlt! F = chol(A1pd) CHOLMOD.change_factor!(elty, false, false, true, true, F) @test unsafe_load(pointer(F)).is_ll == 0 @@ -450,11 +450,11 @@ end F = ldlt(A1pd) @test isa(CHOLMOD.Sparse(F), CHOLMOD.Sparse{elty}) - @test CHOLMOD.Sparse(ldltfact!(copy(F), A1pd)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality + @test CHOLMOD.Sparse(ldlt!(copy(F), A1pd)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality F = ldlt(A1pdSparse, shift=2) @test isa(CHOLMOD.Sparse(F), CHOLMOD.Sparse{elty}) - @test CHOLMOD.Sparse(ldltfact!(copy(F), A1pd, shift=2.0)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality + @test CHOLMOD.Sparse(ldlt!(copy(F), A1pd, shift=2.0)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality @test isa(CHOLMOD.factor_to_sparse!(F), CHOLMOD.Sparse) @test_throws CHOLMOD.CHOLMODException CHOLMOD.factor_to_sparse!(F) @@ -701,7 +701,7 @@ end @test F\b ≈ fill(1., m+n) F2 = chol(M) @test !LinearAlgebra.issuccess(F2) - ldltfact!(F2, M) + ldlt!(F2, M) @test LinearAlgebra.issuccess(F2) @test F2\b ≈ fill(1., m+n) end @@ -809,7 +809,7 @@ end A[3, 3] = -1 F = chol(A) @test !LinearAlgebra.issuccess(F) - @test LinearAlgebra.issuccess(ldltfact!(F, A)) + @test LinearAlgebra.issuccess(ldlt!(F, A)) A[3, 3] = 1 @test A[:, 3:-1:1]\fill(1., 3) == [1, 1, 1] end From 6ae28a3408aac6f8686a2c0ae153e453741a022c Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 17:35:08 -0700 Subject: [PATCH 18/21] Deprecate hessfact! to hess!. --- NEWS.md | 4 ++-- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 5 +++++ stdlib/LinearAlgebra/src/hessenberg.jl | 10 +++++----- 5 files changed, 14 insertions(+), 9 deletions(-) diff --git a/NEWS.md b/NEWS.md index 92f555ee39abc..509c80e0710c4 100644 --- a/NEWS.md +++ b/NEWS.md @@ -699,8 +699,8 @@ Deprecated or removed `schur`, `lq`, `qr`, `bk`, `chol`, `ldlt`, and `hess` ([#27159]). * `lufact!`, `eigfact!`, `schurfact!`, `lqfact!`, `qrfact!`, `cholfact!`, - `bkfact!`, and `ldltfact!` have respectively been deprecated to `lu!`, - `eig!`, `schur!`, `lq!`, `qr!`, `chol!`, `bk!`, and `ldlt!` ([#27159]). + `bkfact!`, `ldltfact!`, and `hessfact!` have respectively been deprecated to + `lu!`, `eig!`, `schur!`, `lq!`, `qr!`, `chol!`, `bk!`, `ldlt!`, and `hess!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. 
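To make the Factor-reuse pattern behind `ldlt!` concrete, a rough sketch (illustrative only; the sparse matrix and shift are made up for the example, and 64-bit CHOLMOD indices are assumed):

```julia
using LinearAlgebra, SparseArrays, SuiteSparse

A = sparse([4.0 1.0 0.0; 1.0 4.0 1.0; 0.0 1.0 4.0])
F = ldlt(Symmetric(A))                 # symbolic + numeric LDL' via CHOLMOD
ldlt!(F, Symmetric(A); shift = 1.0)    # refactorize A + shift*I, reusing the symbolic analysis
x = F \ ones(3)                        # solve with the updated factorization
```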
diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 1fb85e9997475..79b23a63384e0 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -338,7 +338,7 @@ LinearAlgebra.eigmin LinearAlgebra.eigvecs LinearAlgebra.eig! LinearAlgebra.hess -LinearAlgebra.hessfact! +LinearAlgebra.hess! LinearAlgebra.schur! LinearAlgebra.schur LinearAlgebra.ordschur diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index b7b560193f435..8f9f090911a45 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -89,7 +89,7 @@ export factorize, givens, hess, - hessfact!, + hess!, isdiag, ishermitian, isposdef, diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 1d32793948046..555fb3d1bffa5 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1386,3 +1386,8 @@ export bkfact! # deprecate ldltfact! to ldlt! export ldltfact! @deprecate(ldltfact!(S::SymTridiagonal{T,V}) where {T<:Real,V}, ldlt!(S)) + +# deprecate hessfact! to hess! +export hessfact! +@deprecate(hessfact!(A::StridedMatrix{<:BlasFloat}), hess!(A)) + diff --git a/stdlib/LinearAlgebra/src/hessenberg.jl b/stdlib/LinearAlgebra/src/hessenberg.jl index df22f2f234a01..e5d8cbfb2d298 100644 --- a/stdlib/LinearAlgebra/src/hessenberg.jl +++ b/stdlib/LinearAlgebra/src/hessenberg.jl @@ -15,14 +15,14 @@ Base.iterate(S::Hessenberg, ::Val{:H}) = (S.H, Val(:done)) Base.iterate(S::Hessenberg, ::Val{:done}) = nothing """ - hessfact!(A) -> Hessenberg + hess!(A) -> Hessenberg -`hessfact!` is the same as [`hess`](@ref), but saves space by overwriting +`hess!` is the same as [`hess`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. """ -hessfact!(A::StridedMatrix{<:BlasFloat}) = Hessenberg(A) +hess!(A::StridedMatrix{<:BlasFloat}) = Hessenberg(A) -hess(A::StridedMatrix{<:BlasFloat}) = hessfact!(copy(A)) +hess(A::StridedMatrix{<:BlasFloat}) = hess!(copy(A)) """ hess(A) -> Hessenberg @@ -58,7 +58,7 @@ true ``` """ hess(A::StridedMatrix{T}) where T = - hessfact!(copy_oftype(A, eigtype(T))) + hess!(copy_oftype(A, eigtype(T))) struct HessenbergQ{T,S<:AbstractMatrix} <: AbstractMatrix{T} factors::S From 9f68a9eb9bb8f523582beae573ffc85da4a935f2 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 17:51:10 -0700 Subject: [PATCH 19/21] Deprecate svdfact! to svd!. --- NEWS.md | 5 +++-- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/bidiag.jl | 8 +++---- stdlib/LinearAlgebra/src/deprecated.jl | 6 ++++++ stdlib/LinearAlgebra/src/svd.jl | 26 +++++++++++------------ stdlib/LinearAlgebra/src/triangular.jl | 2 +- stdlib/LinearAlgebra/test/bidiag.jl | 2 +- stdlib/LinearAlgebra/test/svd.jl | 2 +- stdlib/LinearAlgebra/test/triangular.jl | 2 +- 10 files changed, 32 insertions(+), 25 deletions(-) diff --git a/NEWS.md b/NEWS.md index 509c80e0710c4..16fcfc213822e 100644 --- a/NEWS.md +++ b/NEWS.md @@ -699,8 +699,9 @@ Deprecated or removed `schur`, `lq`, `qr`, `bk`, `chol`, `ldlt`, and `hess` ([#27159]). * `lufact!`, `eigfact!`, `schurfact!`, `lqfact!`, `qrfact!`, `cholfact!`, - `bkfact!`, `ldltfact!`, and `hessfact!` have respectively been deprecated to - `lu!`, `eig!`, `schur!`, `lq!`, `qr!`, `chol!`, `bk!`, `ldlt!`, and `hess!` ([#27159]). 
+ `bkfact!`, `ldltfact!`, `hessfact!`, and `svdfact!` have respectively been + deprecated to `lu!`, `eig!`, `schur!`, `lq!`, `qr!`, `chol!`, `bk!`, + `ldlt!`, `hess!`, and `svd!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 79b23a63384e0..bb308b88a782e 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -344,7 +344,7 @@ LinearAlgebra.schur LinearAlgebra.ordschur LinearAlgebra.ordschur! LinearAlgebra.svdfact -LinearAlgebra.svdfact! +LinearAlgebra.svd! LinearAlgebra.svd LinearAlgebra.svdvals LinearAlgebra.svdvals! diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 8f9f090911a45..9f4f56d47e312 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -131,7 +131,7 @@ export schur, schur!, svd, - svdfact!, + svd!, svdfact, svdvals!, svdvals, diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index 76efa4356eafd..3509514bebdd7 100644 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -188,12 +188,12 @@ similar(B::Bidiagonal, ::Type{T}) where {T} = Bidiagonal(similar(B.dv, T), simil #Singular values svdvals!(M::Bidiagonal{<:BlasReal}) = LAPACK.bdsdc!(M.uplo, 'N', M.dv, M.ev)[1] -function svdfact!(M::Bidiagonal{<:BlasReal}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) +function svd!(M::Bidiagonal{<:BlasReal}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 if thin != nothing - Base.depwarn(string("the `thin` keyword argument in `svdfact!(A; thin = $(thin))` has ", + Base.depwarn(string("the `thin` keyword argument in `svd!(A; thin = $(thin))` has ", "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g. `svdfact!(A; full = $(!thin))`."), :svdfact!) + "e.g. `svd!(A; full = $(!thin))`."), :svd!) full::Bool = !thin end d, e, U, Vt, Q, iQ = LAPACK.bdsdc!(M.uplo, 'I', M.dv, M.ev) @@ -207,7 +207,7 @@ function svdfact(M::Bidiagonal; full::Bool = false, thin::Union{Bool,Nothing} = "e.g. `svdfact(A; full = $(!thin))`."), :svdfact) full::Bool = !thin end - return svdfact!(copy(M), full = full) + return svd!(copy(M), full = full) end #################### diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 555fb3d1bffa5..30e7b81519a5c 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1391,3 +1391,9 @@ export ldltfact! export hessfact! @deprecate(hessfact!(A::StridedMatrix{<:BlasFloat}), hess!(A)) +# deprecate svdfact! to svd! +export svdfact! 
+@deprecate(svdfact!(M::Bidiagonal{<:BlasReal}; full::Bool = false, thin::Union{Bool,Nothing} = nothing), svd!(M; full=full, thin=thin)) +@deprecate(svdfact!(A::StridedMatrix{T}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) where T<:BlasFloat, svd!(A; full=full, thin=thin)) +@deprecate(svdfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasFloat, svd!(A, B)) +@deprecate(svdfact!(A::AbstractTriangular), svd!(A)) diff --git a/stdlib/LinearAlgebra/src/svd.jl b/stdlib/LinearAlgebra/src/svd.jl index 0a2070633b5c3..5c85faa058ade 100644 --- a/stdlib/LinearAlgebra/src/svd.jl +++ b/stdlib/LinearAlgebra/src/svd.jl @@ -11,9 +11,9 @@ end SVD(U::AbstractArray{T}, S::Vector{Tr}, Vt::AbstractArray{T}) where {T,Tr} = SVD{T,Tr,typeof(U)}(U, S, Vt) """ - svdfact!(A; full::Bool = false) -> SVD + svd!(A; full::Bool = false) -> SVD -`svdfact!` is the same as [`svdfact`](@ref), but saves space by +`svd!` is the same as [`svdfact`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. # Examples @@ -25,7 +25,7 @@ julia> A = [1. 0. 0. 0. 2.; 0. 0. 3. 0. 0.; 0. 0. 0. 0. 0.; 0. 2. 0. 0. 0.] 0.0 0.0 0.0 0.0 0.0 0.0 2.0 0.0 0.0 0.0 -julia> F = svdfact!(A); +julia> F = svd!(A); julia> F.U * Diagonal(F.S) * F.Vt 4×5 Array{Float64,2}: @@ -42,12 +42,12 @@ julia> A 0.0 0.0 -2.0 0.0 0.0 ``` """ -function svdfact!(A::StridedMatrix{T}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) where T<:BlasFloat +function svd!(A::StridedMatrix{T}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) where T<:BlasFloat # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 if thin != nothing - Base.depwarn(string("the `thin` keyword argument in `svdfact!(A; thin = $(thin))` has ", + Base.depwarn(string("the `thin` keyword argument in `svd!(A; thin = $(thin))` has ", "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g. `svdfact!(A; full = $(!thin))`."), :svdfact!) + "e.g. `svd!(A; full = $(!thin))`."), :svd!) full::Bool = !thin end m,n = size(A) @@ -102,7 +102,7 @@ function svdfact(A::StridedVecOrMat{T}; full::Bool = false, thin::Union{Bool,Not "e.g. `svdfact(A; full = $(!thin))`."), :svdfact) full::Bool = !thin end - svdfact!(copy_oftype(A, eigtype(T)), full = full) + svd!(copy_oftype(A, eigtype(T)), full = full) end function svdfact(x::Number; full::Bool = false, thin::Union{Bool,Nothing} = nothing) # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 @@ -279,9 +279,9 @@ function GeneralizedSVD(U::AbstractMatrix{T}, V::AbstractMatrix{T}, Q::AbstractM end """ - svdfact!(A, B) -> GeneralizedSVD + svd!(A, B) -> GeneralizedSVD -`svdfact!` is the same as [`svdfact`](@ref), but modifies the arguments +`svd!` is the same as [`svdfact`](@ref), but modifies the arguments `A` and `B` in-place, instead of making copies. # Examples @@ -296,7 +296,7 @@ julia> B = [0. 1.; 1. 0.] 
0.0 1.0 1.0 0.0 -julia> F = svdfact!(A, B); +julia> F = svd!(A, B); julia> F.U*F.D1*F.R0*F.Q' 2×2 Array{Float64,2}: @@ -319,7 +319,7 @@ julia> B 0.0 -1.0 ``` """ -function svdfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasFloat +function svd!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasFloat # xggsvd3 replaced xggsvd in LAPACK 3.6.0 if LAPACK.version() < v"3.6.0" U, V, Q, a, b, k, l, R = LAPACK.ggsvd!('U', 'V', 'Q', A, B) @@ -328,7 +328,7 @@ function svdfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasFloat end GeneralizedSVD(U, V, Q, a, b, Int(k), Int(l), R) end -svdfact(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = svdfact!(copy(A),copy(B)) +svdfact(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = svd!(copy(A),copy(B)) """ svdfact(A, B) -> GeneralizedSVD @@ -381,7 +381,7 @@ julia> F.V*F.D2*F.R0*F.Q' """ function svdfact(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB} S = promote_type(eigtype(TA),TB) - return svdfact!(copy_oftype(A, S), copy_oftype(B, S)) + return svd!(copy_oftype(A, S), copy_oftype(B, S)) end # This method can be heavily optimized but it is probably not critical # and might introduce bugs or inconsistencies relative to the 1x1 matrix diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index 26bd14dc66b1f..ad69a86ba4265 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -2435,7 +2435,7 @@ end eig(A::AbstractTriangular) = Eigen(eigvals(A), eigvecs(A)) # Generic singular systems -for func in (:svd, :svdfact, :svdfact!, :svdvals) +for func in (:svd, :svdfact, :svd!, :svdvals) @eval begin ($func)(A::AbstractTriangular) = ($func)(copyto!(similar(parent(A)), A)) end diff --git a/stdlib/LinearAlgebra/test/bidiag.jl b/stdlib/LinearAlgebra/test/bidiag.jl index a096ffd43c27f..39c8d4cf31258 100644 --- a/stdlib/LinearAlgebra/test/bidiag.jl +++ b/stdlib/LinearAlgebra/test/bidiag.jl @@ -242,7 +242,7 @@ srand(1) @testset "Singular systems" begin if (elty <: BlasReal) - @test AbstractArray(svdfact(T)) ≈ AbstractArray(svdfact!(copy(Tfull))) + @test AbstractArray(svdfact(T)) ≈ AbstractArray(svd!(copy(Tfull))) @test svdvals(Tfull) ≈ svdvals(T) u1, d1, v1 = svd(Tfull) u2, d2, v2 = svd(T) diff --git a/stdlib/LinearAlgebra/test/svd.jl b/stdlib/LinearAlgebra/test/svd.jl index 3af0a4f2e9e04..1210ad5a31b44 100644 --- a/stdlib/LinearAlgebra/test/svd.jl +++ b/stdlib/LinearAlgebra/test/svd.jl @@ -63,7 +63,7 @@ a2img = randn(n,n)/2 @test usv\b ≈ a\b if eltya <: BlasFloat - svdz = svdfact!(Matrix{eltya}(undef,0,0)) + svdz = svd!(Matrix{eltya}(undef,0,0)) @test svdz.U ≈ Matrix{eltya}(I, 0, 0) @test svdz.S ≈ real(zeros(eltya,0)) @test svdz.Vt ≈ Matrix{eltya}(I, 0, 0) diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index 01ead848d5241..8dbf3822d0ea6 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -266,7 +266,7 @@ for elty1 in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFlo if !(elty1 in (BigFloat, Complex{BigFloat})) # Not implemented yet svd(A1) svdfact(A1) - elty1 <: BlasFloat && svdfact!(copy(A1)) + elty1 <: BlasFloat && svd!(copy(A1)) svdvals(A1) end From 45b4926739f18ec4ab16047df14446cd433735cd Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Mon, 21 May 2018 10:51:46 -0700 Subject: [PATCH 20/21] Deprecate svdfact to svd. 
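As with the earlier factorization renamings, `svd` now returns an `SVD` (or `GeneralizedSVD`) object rather than a tuple, and iteration methods are added so destructuring call sites keep working. A rough sketch of the intended usage (illustrative only):

```julia
using LinearAlgebra

A = [1.0 2.0; 3.0 4.0]
F = svd(A)                         # an SVD object instead of a (U, S, V) tuple
U, S, V = F                        # destructuring still works via iteration
F.U * Diagonal(F.S) * F.Vt ≈ A
```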
--- NEWS.md | 7 +- doc/src/manual/parallel-computing.md | 4 +- doc/src/manual/performance-tips.md | 4 +- stdlib/Distributed/test/distributed_exec.jl | 4 +- stdlib/IterativeEigensolvers/test/runtests.jl | 16 +- stdlib/LinearAlgebra/docs/src/index.md | 3 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 1 - stdlib/LinearAlgebra/src/bidiag.jl | 6 +- stdlib/LinearAlgebra/src/dense.jl | 4 +- stdlib/LinearAlgebra/src/deprecated.jl | 12 ++ stdlib/LinearAlgebra/src/diagonal.jl | 6 +- stdlib/LinearAlgebra/src/svd.jl | 179 ++++++------------ stdlib/LinearAlgebra/src/triangular.jl | 2 +- stdlib/LinearAlgebra/test/bidiag.jl | 4 +- stdlib/LinearAlgebra/test/diagonal.jl | 2 +- stdlib/LinearAlgebra/test/svd.jl | 22 +-- stdlib/LinearAlgebra/test/triangular.jl | 1 - test/bitarray.jl | 3 +- 18 files changed, 114 insertions(+), 166 deletions(-) diff --git a/NEWS.md b/NEWS.md index 16fcfc213822e..0dedec60771b6 100644 --- a/NEWS.md +++ b/NEWS.md @@ -246,6 +246,9 @@ This section lists changes that do not have deprecation warnings. `CholeskyPivoted`, and `SuiteSparse.CHOLMOD.Factor` rather than tuples of arrays or tuples of numbers or numbers ([#27159]). + * `svd` methods now return decomposition objects such as `SVD` and + `GeneralizedSVD` rather than tuples of arrays or tuples of numbers ([#27159]). + * `countlines` now always counts the last non-empty line even if it does not end with EOL, matching the behavior of `eachline` and `readlines` ([#25845]). @@ -695,8 +698,8 @@ Deprecated or removed `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). * `lufact`, `eigfact`, `schurfact`, `lqfact`, `qrfact`, `bkfact`, `cholfact`, - `ldltfact`, and `hessfact` have respectively been deprecated to `lu`, `eig`, - `schur`, `lq`, `qr`, `bk`, `chol`, `ldlt`, and `hess` ([#27159]). + `ldltfact`, `hessfact`, and `svdfact` have respectively been deprecated to + `lu`, `eig`, `schur`, `lq`, `qr`, `bk`, `chol`, `ldlt`, `hess`, and `svd` ([#27159]). 
* `lufact!`, `eigfact!`, `schurfact!`, `lqfact!`, `qrfact!`, `cholfact!`, `bkfact!`, `ldltfact!`, `hessfact!`, and `svdfact!` have respectively been diff --git a/doc/src/manual/parallel-computing.md b/doc/src/manual/parallel-computing.md index 76bb41af49448..bcfb77374d884 100644 --- a/doc/src/manual/parallel-computing.md +++ b/doc/src/manual/parallel-computing.md @@ -457,7 +457,7 @@ we could compute the singular values of several large random matrices in paralle ```julia-repl julia> M = Matrix{Float64}[rand(1000,1000) for i = 1:10]; -julia> pmap(svd, M); +julia> pmap(svdvals, M); ``` Julia's [`pmap`](@ref) is designed for the case where each function call does a large amount @@ -486,7 +486,7 @@ As an example, consider computing the singular values of matrices of different s ```julia-repl julia> M = Matrix{Float64}[rand(800,800), rand(600,600), rand(800,800), rand(600,600)]; -julia> pmap(svd, M); +julia> pmap(svdvals, M); ``` If one process handles both 800×800 matrices and another handles both 600×600 matrices, we will diff --git a/doc/src/manual/performance-tips.md b/doc/src/manual/performance-tips.md index ff8ecd570ef48..1ecac2d6baade 100644 --- a/doc/src/manual/performance-tips.md +++ b/doc/src/manual/performance-tips.md @@ -544,7 +544,7 @@ function norm(A) if isa(A, Vector) return sqrt(real(dot(A,A))) elseif isa(A, Matrix) - return maximum(svd(A)[2]) + return maximum(svdvals(A)) else error("norm: invalid argument") end @@ -555,7 +555,7 @@ This can be written more concisely and efficiently as: ```julia norm(x::Vector) = sqrt(real(dot(x,x))) -norm(A::Matrix) = maximum(svd(A)[2]) +norm(A::Matrix) = maximum(svdvals(A)) ``` ## Write "type-stable" functions diff --git a/stdlib/Distributed/test/distributed_exec.jl b/stdlib/Distributed/test/distributed_exec.jl index f073bc2ff9e74..9fb9a5e45fa63 100644 --- a/stdlib/Distributed/test/distributed_exec.jl +++ b/stdlib/Distributed/test/distributed_exec.jl @@ -576,8 +576,8 @@ end n = 10 as = [rand(4,4) for i in 1:n] bs = deepcopy(as) -cs = collect(Distributed.pgenerate(x->(sleep(rand()*0.1); svdfact(x)), bs)) -svdas = map(svdfact, as) +cs = collect(Distributed.pgenerate(x->(sleep(rand()*0.1); svd(x)), bs)) +svdas = map(svd, as) for i in 1:n @test cs[i].U ≈ svdas[i].U @test cs[i].S ≈ svdas[i].S diff --git a/stdlib/IterativeEigensolvers/test/runtests.jl b/stdlib/IterativeEigensolvers/test/runtests.jl index 348b7cff1f5d4..3df8626edcf44 100644 --- a/stdlib/IterativeEigensolvers/test/runtests.jl +++ b/stdlib/IterativeEigensolvers/test/runtests.jl @@ -189,16 +189,16 @@ end S2 = svd(Array(A)) ## singular values match: - @test S1[1].S ≈ S2[2][1:2] + @test S1[1].S ≈ S2.S[1:2] @testset "singular vectors" begin ## 1st left singular vector s1_left = sign(S1[1].U[3,1]) * S1[1].U[:,1] - s2_left = sign(S2[1][3,1]) * S2[1][:,1] + s2_left = sign(S2.U[3,1]) * S2.U[:,1] @test s1_left ≈ s2_left ## 1st right singular vector s1_right = sign(S1[1].V[3,1]) * S1[1].V[:,1] - s2_right = sign(S2[3][3,1]) * S2[3][:,1] + s2_right = sign(S2.V[3,1]) * S2.V[:,1] @test s1_right ≈ s2_right end # Issue number 10329 @@ -213,7 +213,7 @@ end end @testset "passing guess for Krylov vectors" begin S1 = svds(A, nsv = 2, v0=rand(eltype(A),size(A,2))) - @test S1[1].S ≈ S2[2][1:2] + @test S1[1].S ≈ S2.S[1:2] end @test_throws ArgumentError svds(A,nsv=0) @@ -251,21 +251,21 @@ end S2 = svd(Array(A)) ## singular values match: - @test S1[1].S ≈ S2[2][1:2] + @test S1[1].S ≈ S2.S[1:2] @testset "singular vectors" begin ## left singular vectors s1_left = abs.(S1[1].U[:,1:2]) - s2_left = 
abs.(S2[1][:,1:2]) + s2_left = abs.(S2.U[:,1:2]) @test s1_left ≈ s2_left ## right singular vectors s1_right = abs.(S1[1].V[:,1:2]) - s2_right = abs.(S2[3][:,1:2]) + s2_right = abs.(S2.V[:,1:2]) @test s1_right ≈ s2_right end @testset "passing guess for Krylov vectors" begin S1 = svds(A, nsv = 2, v0=rand(eltype(A),size(A,2))) - @test S1[1].S ≈ S2[2][1:2] + @test S1[1].S ≈ S2.S[1:2] end @test_throws ArgumentError svds(A,nsv=0) diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index bb308b88a782e..c95ad2ea234df 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -343,9 +343,8 @@ LinearAlgebra.schur! LinearAlgebra.schur LinearAlgebra.ordschur LinearAlgebra.ordschur! -LinearAlgebra.svdfact -LinearAlgebra.svd! LinearAlgebra.svd +LinearAlgebra.svd! LinearAlgebra.svdvals LinearAlgebra.svdvals! LinearAlgebra.Givens diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 9f4f56d47e312..e587eb791eb57 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -132,7 +132,6 @@ export schur!, svd, svd!, - svdfact, svdvals!, svdvals, sylvester, diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index 3509514bebdd7..6dc62db01ae3b 100644 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -199,12 +199,12 @@ function svd!(M::Bidiagonal{<:BlasReal}; full::Bool = false, thin::Union{Bool,No d, e, U, Vt, Q, iQ = LAPACK.bdsdc!(M.uplo, 'I', M.dv, M.ev) SVD(U, d, Vt) end -function svdfact(M::Bidiagonal; full::Bool = false, thin::Union{Bool,Nothing} = nothing) +function svd(M::Bidiagonal; full::Bool = false, thin::Union{Bool,Nothing} = nothing) # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 if thin != nothing - Base.depwarn(string("the `thin` keyword argument in `svdfact(A; thin = $(thin))` has ", + Base.depwarn(string("the `thin` keyword argument in `svd(A; thin = $(thin))` has ", "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g. `svdfact(A; full = $(!thin))`."), :svdfact) + "e.g. `svd(A; full = $(!thin))`."), :svd) full::Bool = !thin end return svd!(copy(M), full = full) diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 5b899359459ea..42e8e4f3267c6 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -1292,7 +1292,7 @@ function pinv(A::StridedMatrix{T}, tol::Real) where T return B end end - SVD = svdfact(A, full = false) + SVD = svd(A, full = false) Stype = eltype(SVD.S) Sinv = zeros(Stype, length(SVD.S)) index = SVD.S .> tol*maximum(SVD.S) @@ -1344,7 +1344,7 @@ julia> nullspace(M, 2) function nullspace(A::StridedMatrix, tol::Real = min(size(A)...)*eps(real(float(one(eltype(A)))))) m, n = size(A) (m == 0 || n == 0) && return Matrix{T}(I, n, n) - SVD = svdfact(A, full=true) + SVD = svd(A, full=true) indstart = sum(SVD.S .> SVD.S[1]*tol) + 1 return copy(SVD.Vt[indstart:end,:]') end diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 30e7b81519a5c..4be0ad87b978b 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1397,3 +1397,15 @@ export svdfact! 
@deprecate(svdfact!(A::StridedMatrix{T}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) where T<:BlasFloat, svd!(A; full=full, thin=thin)) @deprecate(svdfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasFloat, svd!(A, B)) @deprecate(svdfact!(A::AbstractTriangular), svd!(A)) + +# deprecate svdfact to svd +export svdfact +@deprecate(svdfact(D::Diagonal), svd(D)) +@deprecate(svdfact(A::StridedVecOrMat{T}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) where T, svd(A; full=full, thin=thin)) +@deprecate(svdfact(x::Number; full::Bool = false, thin::Union{Bool,Nothing} = nothing), svd(x; full=full, thin=thin)) +@deprecate(svdfact(x::Integer; full::Bool = false, thin::Union{Bool,Nothing} = nothing), svd(x; full=full, thin=thin)) +@deprecate(svdfact(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat}, svd(A, B)) +@deprecate(svdfact(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB}, svd(A, B)) +@deprecate(svdfact(x::Number, y::Number), svd(x, y)) +@deprecate(svdfact(M::Bidiagonal; full::Bool = false, thin::Union{Bool,Nothing} = nothing), svd(M; full=full, thin=thin)) +@deprecate(svdfact(A::AbstractTriangular), svd(A)) diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl index 8037266d1563c..7477b9f59918c 100644 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ b/stdlib/LinearAlgebra/src/diagonal.jl @@ -462,11 +462,7 @@ function svd(D::Diagonal{<:Number}) Up = hcat([U[:,i] for i = 1:length(D.diag)][piv]...) V = Diagonal(fill!(similar(D.diag), one(eltype(D.diag)))) Vp = hcat([V[:,i] for i = 1:length(D.diag)][piv]...) - return (Up, S[piv], Vp) -end -function svdfact(D::Diagonal) - U, s, V = svd(D) - SVD(U, s, copy(V')) + return SVD(Up, S[piv], copy(Vp')) end # dismabiguation methods: * of Diagonal and Adj/Trans AbsVec diff --git a/stdlib/LinearAlgebra/src/svd.jl b/stdlib/LinearAlgebra/src/svd.jl index 5c85faa058ade..51b986a67c7d2 100644 --- a/stdlib/LinearAlgebra/src/svd.jl +++ b/stdlib/LinearAlgebra/src/svd.jl @@ -10,10 +10,24 @@ struct SVD{T,Tr,M<:AbstractArray} <: Factorization{T} end SVD(U::AbstractArray{T}, S::Vector{Tr}, Vt::AbstractArray{T}) where {T,Tr} = SVD{T,Tr,typeof(U)}(U, S, Vt) +# iteration for destructuring into components +Base.iterate(S::SVD) = (S.U, Val(:S)) +Base.iterate(S::SVD, ::Val{:S}) = (S.S, Val(:V)) +Base.iterate(S::SVD, ::Val{:V}) = (S.V, Val(:done)) +Base.iterate(S::SVD, ::Val{:done}) = nothing + +# # indexing for destructuring into components +@inline function Base.getindex(S::SVD, i::Integer) + i == 1 ? (return S.U) : + i == 2 ? (return S.S) : + i == 3 ? (return S.V) : + throw(BoundsError(S, i)) +end + """ svd!(A; full::Bool = false) -> SVD -`svd!` is the same as [`svdfact`](@ref), but saves space by +`svd!` is the same as [`svd`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. # Examples @@ -60,7 +74,7 @@ function svd!(A::StridedMatrix{T}; full::Bool = false, thin::Union{Bool,Nothing} end """ - svdfact(A; full::Bool = false) -> SVD + svd(A; full::Bool = false) -> SVD Compute the singular value decomposition (SVD) of `A` and return an `SVD` object. @@ -69,6 +83,8 @@ Compute the singular value decomposition (SVD) of `A` and return an `SVD` object The algorithm produces `Vt` and hence `Vt` is more efficient to extract than `V`. The singular values in `S` are sorted in descending order. +Iterating the decomposition produces the components `U`, `S`, and `V`. + If `full = false` (default), a "thin" SVD is returned. 
For a ``M \\times N`` matrix `A`, in the full factorization `U` is `M \\times M` and `V` is `N \\times N`, while in the thin factorization `U` is `M @@ -84,7 +100,7 @@ julia> A = [1. 0. 0. 0. 2.; 0. 0. 3. 0. 0.; 0. 0. 0. 0. 0.; 0. 2. 0. 0. 0.] 0.0 0.0 0.0 0.0 0.0 0.0 2.0 0.0 0.0 0.0 -julia> F = svdfact(A); +julia> F = svd(A); julia> F.U * Diagonal(F.S) * F.Vt 4×5 Array{Float64,2}: @@ -94,84 +110,27 @@ julia> F.U * Diagonal(F.S) * F.Vt 0.0 2.0 0.0 0.0 0.0 ``` """ -function svdfact(A::StridedVecOrMat{T}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) where T +function svd(A::StridedVecOrMat{T}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) where T # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 if thin != nothing - Base.depwarn(string("the `thin` keyword argument in `svdfact(A; thin = $(thin))` has ", + Base.depwarn(string("the `thin` keyword argument in `svd(A; thin = $(thin))` has ", "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g. `svdfact(A; full = $(!thin))`."), :svdfact) + "e.g. `svd(A; full = $(!thin))`."), :svd) full::Bool = !thin end svd!(copy_oftype(A, eigtype(T)), full = full) end -function svdfact(x::Number; full::Bool = false, thin::Union{Bool,Nothing} = nothing) - # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 - if thin != nothing - Base.depwarn(string("the `thin` keyword argument in `svdfact(A; thin = $(thin))` has ", - "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g. `svdfact(A; full = $(!thin))`."), :svdfact) - full::Bool = !thin - end - return SVD(x == 0 ? fill(one(x), 1, 1) : fill(x/abs(x), 1, 1), [abs(x)], fill(one(x), 1, 1)) -end -function svdfact(x::Integer; full::Bool = false, thin::Union{Bool,Nothing} = nothing) - # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 - if thin != nothing - Base.depwarn(string("the `thin` keyword argument in `svdfact(A; thin = $(thin))` has ", - "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g. `svdfact(A; full = $(!thin))`."), :svdfact) - full::Bool = !thin - end - return svdfact(float(x), full = full) -end - -""" - svd(A; full::Bool = false) -> U, S, V - -Computes the SVD of `A`, returning `U`, vector `S`, and `V` such that -`A == U * Diagonal(S) * V'`. The singular values in `S` are sorted in descending order. - -If `full = false` (default), a "thin" SVD is returned. For a ``M -\\times N`` matrix `A`, in the full factorization `U` is `M \\times M` -and `V` is `N \\times N`, while in the thin factorization `U` is `M -\\times K` and `V` is `N \\times K`, where `K = \\min(M,N)` is the -number of singular values. - -`svd` is a wrapper around [`svdfact`](@ref), extracting all parts -of the `SVD` factorization to a tuple. Direct use of `svdfact` is therefore more -efficient. - -# Examples -```jldoctest -julia> A = [1. 0. 0. 0. 2.; 0. 0. 3. 0. 0.; 0. 0. 0. 0. 0.; 0. 2. 0. 0. 0.] 
-4×5 Array{Float64,2}: - 1.0 0.0 0.0 0.0 2.0 - 0.0 0.0 3.0 0.0 0.0 - 0.0 0.0 0.0 0.0 0.0 - 0.0 2.0 0.0 0.0 0.0 - -julia> U, S, V = svd(A); - -julia> U * Diagonal(S) * V' -4×5 Array{Float64,2}: - 1.0 0.0 0.0 0.0 2.0 - 0.0 0.0 3.0 0.0 0.0 - 0.0 0.0 0.0 0.0 0.0 - 0.0 2.0 0.0 0.0 0.0 -``` -""" -function svd(A::AbstractArray; full::Bool = false, thin::Union{Bool,Nothing} = nothing) +function svd(x::Number; full::Bool = false, thin::Union{Bool,Nothing} = nothing) # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 if thin != nothing Base.depwarn(string("the `thin` keyword argument in `svd(A; thin = $(thin))` has ", "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g `svd(A; full = $(!thin))`."), :svd) + "e.g. `svd(A; full = $(!thin))`."), :svd) full::Bool = !thin end - F = svdfact(A, full = full) - F.U, F.S, copy(F.Vt') + return SVD(x == 0 ? fill(one(x), 1, 1) : fill(x/abs(x), 1, 1), [abs(x)], fill(one(x), 1, 1)) end -function svd(x::Number; full::Bool = false, thin::Union{Bool,Nothing} = nothing) +function svd(x::Integer; full::Bool = false, thin::Union{Bool,Nothing} = nothing) # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 if thin != nothing Base.depwarn(string("the `thin` keyword argument in `svd(A; thin = $(thin))` has ", @@ -179,7 +138,7 @@ function svd(x::Number; full::Bool = false, thin::Union{Bool,Nothing} = nothing) "e.g. `svd(A; full = $(!thin))`."), :svd) full::Bool = !thin end - return first.(svd(fill(x, 1, 1))) + return svd(float(x), full = full) end function getproperty(F::SVD, d::Symbol) @@ -197,7 +156,7 @@ Base.propertynames(F::SVD, private::Bool=false) = svdvals!(A) Return the singular values of `A`, saving space by overwriting the input. -See also [`svdvals`](@ref) and [`svdfact`](@ref). +See also [`svdvals`](@ref) and [`svd`](@ref). # Examples ```jldoctest @@ -278,10 +237,30 @@ function GeneralizedSVD(U::AbstractMatrix{T}, V::AbstractMatrix{T}, Q::AbstractM GeneralizedSVD{T,typeof(U)}(U, V, Q, a, b, k, l, R) end +# iteration for destructuring into components +Base.iterate(S::GeneralizedSVD) = (S.U, Val(:V)) +Base.iterate(S::GeneralizedSVD, ::Val{:V}) = (S.V, Val(:Q)) +Base.iterate(S::GeneralizedSVD, ::Val{:Q}) = (S.Q, Val(:D1)) +Base.iterate(S::GeneralizedSVD, ::Val{:D1}) = (S.D1, Val(:D2)) +Base.iterate(S::GeneralizedSVD, ::Val{:D2}) = (S.D2, Val(:R0)) +Base.iterate(S::GeneralizedSVD, ::Val{:R0}) = (S.R0, Val(:done)) +Base.iterate(S::GeneralizedSVD, ::Val{:done}) = nothing + +# indexing for destructuring into components +@inline function Base.getindex(S::GeneralizedSVD, i::Integer) + i == 1 ? (return S.U) : + i == 2 ? (return S.V) : + i == 3 ? (return S.Q) : + i == 4 ? (return S.D1) : + i == 5 ? (return S.D2) : + i == 6 ? (return S.R0) : + throw(BoundsError(S, i)) +end + """ svd!(A, B) -> GeneralizedSVD -`svd!` is the same as [`svdfact`](@ref), but modifies the arguments +`svd!` is the same as [`svd`](@ref), but modifies the arguments `A` and `B` in-place, instead of making copies. 
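As a usage sketch (not part of the patch hunks; it assumes Julia 0.7's LinearAlgebra with these changes applied, and the matrix `A` is an arbitrary illustration), the `SVD` object returned by the renamed `svd` supports both property access and the iteration-based destructuring added above:

```julia
using LinearAlgebra

A = [1.0 0.0; 2.0 3.0]

# Property access on the returned SVD object; Vt is stored, V is derived from it.
F = svd(A)
F.U * Diagonal(F.S) * F.Vt ≈ A    # true

# Iteration-based destructuring replaces the old tuple-returning svd(A).
U, S, V = svd(A)
U * Diagonal(S) * V' ≈ A          # true
```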
# Examples @@ -328,10 +307,10 @@ function svd!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasFloat end GeneralizedSVD(U, V, Q, a, b, Int(k), Int(l), R) end -svdfact(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = svd!(copy(A),copy(B)) +svd(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = svd!(copy(A),copy(B)) """ - svdfact(A, B) -> GeneralizedSVD + svd(A, B) -> GeneralizedSVD Compute the generalized SVD of `A` and `B`, returning a `GeneralizedSVD` factorization object `F`, such that `A = F.U*F.D1*F.R0*F.Q'` and `B = F.V*F.D2*F.R0*F.Q'`. @@ -341,13 +320,15 @@ For an M-by-N matrix `A` and P-by-N matrix `B`, - `U` is a M-by-M orthogonal matrix, - `V` is a P-by-P orthogonal matrix, - `Q` is a N-by-N orthogonal matrix, -- `R0` is a (K+L)-by-N matrix whose rightmost (K+L)-by-(K+L) block is - nonsingular upper block triangular, - `D1` is a M-by-(K+L) diagonal matrix with 1s in the first K entries, - `D2` is a P-by-(K+L) matrix whose top right L-by-L block is diagonal, +- `R0` is a (K+L)-by-N matrix whose rightmost (K+L)-by-(K+L) block is + nonsingular upper block triangular, `K+L` is the effective numerical rank of the matrix `[A; B]`. +Iterating the decomposition produces the components `U`, `V`, `Q`, `D1`, `D2`, and `R0`. + The entries of `F.D1` and `F.D2` are related, as explained in the LAPACK documentation for the [generalized SVD](http://www.netlib.org/lapack/lug/node36.html) and the @@ -366,7 +347,7 @@ julia> B = [0. 1.; 1. 0.] 0.0 1.0 1.0 0.0 -julia> F = svdfact(A, B); +julia> F = svd(A, B); julia> F.U*F.D1*F.R0*F.Q' 2×2 Array{Float64,2}: @@ -379,54 +360,14 @@ julia> F.V*F.D2*F.R0*F.Q' 1.0 0.0 ``` """ -function svdfact(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB} +function svd(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB} S = promote_type(eigtype(TA),TB) return svd!(copy_oftype(A, S), copy_oftype(B, S)) end # This method can be heavily optimized but it is probably not critical # and might introduce bugs or inconsistencies relative to the 1x1 matrix # version -svdfact(x::Number, y::Number) = svdfact(fill(x, 1, 1), fill(y, 1, 1)) - -""" - svd(A, B) -> U, V, Q, D1, D2, R0 - -Wrapper around [`svdfact`](@ref) extracting all parts of the -factorization to a tuple. Direct use of -`svdfact` is therefore generally more efficient. The function returns the generalized SVD of -`A` and `B`, returning `U`, `V`, `Q`, `D1`, `D2`, and `R0` such that `A = U*D1*R0*Q'` and `B = -V*D2*R0*Q'`. - -# Examples -```jldoctest -julia> A = [1. 0.; 0. -1.] -2×2 Array{Float64,2}: - 1.0 0.0 - 0.0 -1.0 - -julia> B = [0. 1.; 1. 0.] -2×2 Array{Float64,2}: - 0.0 1.0 - 1.0 0.0 - -julia> U, V, Q, D1, D2, R0 = svd(A, B); - -julia> U*D1*R0*Q' -2×2 Array{Float64,2}: - 1.0 0.0 - 0.0 -1.0 - -julia> V*D2*R0*Q' -2×2 Array{Float64,2}: - 0.0 1.0 - 1.0 0.0 -``` -""" -function svd(A::AbstractMatrix, B::AbstractMatrix) - F = svdfact(A, B) - F.U, F.V, F.Q, F.D1, F.D2, F.R0 -end -svd(x::Number, y::Number) = first.(svd(fill(x, 1, 1), fill(y, 1, 1))) +svd(x::Number, y::Number) = svd(fill(x, 1, 1), fill(y, 1, 1)) @inline function getproperty(F::GeneralizedSVD{T}, d::Symbol) where T Fa = getfield(F, :a) @@ -476,7 +417,7 @@ Base.propertynames(F::GeneralizedSVD) = Return the generalized singular values from the generalized singular value decomposition of `A` and `B`, saving space by overwriting `A` and `B`. -See also [`svdfact`](@ref) and [`svdvals`](@ref). +See also [`svd`](@ref) and [`svdvals`](@ref). 
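Along the same lines, a brief sketch of the generalized decomposition returned by `svd(A, B)` (the reconstruction identities are the ones stated in the docstring above; the matrices are arbitrary examples, and the component order follows the `iterate` methods added in this patch):

```julia
using LinearAlgebra

A = [1.0 0.0; 0.0 -1.0]
B = [0.0 1.0; 1.0 0.0]

F = svd(A, B)                      # GeneralizedSVD object
F.U * F.D1 * F.R0 * F.Q' ≈ A       # true
F.V * F.D2 * F.R0 * F.Q' ≈ B       # true

# Destructuring mirrors the component order U, V, Q, D1, D2, R0.
U, V, Q, D1, D2, R0 = F
U * D1 * R0 * Q' ≈ A               # true
```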
# Examples ```jldoctest @@ -521,7 +462,7 @@ svdvals(A::StridedMatrix{T},B::StridedMatrix{T}) where {T<:BlasFloat} = svdvals! svdvals(A, B) Return the generalized singular values from the generalized singular value -decomposition of `A` and `B`. See also [`svdfact`](@ref). +decomposition of `A` and `B`. See also [`svd`](@ref). # Examples ```jldoctest diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index ad69a86ba4265..36ac10219513d 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -2435,7 +2435,7 @@ end eig(A::AbstractTriangular) = Eigen(eigvals(A), eigvecs(A)) # Generic singular systems -for func in (:svd, :svdfact, :svd!, :svdvals) +for func in (:svd, :svd!, :svdvals) @eval begin ($func)(A::AbstractTriangular) = ($func)(copyto!(similar(parent(A)), A)) end diff --git a/stdlib/LinearAlgebra/test/bidiag.jl b/stdlib/LinearAlgebra/test/bidiag.jl index 39c8d4cf31258..1f4fa2d41b637 100644 --- a/stdlib/LinearAlgebra/test/bidiag.jl +++ b/stdlib/LinearAlgebra/test/bidiag.jl @@ -242,14 +242,14 @@ srand(1) @testset "Singular systems" begin if (elty <: BlasReal) - @test AbstractArray(svdfact(T)) ≈ AbstractArray(svd!(copy(Tfull))) + @test AbstractArray(svd(T)) ≈ AbstractArray(svd!(copy(Tfull))) @test svdvals(Tfull) ≈ svdvals(T) u1, d1, v1 = svd(Tfull) u2, d2, v2 = svd(T) @test d1 ≈ d2 if elty <: Real test_approx_eq_modphase(u1, u2) - test_approx_eq_modphase(v1, v2) + test_approx_eq_modphase(copy(v1), copy(v2)) end @test 0 ≈ vecnorm(u2*Diagonal(d2)*v2'-Tfull) atol=n*max(n^2*eps(relty),vecnorm(u1*Diagonal(d1)*v1'-Tfull)) @inferred svdvals(T) diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl index ba3d1a72182af..4fec50309adb0 100644 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -265,7 +265,7 @@ srand(1) U, s, V = svd(D) @test (U*Diagonal(s))*V' ≈ D @test svdvals(D) == s - @test svdfact(D).V == V + @test svd(D).V == V end end diff --git a/stdlib/LinearAlgebra/test/svd.jl b/stdlib/LinearAlgebra/test/svd.jl index 1210ad5a31b44..44047d35b523a 100644 --- a/stdlib/LinearAlgebra/test/svd.jl +++ b/stdlib/LinearAlgebra/test/svd.jl @@ -5,7 +5,7 @@ module TestSVD using Test, LinearAlgebra, Random using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted -@testset "Simple svdvals / svdfact tests" begin +@testset "Simple svdvals / svd tests" begin ≊(x,y) = isapprox(x,y,rtol=1e-15) m1 = [2 0; 0 0] @@ -15,8 +15,8 @@ using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted @test @inferred(svdvals(m2)) ≊ [2, 1] @test @inferred(svdvals(m2c)) ≊ [2, 1] - sf1 = svdfact(m1) - sf2 = svdfact(m2) + sf1 = svd(m1) + sf2 = svd(m2) @test sf1.S ≊ [2, 0] @test sf2.S ≊ [2, 1] # U & Vt are unitary @@ -52,7 +52,7 @@ a2img = randn(n,n)/2 for (a, a2) in ((aa, aa2), (view(aa, 1:n, 1:n), view(aa2, 1:n, 1:n))) ε = εa = eps(abs(float(one(eltya)))) - usv = svdfact(a) + usv = svd(a) @testset "singular value decomposition" begin @test usv.S === svdvals(usv) @test usv.U * (Diagonal(usv.S) * usv.Vt) ≈ a @@ -71,7 +71,7 @@ a2img = randn(n,n)/2 end @testset "Generalized svd" begin a_svd = a[1:n1, :] - gsvd = svdfact(a,a_svd) + gsvd = svd(a,a_svd) @test gsvd.U*gsvd.D1*gsvd.R*gsvd.Q' ≈ a @test gsvd.V*gsvd.D2*gsvd.R*gsvd.Q' ≈ a_svd @test usv.Vt' ≈ usv.V @@ -79,7 +79,7 @@ a2img = randn(n,n)/2 @test_throws ErrorException gsvd.Z @test gsvd.vals ≈ svdvals(a,a_svd) α = eltya == Int ? 
-1 : rand(eltya) - β = svdfact(α) + β = svd(α) @test β.S == [abs(α)] @test svdvals(α) == abs(α) u,v,q,d1,d2,r0 = svd(a,a_svd) @@ -93,7 +93,7 @@ a2img = randn(n,n)/2 #testing the other layout for D1 & D2 b = rand(eltya,n,2*n) c = rand(eltya,n,2*n) - gsvd = svdfact(b,c) + gsvd = svd(b,c) @test gsvd.U*gsvd.D1*gsvd.R*gsvd.Q' ≈ b @test gsvd.V*gsvd.D2*gsvd.R*gsvd.Q' ≈ c end @@ -101,18 +101,16 @@ a2img = randn(n,n)/2 if eltya <: LinearAlgebra.BlasReal @testset "Number input" begin x, y = randn(eltya, 2) - @test svdfact(x) == svdfact(fill(x, 1, 1)) + @test svd(x) == svd(fill(x, 1, 1)) @test svdvals(x) == first(svdvals(fill(x, 1, 1))) - @test svd(x) == first.(svd(fill(x, 1, 1))) - @test svdfact(x, y) == svdfact(fill(x, 1, 1), fill(y, 1, 1)) + @test svd(x, y) == svd(fill(x, 1, 1), fill(y, 1, 1)) @test svdvals(x, y) ≈ first(svdvals(fill(x, 1, 1), fill(y, 1, 1))) - @test svd(x, y) == first.(svd(fill(x, 1, 1), fill(y, 1, 1))) end end if eltya != Int @testset "isequal, ==, and hash" begin x, y = rand(eltya), convert(eltya, NaN) - Fx, Fy = svdfact(x), svdfact(y) + Fx, Fy = svd(x), svd(y) @test Fx == Fx @test !(Fy == Fy) @test isequal(Fy, Fy) diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index 8dbf3822d0ea6..f650423650eca 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -265,7 +265,6 @@ for elty1 in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFlo if !(elty1 in (BigFloat, Complex{BigFloat})) # Not implemented yet svd(A1) - svdfact(A1) elty1 <: BlasFloat && svd!(copy(A1)) svdvals(A1) end diff --git a/test/bitarray.jl b/test/bitarray.jl index 6e48c416d2e9d..0e5b5913f3942 100644 --- a/test/bitarray.jl +++ b/test/bitarray.jl @@ -1467,7 +1467,8 @@ timesofar("cat") @check_bit_operation diff(b1, dims=2) Matrix{Int} b1 = bitrand(n1, n1) - @check_bit_operation svd(b1) + @test ((svdb1, svdb1A) = (svd(b1), svd(Array(b1))); + svdb1.U == svdb1A.U && svdb1.S == svdb1A.S && svdb1.V == svdb1A.V) @test ((qrb1, qrb1A) = (qr(b1), qr(Array(b1))); qrb1.Q == qrb1A.Q && qrb1.R == qrb1A.R) From bf5ddfaddf16e23013e5259d956d411d7ec3ee57 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Mon, 21 May 2018 12:09:08 -0700 Subject: [PATCH 21/21] Deprecate decomposition getindex methods in favor of property accessors. --- stdlib/LinearAlgebra/src/deprecated.jl | 123 +++++++++++++++++++++++++ stdlib/LinearAlgebra/src/eigen.jl | 7 -- stdlib/LinearAlgebra/src/lq.jl | 7 -- stdlib/LinearAlgebra/src/lu.jl | 8 -- stdlib/LinearAlgebra/src/qr.jl | 26 +----- stdlib/LinearAlgebra/src/schur.jl | 19 ---- stdlib/LinearAlgebra/src/svd.jl | 8 -- stdlib/LinearAlgebra/test/eigen.jl | 4 +- stdlib/LinearAlgebra/test/generic.jl | 2 +- stdlib/LinearAlgebra/test/lq.jl | 1 - 10 files changed, 127 insertions(+), 78 deletions(-) diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 4be0ad87b978b..f76f5c43a6c83 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1409,3 +1409,126 @@ export svdfact @deprecate(svdfact(x::Number, y::Number), svd(x, y)) @deprecate(svdfact(M::Bidiagonal; full::Bool = false, thin::Union{Bool,Nothing} = nothing), svd(M; full=full, thin=thin)) @deprecate(svdfact(A::AbstractTriangular), svd(A)) + +# deprecate transitional decomposition getindex methods out of the blocks +@inline function Base.getindex(S::LU, i::Integer) + depwarn(string("decomposition functions (e.g. `lu`) now return decomposition ", + "objects (e.g. 
`LU`), and indexing such objects is deprecated. Instead ", + "extract components via their accessors (e.g. `X.L`, `X.S`, and `X.p` for ", + "`X::LU`), or destructure the decomposition via iteration ", + "(e.g. `l, u, p = X` for `X::LU`)."), :getindex) + i == 1 ? (return S.L) : + i == 2 ? (return S.U) : + i == 3 ? (return S.p) : + throw(BoundsError(S, i)) +end +@inline function Base.getindex(S::Union{Eigen,GeneralizedEigen}, i::Integer) + depwarn(string("decomposition functions (e.g. `eig`) now return decomposition ", + "objects (e.g. `Eigen` and `GeneralizedEigen`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.values` and `X.vectors` for `X::Union{Eigen,GeneralizedEigen}`), ", + "or destructure the decomposition via iteration ", + "(e.g. `vals, vecs = X` for `X::Union{Eigen,GeneralizedEigen}`)."), :getindex) + i == 1 ? (return S.values) : + i == 2 ? (return S.vectors) : + throw(BoundsError(S, i)) +end +@inline function Base.getindex(S::Schur, i::Integer) + depwarn(string("decomposition functions (e.g. `schur`) now return decomposition ", + "objects (e.g. `Schur`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.T`, `X.Z`, and `X.values` for `X::Schur`), ", + "or destructure the decomposition via iteration ", + "(e.g. `t, z, vals = X` for `X::Schur`)."), :getindex) + i == 1 ? (return S.T) : + i == 2 ? (return S.Z) : + i == 3 ? (return S.values) : + throw(BoundsError(S, i)) +end +@inline function Base.getindex(S::GeneralizedSchur, i::Integer) + depwarn(string("decomposition functions (e.g. `schur`) now return decomposition ", + "objects (e.g. `GeneralizedSchur`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.S`, `X.T`, `X.Q`, `X.Z`, `X.α`, and `X.β` for `X::GeneralizedSchur`), ", + "or destructure the decomposition via iteration ", + "(e.g. `s, t, q, z, α, β = X` for `X::GeneralizedSchur`)."), :getindex) + i == 1 ? (return S.S) : + i == 2 ? (return S.T) : + i == 3 ? (return S.Q) : + i == 4 ? (return S.Z) : + i == 5 ? (return S.α) : + i == 6 ? (return S.β) : + throw(BoundsError(S, i)) +end +@inline function Base.getindex(S::LQ, i::Integer) + depwarn(string("decomposition functions (e.g. `lq`) now return decomposition ", + "objects (e.g. `LQ`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.L` and `X.Q` for `X::LQ`), ", + "or destructure the decomposition via iteration ", + "(e.g. `l, q = X` for `X::LQ`)."), :getindex) + i == 1 ? (return S.L) : + i == 2 ? (return S.Q) : + throw(BoundsError(S, i)) +end +@inline function Base.getindex(S::QR, i::Integer) + depwarn(string("decomposition functions (e.g. `qr`) now return decomposition ", + "objects (e.g. `QR`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.Q` and `X.R` for `X::QR`), ", + "or destructure the decomposition via iteration ", + "(e.g. `q, r = X` for `X::QR`)."), :getindex) + i == 1 ? (return S.Q) : + i == 2 ? (return S.R) : + throw(BoundsError(S, i)) +end +@inline function Base.getindex(S::QRCompactWY, i::Integer) + depwarn(string("decomposition functions (e.g. `qr`) now return decomposition ", + "objects (e.g. `QRCompactWY`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.Q` and `X.R` for `X::QR`), ", + "or destructure the decomposition via iteration ", + "(e.g. 
`q, r = X` for `X::QR`)."), :getindex) + i == 1 ? (return S.Q) : + i == 2 ? (return S.R) : + throw(BoundsError(S, i)) +end +@inline function Base.getindex(S::QRPivoted, i::Integer) + depwarn(string("decomposition functions (e.g. `qr`) now return decomposition ", + "objects (e.g. `QRPivoted`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.Q`, `X.R`, and `X.p` for `X::QRPivoted`), ", + "or destructure the decomposition via iteration ", + "(e.g. `q, r, p = X` for `X::QRPivoted`)."), :getindex) + i == 1 ? (return S.Q) : + i == 2 ? (return S.R) : + i == 3 ? (return S.p) : + throw(BoundsError(S, i)) +end +@inline function Base.getindex(S::SVD, i::Integer) + depwarn(string("decomposition functions (e.g. `svd`) now return decomposition ", + "objects (e.g. `SVD`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.U`, `X.S`, and `X.V` for `X::SVD`), ", + "or destructure the decomposition via iteration ", + "(e.g. `u, s, v = X` for `X::SVD`)."), :getindex) + i == 1 ? (return S.U) : + i == 2 ? (return S.S) : + i == 3 ? (return S.V) : + throw(BoundsError(S, i)) +end +@inline function Base.getindex(S::GeneralizedSVD, i::Integer) + depwarn(string("decomposition functions (e.g. `svd`) now return decomposition ", + "objects (e.g. `GeneralizedSVD`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.U`, `X.V`, `X.Q`, `X.D1`, `X.D2`, and `X.R0` for `X::GeneralizedSVD`), ", + "or destructure the decomposition via iteration ", + "(e.g. `u, v, q, d1, d2, r0 = X` for `X::GeneralizedSVD`)."), :getindex) + i == 1 ? (return S.U) : + i == 2 ? (return S.V) : + i == 3 ? (return S.Q) : + i == 4 ? (return S.D1) : + i == 5 ? (return S.D2) : + i == 6 ? (return S.R0) : + throw(BoundsError(S, i)) +end diff --git a/stdlib/LinearAlgebra/src/eigen.jl b/stdlib/LinearAlgebra/src/eigen.jl index bf25e134ade10..97745d058bc41 100644 --- a/stdlib/LinearAlgebra/src/eigen.jl +++ b/stdlib/LinearAlgebra/src/eigen.jl @@ -25,13 +25,6 @@ Base.iterate(S::Union{Eigen,GeneralizedEigen}) = (S.values, Val(:vectors)) Base.iterate(S::Union{Eigen,GeneralizedEigen}, ::Val{:vectors}) = (S.vectors, Val(:done)) Base.iterate(S::Union{Eigen,GeneralizedEigen}, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::Union{Eigen,GeneralizedEigen}, i::Integer) - i == 1 ? (return S.values) : - i == 2 ? (return S.vectors) : - throw(BoundsError(S, i)) -end - isposdef(A::Union{Eigen,GeneralizedEigen}) = isreal(A.values) && all(x -> x > 0, A.values) """ diff --git a/stdlib/LinearAlgebra/src/lq.jl b/stdlib/LinearAlgebra/src/lq.jl index 10ca990f027cb..57fc6b6f5d8a0 100644 --- a/stdlib/LinearAlgebra/src/lq.jl +++ b/stdlib/LinearAlgebra/src/lq.jl @@ -14,13 +14,6 @@ Base.iterate(S::LQ) = (S.L, Val(:Q)) Base.iterate(S::LQ, ::Val{:Q}) = (S.Q, Val(:done)) Base.iterate(S::LQ, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::LQ, i::Integer) - i == 1 ? (return S.L) : - i == 2 ? 
(return S.Q) : - throw(BoundsError(S, i)) -end - struct LQPackedQ{T,S<:AbstractMatrix} <: AbstractMatrix{T} factors::Matrix{T} τ::Vector{T} diff --git a/stdlib/LinearAlgebra/src/lu.jl b/stdlib/LinearAlgebra/src/lu.jl index f041130230290..60e1987b3409c 100644 --- a/stdlib/LinearAlgebra/src/lu.jl +++ b/stdlib/LinearAlgebra/src/lu.jl @@ -17,14 +17,6 @@ Base.iterate(S::LU, ::Val{:U}) = (S.U, Val(:p)) Base.iterate(S::LU, ::Val{:p}) = (S.p, Val(:done)) Base.iterate(S::LU, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::LU, i::Integer) - i == 1 ? (return S.L) : - i == 2 ? (return S.U) : - i == 3 ? (return S.p) : - throw(BoundsError(S, i)) -end - adjoint(F::LU) = Adjoint(F) transpose(F::LU) = Transpose(F) diff --git a/stdlib/LinearAlgebra/src/qr.jl b/stdlib/LinearAlgebra/src/qr.jl index 90e5ae7d21c91..a036fd9b5cd47 100644 --- a/stdlib/LinearAlgebra/src/qr.jl +++ b/stdlib/LinearAlgebra/src/qr.jl @@ -46,14 +46,6 @@ Base.iterate(S::QR) = (S.Q, Val(:R)) Base.iterate(S::QR, ::Val{:R}) = (S.R, Val(:done)) Base.iterate(S::QR, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::QR, i::Integer) - i == 1 ? (return S.Q) : - i == 2 ? (return S.R) : - throw(BoundsError(S, i)) -end - - # Note. For QRCompactWY factorization without pivoting, the WY representation based method introduced in LAPACK 3.4 """ QRCompactWY <: Factorization @@ -114,13 +106,6 @@ Base.iterate(S::QRCompactWY) = (S.Q, Val(:R)) Base.iterate(S::QRCompactWY, ::Val{:R}) = (S.R, Val(:done)) Base.iterate(S::QRCompactWY, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::QRCompactWY, i::Integer) - i == 1 ? (return S.Q) : - i == 2 ? (return S.R) : - throw(BoundsError(S, i)) -end - """ QRPivoted <: Factorization @@ -170,15 +155,6 @@ Base.iterate(S::QRPivoted, ::Val{:R}) = (S.R, Val(:p)) Base.iterate(S::QRPivoted, ::Val{:p}) = (S.p, Val(:done)) Base.iterate(S::QRPivoted, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::QRPivoted, i::Integer) - i == 1 ? (return S.Q) : - i == 2 ? (return S.R) : - i == 3 ? (return S.p) : - throw(BoundsError(S, i)) -end - - function qrfactUnblocked!(A::AbstractMatrix{T}) where {T} m, n = size(A) τ = zeros(T, min(m,n)) @@ -308,7 +284,7 @@ The returned object `F` stores the factorization in a packed format: - otherwise `F` is a [`QR`](@ref) object. -The individual components of the factorization `F` can be accessed by indexing with a symbol: +The individual components of the decomposition `F` can be accessed via property accessors: - `F.Q`: the orthogonal/unitary matrix `Q` - `F.R`: the upper triangular matrix `R` diff --git a/stdlib/LinearAlgebra/src/schur.jl b/stdlib/LinearAlgebra/src/schur.jl index f7e435cd4edbe..c1b848c5408e5 100644 --- a/stdlib/LinearAlgebra/src/schur.jl +++ b/stdlib/LinearAlgebra/src/schur.jl @@ -15,14 +15,6 @@ Base.iterate(S::Schur, ::Val{:Z}) = (S.Z, Val(:values)) Base.iterate(S::Schur, ::Val{:values}) = (S.values, Val(:done)) Base.iterate(S::Schur, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::Schur, i::Integer) - i == 1 ? (return S.T) : - i == 2 ? (return S.Z) : - i == 3 ? 
(return S.values) : - throw(BoundsError(S, i)) -end - """ schur!(A::StridedMatrix) -> F::Schur @@ -204,17 +196,6 @@ Base.iterate(S::GeneralizedSchur, ::Val{:α}) = (S.α, Val(:β)) Base.iterate(S::GeneralizedSchur, ::Val{:β}) = (S.β, Val(:done)) Base.iterate(S::GeneralizedSchur, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::GeneralizedSchur, i::Integer) - i == 1 ? (return S.S) : - i == 2 ? (return S.T) : - i == 3 ? (return S.Q) : - i == 4 ? (return S.Z) : - i == 5 ? (return S.α) : - i == 6 ? (return S.β) : - throw(BoundsError(S, i)) -end - """ schur!(A::StridedMatrix, B::StridedMatrix) -> F::GeneralizedSchur diff --git a/stdlib/LinearAlgebra/src/svd.jl b/stdlib/LinearAlgebra/src/svd.jl index 51b986a67c7d2..ee3e2fad582b2 100644 --- a/stdlib/LinearAlgebra/src/svd.jl +++ b/stdlib/LinearAlgebra/src/svd.jl @@ -16,14 +16,6 @@ Base.iterate(S::SVD, ::Val{:S}) = (S.S, Val(:V)) Base.iterate(S::SVD, ::Val{:V}) = (S.V, Val(:done)) Base.iterate(S::SVD, ::Val{:done}) = nothing -# # indexing for destructuring into components -@inline function Base.getindex(S::SVD, i::Integer) - i == 1 ? (return S.U) : - i == 2 ? (return S.S) : - i == 3 ? (return S.V) : - throw(BoundsError(S, i)) -end - """ svd!(A; full::Bool = false) -> SVD diff --git a/stdlib/LinearAlgebra/test/eigen.jl b/stdlib/LinearAlgebra/test/eigen.jl index e9fc8318e6ef5..97cd566211494 100644 --- a/stdlib/LinearAlgebra/test/eigen.jl +++ b/stdlib/LinearAlgebra/test/eigen.jl @@ -29,8 +29,8 @@ aimg = randn(n,n)/2 α = rand(eltya) β = rand(eltya) eab = eig(α,β) - @test eab[1] == eigvals(fill(α,1,1),fill(β,1,1)) - @test eab[2] == eigvecs(fill(α,1,1),fill(β,1,1)) + @test eab.values == eigvals(fill(α,1,1),fill(β,1,1)) + @test eab.vectors == eigvecs(fill(α,1,1),fill(β,1,1)) @testset "non-symmetric eigen decomposition" begin d, v = eig(a) diff --git a/stdlib/LinearAlgebra/test/generic.jl b/stdlib/LinearAlgebra/test/generic.jl index 6108a563bbb29..8bf054f7530b4 100644 --- a/stdlib/LinearAlgebra/test/generic.jl +++ b/stdlib/LinearAlgebra/test/generic.jl @@ -240,7 +240,7 @@ end @test rank(fill(0, 0, 0)) == 0 @test rank([1.0 0.0; 0.0 0.9],0.95) == 1 -@test qr(big.([0 1; 0 0]))[2] == [0 1; 0 0] +@test qr(big.([0 1; 0 0])).R == [0 1; 0 0] @test norm([2.4e-322, 4.4e-323]) ≈ 2.47e-322 @test norm([2.4e-322, 4.4e-323], 3) ≈ 2.4e-322 diff --git a/stdlib/LinearAlgebra/test/lq.jl b/stdlib/LinearAlgebra/test/lq.jl index f8ab28755d06b..d9a7f552de63e 100644 --- a/stdlib/LinearAlgebra/test/lq.jl +++ b/stdlib/LinearAlgebra/test/lq.jl @@ -39,7 +39,6 @@ rectangularQ(Q::LinearAlgebra.LQPackedQ) = convert(Array, Q) α = rand(eltya) aα = fill(α,1,1) @test lq(α).L*lq(α).Q ≈ lq(aα).L*lq(aα).Q - @test lq(α)[1]*lq(α)[2] ≈ lq(aα).L*lq(aα).Q @test abs(lq(α).Q[1,1]) ≈ one(eltya) tab = promote_type(eltya,eltyb)
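As a closing sketch of the migration this last commit asks for (illustrative matrices only; `lu` and `qr` as provided by the patched LinearAlgebra), indexing into factorization objects gives way to property accessors or iteration destructuring:

```julia
using LinearAlgebra

A = [4.0 3.0; 6.0 3.0]

F = lu(A)
# Deprecated: F[1], F[2], F[3]. Preferred: accessors or destructuring.
F.L * F.U ≈ A[F.p, :]              # true
l, u, p = F                        # iteration destructuring

F2 = qr(A)
# Deprecated: F2[1], F2[2]. Preferred: F2.Q, F2.R or `q, r = F2`.
F2.Q * F2.R ≈ A                    # true
```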