From 8b73a98ec1b367c2c087d0c000a1e887612546c1 Mon Sep 17 00:00:00 2001 From: Fredrik Ekre Date: Thu, 18 Jan 2018 00:33:29 +0100 Subject: [PATCH] Base.LinAlg to new LinearAlgebra stdlib --- NEWS.md | 3 + base/array.jl | 7 +- base/deprecated.jl | 1525 ++--------------- base/essentials.jl | 2 +- base/exports.jl | 98 +- base/interactiveutil.jl | 14 +- base/math.jl | 2 +- base/precompile.jl | 2 +- base/reflection.jl | 8 +- base/statistics.jl | 20 +- base/sysimg.jl | 12 +- doc/REQUIRE | 6 +- doc/make.jl | 6 +- doc/src/base/arrays.md | 3 +- doc/src/base/base.md | 3 - doc/src/base/index.md | 1 - doc/src/base/linalg.md | 340 ---- doc/src/index.md | 3 +- doc/src/manual/index.md | 1 - stdlib/Distributed/src/cluster.jl | 3 +- stdlib/Distributed/test/distributed_exec.jl | 4 +- .../src/IterativeEigensolvers.jl | 13 +- stdlib/IterativeEigensolvers/src/arpack.jl | 2 +- .../IterativeEigensolvers/src/deprecated.jl | 6 +- stdlib/IterativeEigensolvers/test/runtests.jl | 18 +- .../LinearAlgebra/docs/src/index.md | 357 +++- .../LinearAlgebra/src/LinearAlgebra.jl | 31 +- .../LinearAlgebra/src}/adjtrans.jl | 0 .../LinearAlgebra/src}/bidiag.jl | 0 .../LinearAlgebra/src}/bitarray.jl | 0 .../LinearAlgebra/src}/blas.jl | 5 +- .../LinearAlgebra/src}/bunchkaufman.jl | 8 +- .../LinearAlgebra/src}/cholesky.jl | 8 +- .../LinearAlgebra/src}/conjarray.jl | 0 .../LinearAlgebra/src}/dense.jl | 0 stdlib/LinearAlgebra/src/deprecated.jl | 1264 ++++++++++++++ .../LinearAlgebra/src}/diagonal.jl | 0 .../LinearAlgebra/src}/eigen.jl | 2 +- .../LinearAlgebra/src}/exceptions.jl | 0 .../LinearAlgebra/src}/factorization.jl | 4 +- .../LinearAlgebra/src}/generic.jl | 2 +- .../LinearAlgebra/src}/givens.jl | 8 +- .../LinearAlgebra/src}/hessenberg.jl | 0 .../LinearAlgebra/src}/lapack.jl | 6 +- .../LinearAlgebra/src}/ldlt.jl | 0 .../linalg => stdlib/LinearAlgebra/src}/lq.jl | 0 .../linalg => stdlib/LinearAlgebra/src}/lu.jl | 6 +- .../LinearAlgebra/src}/matmul.jl | 8 +- .../linalg => stdlib/LinearAlgebra/src}/qr.jl | 18 +- .../LinearAlgebra/src}/rowvector.jl | 0 .../LinearAlgebra/src}/schur.jl | 12 +- .../LinearAlgebra/src}/special.jl | 0 .../LinearAlgebra/src}/svd.jl | 0 .../LinearAlgebra/src}/symmetric.jl | 0 .../LinearAlgebra/src}/transpose.jl | 0 .../LinearAlgebra/src}/triangular.jl | 2 +- .../LinearAlgebra/src}/tridiag.jl | 0 .../LinearAlgebra/src}/uniformscaling.jl | 5 +- .../LinearAlgebra/test}/adjtrans.jl | 8 +- .../LinearAlgebra/test}/bidiag.jl | 22 +- .../LinearAlgebra/test}/blas.jl | 29 +- .../LinearAlgebra/test}/bunchkaufman.jl | 21 +- .../LinearAlgebra/test}/cholesky.jl | 37 +- .../LinearAlgebra/test}/dense.jl | 19 +- .../LinearAlgebra/test}/diagonal.jl | 12 +- .../LinearAlgebra/test}/eigen.jl | 7 +- .../LinearAlgebra/test}/generic.jl | 134 +- .../LinearAlgebra/test}/givens.jl | 9 +- .../LinearAlgebra/test}/hessenberg.jl | 6 +- .../LinearAlgebra/test}/lapack.jl | 41 +- .../LinearAlgebra/test}/lq.jl | 16 +- .../LinearAlgebra/test}/lu.jl | 25 +- .../LinearAlgebra/test}/matmul.jl | 48 +- .../LinearAlgebra/test}/pinv.jl | 10 +- .../LinearAlgebra/test}/qr.jl | 17 +- stdlib/LinearAlgebra/test/runtests.jl | 24 + .../LinearAlgebra/test}/schur.jl | 13 +- .../LinearAlgebra/test}/special.jl | 16 +- .../LinearAlgebra/test}/svd.jl | 9 +- .../LinearAlgebra/test}/symmetric.jl | 13 +- stdlib/LinearAlgebra/test/testutils.jl | 27 + .../LinearAlgebra/test}/triangular.jl | 20 +- .../LinearAlgebra/test}/tridiag.jl | 36 +- .../LinearAlgebra/test}/uniformscaling.jl | 21 +- stdlib/SparseArrays/src/SparseArrays.jl | 26 +- 
stdlib/SparseArrays/src/abstractsparse.jl | 12 +- stdlib/SparseArrays/src/deprecated.jl | 23 +- stdlib/SparseArrays/src/higherorderfns.jl | 5 +- stdlib/SparseArrays/src/linalg.jl | 22 +- stdlib/SparseArrays/src/sparsematrix.jl | 2 +- stdlib/SparseArrays/src/sparsevector.jl | 30 +- stdlib/SparseArrays/test/runtests.jl | 1 + stdlib/SparseArrays/test/sparse.jl | 20 +- stdlib/SparseArrays/test/sparsevector.jl | 22 +- stdlib/SuiteSparse/src/SuiteSparse.jl | 2 +- stdlib/SuiteSparse/src/cholmod.jl | 9 +- stdlib/SuiteSparse/src/deprecated.jl | 48 +- stdlib/SuiteSparse/src/spqr.jl | 35 +- stdlib/SuiteSparse/src/umfpack.jl | 9 +- stdlib/SuiteSparse/test/cholmod.jl | 18 +- stdlib/SuiteSparse/test/runtests.jl | 2 +- stdlib/SuiteSparse/test/spqr.jl | 2 +- stdlib/SuiteSparse/test/umfpack.jl | 10 +- stdlib/Test/src/Test.jl | 26 - test/abstractarray.jl | 8 +- test/ambiguous.jl | 8 +- test/arrayops.jl | 4 +- test/bitarray.jl | 2 +- test/broadcast.jl | 2 +- test/choosetests.jl | 34 +- test/compile.jl | 2 +- test/complex.jl | 2 + test/core.jl | 1 + test/dimensionful.jl | 3 +- test/docs.jl | 23 +- test/hashing.jl | 2 +- test/math.jl | 1 + test/numbers.jl | 1 + test/offsetarray.jl | 1 + test/perf/blas/level3.jl | 2 +- test/perf/threads/stockcorr/pstockcorr.jl | 2 +- test/replutil.jl | 2 +- test/runtests.jl | 3 +- test/show.jl | 20 +- test/statistics.jl | 2 +- test/subtype.jl | 7 +- 126 files changed, 2515 insertions(+), 2429 deletions(-) delete mode 100644 doc/src/base/linalg.md rename doc/src/manual/linear-algebra.md => stdlib/LinearAlgebra/docs/src/index.md (53%) rename base/linalg/linalg.jl => stdlib/LinearAlgebra/src/LinearAlgebra.jl (90%) rename {base/linalg => stdlib/LinearAlgebra/src}/adjtrans.jl (100%) rename {base/linalg => stdlib/LinearAlgebra/src}/bidiag.jl (100%) rename {base/linalg => stdlib/LinearAlgebra/src}/bitarray.jl (100%) rename {base/linalg => stdlib/LinearAlgebra/src}/blas.jl (99%) rename {base/linalg => stdlib/LinearAlgebra/src}/bunchkaufman.jl (97%) rename {base/linalg => stdlib/LinearAlgebra/src}/cholesky.jl (98%) rename {base/linalg => stdlib/LinearAlgebra/src}/conjarray.jl (100%) rename {base/linalg => stdlib/LinearAlgebra/src}/dense.jl (100%) create mode 100644 stdlib/LinearAlgebra/src/deprecated.jl rename {base/linalg => stdlib/LinearAlgebra/src}/diagonal.jl (100%) rename {base/linalg => stdlib/LinearAlgebra/src}/eigen.jl (99%) rename {base/linalg => stdlib/LinearAlgebra/src}/exceptions.jl (100%) rename {base/linalg => stdlib/LinearAlgebra/src}/factorization.jl (97%) rename {base/linalg => stdlib/LinearAlgebra/src}/generic.jl (99%) rename {base/linalg => stdlib/LinearAlgebra/src}/givens.jl (98%) rename {base/linalg => stdlib/LinearAlgebra/src}/hessenberg.jl (100%) rename {base/linalg => stdlib/LinearAlgebra/src}/lapack.jl (99%) rename {base/linalg => stdlib/LinearAlgebra/src}/ldlt.jl (100%) rename {base/linalg => stdlib/LinearAlgebra/src}/lq.jl (100%) rename {base/linalg => stdlib/LinearAlgebra/src}/lu.jl (99%) rename {base/linalg => stdlib/LinearAlgebra/src}/matmul.jl (99%) rename {base/linalg => stdlib/LinearAlgebra/src}/qr.jl (98%) rename {base/linalg => stdlib/LinearAlgebra/src}/rowvector.jl (100%) rename {base/linalg => stdlib/LinearAlgebra/src}/schur.jl (95%) rename {base/linalg => stdlib/LinearAlgebra/src}/special.jl (100%) rename {base/linalg => stdlib/LinearAlgebra/src}/svd.jl (100%) rename {base/linalg => stdlib/LinearAlgebra/src}/symmetric.jl (100%) rename {base/linalg => stdlib/LinearAlgebra/src}/transpose.jl (100%) rename {base/linalg => 
stdlib/LinearAlgebra/src}/triangular.jl (99%) rename {base/linalg => stdlib/LinearAlgebra/src}/tridiag.jl (100%) rename {base/linalg => stdlib/LinearAlgebra/src}/uniformscaling.jl (99%) rename {test/linalg => stdlib/LinearAlgebra/test}/adjtrans.jl (99%) rename {test/linalg => stdlib/LinearAlgebra/test}/bidiag.jl (96%) rename {test/linalg => stdlib/LinearAlgebra/test}/blas.jl (94%) rename {test/linalg => stdlib/LinearAlgebra/test}/bunchkaufman.jl (90%) rename {test/linalg => stdlib/LinearAlgebra/test}/cholesky.jl (89%) rename {test/linalg => stdlib/LinearAlgebra/test}/dense.jl (99%) rename {test/linalg => stdlib/LinearAlgebra/test}/diagonal.jl (98%) rename {test/linalg => stdlib/LinearAlgebra/test}/eigen.jl (96%) rename {test/linalg => stdlib/LinearAlgebra/test}/generic.jl (81%) rename {test/linalg => stdlib/LinearAlgebra/test}/givens.jl (94%) rename {test/linalg => stdlib/LinearAlgebra/test}/hessenberg.jl (91%) rename {test/linalg => stdlib/LinearAlgebra/test}/lapack.jl (95%) rename {test/linalg => stdlib/LinearAlgebra/test}/lq.jl (96%) rename {test/linalg => stdlib/LinearAlgebra/test}/lu.jl (93%) rename {test/linalg => stdlib/LinearAlgebra/test}/matmul.jl (88%) rename {test/linalg => stdlib/LinearAlgebra/test}/pinv.jl (97%) rename {test/linalg => stdlib/LinearAlgebra/test}/qr.jl (94%) create mode 100644 stdlib/LinearAlgebra/test/runtests.jl rename {test/linalg => stdlib/LinearAlgebra/test}/schur.jl (92%) rename {test/linalg => stdlib/LinearAlgebra/test}/special.jl (95%) rename {test/linalg => stdlib/LinearAlgebra/test}/svd.jl (96%) rename {test/linalg => stdlib/LinearAlgebra/test}/symmetric.jl (98%) create mode 100644 stdlib/LinearAlgebra/test/testutils.jl rename {test/linalg => stdlib/LinearAlgebra/test}/triangular.jl (97%) rename {test/linalg => stdlib/LinearAlgebra/test}/tridiag.jl (92%) rename {test/linalg => stdlib/LinearAlgebra/test}/uniformscaling.jl (93%) diff --git a/NEWS.md b/NEWS.md index ad4482e1fd6e6f..c436c2f5dc2243 100644 --- a/NEWS.md +++ b/NEWS.md @@ -898,6 +898,9 @@ Deprecated or removed * Sparse array functionality has moved to the `SparseArrays` standard library module ([#25249]). + * Linear algebra functionality, and specifically the `LinAlg` module has moved to the + `LinearAlgebra` standard library module ([#25571]). + * `@printf` and `@sprintf` have been moved to the `Printf` standard library ([#23929],[#25056]). * The aliases `Complex32`, `Complex64` and `Complex128` have been deprecated in favor of `ComplexF16`, diff --git a/base/array.jl b/base/array.jl index 8d2a6a5443eb97..6a3390ef2aaf15 100644 --- a/base/array.jl +++ b/base/array.jl @@ -391,7 +391,12 @@ end function _one(unit::T, x::AbstractMatrix) where T m,n = size(x) m==n || throw(DimensionMismatch("multiplicative identity defined only for square matrices")) - Matrix{T}(I, m, m) + # Matrix{T}(I, m, m) + I = zeros(T, m, m) + for i in 1:m + I[i,i] = 1 + end + I end one(x::AbstractMatrix{T}) where {T} = _one(one(T), x) diff --git a/base/deprecated.jl b/base/deprecated.jl index b8a7b022cdbe42..db3bf5ca1a3a3b 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -196,27 +196,10 @@ next(p::Union{Process, ProcessChain}, i::Int) = (getindex(p, i), i + 1) return i == 1 ? getfield(p, p.openstream) : p end -import .LinAlg: cond -@deprecate cond(F::LinAlg.LU, p::Integer) cond(convert(AbstractArray, F), p) - # PR #21974 @deprecate versioninfo(verbose::Bool) versioninfo(verbose=verbose) @deprecate versioninfo(io::IO, verbose::Bool) versioninfo(io, verbose=verbose) -# PR #22188 -import .LinAlg: cholfact, cholfact! 
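# Illustrative sketch, not part of this diff: after this change, user code pulls
# linear-algebra names from the LinearAlgebra stdlib instead of Base.LinAlg.
# `I` moves out of Base as well, which is why the base/array.jl hunk above
# replaces `Matrix{T}(I, m, m)` with an explicit loop.
using LinearAlgebra
x = [1.0, 2.0, 3.0]
norm(x)                    # sqrt(14)
dot(x, x)                  # 14.0
Matrix{Float64}(I, 3, 3)   # 3×3 identity; `I` is now provided by LinearAlgebra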
-@deprecate cholfact!(A::StridedMatrix, uplo::Symbol, ::Type{Val{false}}) cholfact!(Hermitian(A, uplo), Val(false)) -@deprecate cholfact!(A::StridedMatrix, uplo::Symbol) cholfact!(Hermitian(A, uplo)) -@deprecate cholfact(A::StridedMatrix, uplo::Symbol, ::Type{Val{false}}) cholfact(Hermitian(A, uplo), Val(false)) -@deprecate cholfact(A::StridedMatrix, uplo::Symbol) cholfact(Hermitian(A, uplo)) -@deprecate cholfact!(A::StridedMatrix, uplo::Symbol, ::Type{Val{true}}; tol = 0.0) cholfact!(Hermitian(A, uplo), Val(true), tol = tol) -@deprecate cholfact(A::StridedMatrix, uplo::Symbol, ::Type{Val{true}}; tol = 0.0) cholfact(Hermitian(A, uplo), Val(true), tol = tol) - -# PR #22245 -import .LinAlg: isposdef, isposdef! -@deprecate isposdef(A::AbstractMatrix, UL::Symbol) isposdef(Hermitian(A, UL)) -@deprecate isposdef!(A::StridedMatrix, UL::Symbol) isposdef!(Hermitian(A, UL)) - # also remove all support machinery in src for current_module when removing this deprecation # and make Base.include an error _current_module() = ccall(:jl_get_current_module, Ref{Module}, ()) @@ -394,21 +377,6 @@ export conv, conv2, deconv, filt, filt!, xcorr @deprecate cov(X::AbstractVector, Y::AbstractVector, corrected::Bool) cov(X, Y, corrected=corrected) @deprecate cov(X::AbstractVecOrMat, Y::AbstractVecOrMat, vardim::Int, corrected::Bool) cov(X, Y, vardim, corrected=corrected) -# bkfact -import .LinAlg: bkfact, bkfact! -function bkfact(A::StridedMatrix, uplo::Symbol, symmetric::Bool = issymmetric(A), rook::Bool = false) - depwarn(string("`bkfact` with uplo and symmetric arguments is deprecated, ", - "use `bkfact($(symmetric ? "Symmetric(" : "Hermitian(")A, :$uplo))` instead."), - :bkfact) - return bkfact(symmetric ? Symmetric(A, uplo) : Hermitian(A, uplo), rook) -end -function bkfact!(A::StridedMatrix, uplo::Symbol, symmetric::Bool = issymmetric(A), rook::Bool = false) - depwarn(string("`bkfact!` with uplo and symmetric arguments is deprecated, ", - "use `bkfact!($(symmetric ? "Symmetric(" : "Hermitian(")A, :$uplo))` instead."), - :bkfact!) - return bkfact!(symmetric ? Symmetric(A, uplo) : Hermitian(A, uplo), rook) -end - # PR #22325 # TODO: when this replace is removed from deprecated.jl: # 1) rename the function replace_new from strings/util.jl to replace @@ -435,20 +403,6 @@ end @deprecate fill_to_length(t, val, ::Type{Val{N}}) where {N} fill_to_length(t, val, Val(N)) false @deprecate literal_pow(a, b, ::Type{Val{N}}) where {N} literal_pow(a, b, Val(N)) false @eval IteratorsMD @deprecate split(t, V::Type{Val{n}}) where {n} split(t, Val(n)) false -@deprecate sqrtm(A::UpperTriangular{T},::Type{Val{realmatrix}}) where {T,realmatrix} sqrtm(A, Val(realmatrix)) -import .LinAlg: lufact, lufact!, qrfact, qrfact!, cholfact, cholfact! 
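# Illustrative sketch, not part of this diff: the replacements spelled out by the
# depwarn strings above, written with the 0.7-era names that this patch relocates
# to LinearAlgebra. The uplo symbol moves into a Symmetric/Hermitian wrapper and
# `Val{...}` types become `Val(...)` instances.
using LinearAlgebra
A = [4.0 1.0; 1.0 3.0]
cholfact(Hermitian(A, :U))              # replaces cholfact(A, :U)
cholfact(Hermitian(A, :U), Val(false))  # replaces cholfact(A, :U, Val{false})
isposdef(Hermitian(A, :U))              # replaces isposdef(A, :U)
bkfact(Symmetric(A, :L))                # replaces bkfact(A, :L, true)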
-@deprecate lufact(A::AbstractMatrix, ::Type{Val{false}}) lufact(A, Val(false)) -@deprecate lufact(A::AbstractMatrix, ::Type{Val{true}}) lufact(A, Val(true)) -@deprecate lufact!(A::AbstractMatrix, ::Type{Val{false}}) lufact!(A, Val(false)) -@deprecate lufact!(A::AbstractMatrix, ::Type{Val{true}}) lufact!(A, Val(true)) -@deprecate qrfact(A::AbstractMatrix, ::Type{Val{false}}) qrfact(A, Val(false)) -@deprecate qrfact(A::AbstractMatrix, ::Type{Val{true}}) qrfact(A, Val(true)) -@deprecate qrfact!(A::AbstractMatrix, ::Type{Val{false}}) qrfact!(A, Val(false)) -@deprecate qrfact!(A::AbstractMatrix, ::Type{Val{true}}) qrfact!(A, Val(true)) -@deprecate cholfact(A::AbstractMatrix, ::Type{Val{false}}) cholfact(A, Val(false)) -@deprecate cholfact(A::AbstractMatrix, ::Type{Val{true}}; tol = 0.0) cholfact(A, Val(true); tol = tol) -@deprecate cholfact!(A::AbstractMatrix, ::Type{Val{false}}) cholfact!(A, Val(false)) -@deprecate cholfact!(A::AbstractMatrix, ::Type{Val{true}}; tol = 0.0) cholfact!(A, Val(true); tol = tol) @deprecate cat(::Type{Val{N}}, A::AbstractArray...) where {N} cat(Val(N), A...) @deprecate cat_t(::Type{Val{N}}, ::Type{T}, A, B) where {N,T} cat_t(Val(N), T, A, B) false @deprecate reshape(A::AbstractArray, ::Type{Val{N}}) where {N} reshape(A, Val(N)) @@ -483,12 +437,6 @@ function OverflowError() OverflowError("") end -# PR #22703 -import .LinAlg: Bidiagonal -@deprecate Bidiagonal(dv::AbstractVector, ev::AbstractVector, isupper::Bool) Bidiagonal(dv, ev, ifelse(isupper, :U, :L)) -@deprecate Bidiagonal(dv::AbstractVector, ev::AbstractVector, uplo::Char) Bidiagonal(dv, ev, ifelse(uplo == 'U', :U, :L)) -@deprecate Bidiagonal(A::AbstractMatrix, isupper::Bool) Bidiagonal(A, ifelse(isupper, :U, :L)) - @deprecate fieldnames(v) fieldnames(typeof(v)) # nfields(::Type) deprecation in builtins.c: update nfields tfunc in compiler/tfuncs.jl when it is removed. # also replace `_nfields` with `nfields` in summarysize.c when this is removed. @@ -532,45 +480,6 @@ import .LinAlg: Bidiagonal # issue #6466 # `write` on non-isbits arrays is deprecated in io.jl. -# PR #22925 -# also uncomment constructor tests in test/linalg/bidiag.jl -import .LinAlg: Bidiagonal -function Bidiagonal(dv::AbstractVector{T}, ev::AbstractVector{S}, uplo::Symbol) where {T,S} - depwarn(string("`Bidiagonal(dv::AbstractVector{T}, ev::AbstractVector{S}, uplo::Symbol) where {T, S}`", - " is deprecated, manually convert both vectors to the same type instead."), :Bidiagonal) - R = promote_type(T, S) - Bidiagonal(convert(Vector{R}, dv), convert(Vector{R}, ev), uplo) -end - -# PR #23035 -# also uncomment constructor tests in test/linalg/tridiag.jl -import .LinAlg: SymTridiagonal -function SymTridiagonal(dv::AbstractVector{T}, ev::AbstractVector{S}) where {T,S} - depwarn(string("`SymTridiagonal(dv::AbstractVector{T}, ev::AbstractVector{S}) ", - "where {T, S}` is deprecated, convert both vectors to the same type instead."), :SymTridiagonal) - R = promote_type(T, S) - SymTridiagonal(convert(Vector{R}, dv), convert(Vector{R}, ev)) -end - -# PR #23154 -# also uncomment constructor tests in test/linalg/tridiag.jl -import .LinAlg: Tridiagonal -function Tridiagonal(dl::AbstractVector{Tl}, d::AbstractVector{Td}, du::AbstractVector{Tu}) where {Tl,Td,Tu} - depwarn(string("`Tridiagonal(dl::AbstractVector{Tl}, d::AbstractVector{Td}, du::AbstractVector{Tu}) ", - "where {Tl, Td, Tu}` is deprecated, convert all vectors to the same type instead."), :Tridiagonal) - Tridiagonal(map(v->convert(Vector{promote_type(Tl,Td,Tu)}, v), (dl, d, du))...) 
-end - -# deprecate sqrtm in favor of sqrt -@deprecate sqrtm sqrt - -# deprecate expm in favor of exp -@deprecate expm! exp! -@deprecate expm exp - -# deprecate logm in favor of log -@deprecate logm log - # PR #23092 @eval LibGit2 begin function prompt(msg::AbstractString; default::AbstractString="", password::Bool=false) @@ -672,17 +581,12 @@ end # PR #23066 @deprecate cfunction(f, r, a::Tuple) cfunction(f, r, Tuple{a...}) -# PR #23373 -import .LinAlg: diagm -@deprecate diagm(A::BitMatrix) BitMatrix(Diagonal(vec(A))) - # PR 23341 @eval GMP @deprecate gmp_version() version() false @eval GMP @Base.deprecate_binding GMP_VERSION VERSION false @eval GMP @deprecate gmp_bits_per_limb() bits_per_limb() false @eval GMP @Base.deprecate_binding GMP_BITS_PER_LIMB BITS_PER_LIMB false @eval MPFR @deprecate get_version() version() false -@eval LinAlg.LAPACK @deprecate laver() version() false # PR #23427 @deprecate_binding e ℯ true ", use ℯ (\\euler) or `Base.MathConstants.e`" @@ -787,32 +691,6 @@ end @deprecate contains(eq::Function, itr, x) any(y->eq(y,x), itr) -# deprecate zeros(D::Diagonal[, opts...]) -function zeros(D::Diagonal) - depwarn(string("`zeros(D::Diagonal)` is deprecated, use ", - "`Diagonal(fill!(similar(D.diag), 0))` instead, or ", - "`Diagonal(fill!(similar(D.diag), zero(eltype(D.diag))))` where necessary."), :zeros) - return Diagonal(fill!(similar(D.diag), zero(eltype(D.diag)))) -end -function zeros(D::Diagonal, ::Type{T}) where {T} - depwarn(string("`zeros(D::Diagonal, ::Type{T}) where T` is deprecated, use ", - "`Diagonal(fill!(similar(D.diag, T), 0))` instead, or ", - "`Diagonal(fill!(similar(D.diag, T), zero(T)))` where necessary."), :zeros) - return Diagonal(fill!(similar(D.diag, T), zero(T))) -end -function zeros(D::Diagonal, ::Type{T}, dims::Dims) where {T} - depwarn(string("`zeros(D::Diagonal, ::Type{T}, dims::Dims) where T` is deprecated, ", - "use `fill!(similar(D, T, dims), 0)` instead, or ", - "`fill!(similar(D, T, dims), zero(T))` where necessary."), :zeros) - return fill!(similar(D, T, dims), zero(T)) -end -function zeros(D::Diagonal, ::Type{T}, dims::Integer...) where {T} - depwarn(string("`zeros(D::Diagonal, ::Type{T}, dims::Integer...) where T` is deprecated, ", - "use `fill!(similar(D, T, dims), 0)` instead, or ", - "`fill!(similar(D, T, dims), zero(T))` where necessary."), :zeros) - return fill!(similar(D, T, dims), zero(T)) -end - # PR #23690 # `SSHCredential` and `UserPasswordCredential` constructors using `prompt_if_incorrect` # are deprecated in base/libgit2/types.jl. @@ -874,53 +752,6 @@ end @deprecate get_creds!(cache::CachedCredentials, credid, default) get!(cache, credid, default) end -## goodbeye, eye! -export eye -function eye(m::Integer) - depwarn(string("`eye(m::Integer)` has been deprecated in favor of `I` and `Matrix` ", - "constructors. For a direct replacement, consider `Matrix(1.0I, m, m)` or ", - "`Matrix{Float64}(I, m, m)`. If `Float64` element type is not necessary, ", - "consider the shorter `Matrix(I, m, m)` (with default `eltype(I)` `Bool`)."), :eye) - return Matrix{Float64}(I, m, m) -end -function eye(::Type{T}, m::Integer) where T - depwarn(string("`eye(T::Type, m::Integer)` has been deprecated in favor of `I` and ", - "`Matrix` constructors. For a direct replacement, consider `Matrix{T}(I, m, m)`. 
If ", - "`T` element type is not necessary, consider the shorter `Matrix(I, m, m)`", - "(with default `eltype(I)` `Bool`)"), :eye) - return Matrix{T}(I, m, m) -end -function eye(m::Integer, n::Integer) - depwarn(string("`eye(m::Integer, n::Integer)` has been deprecated in favor of `I` and ", - "`Matrix` constructors. For a direct replacement, consider `Matrix(1.0I, m, n)` ", - "or `Matrix{Float64}(I, m, n)`. If `Float64` element type is not necessary, ", - "consider the shorter `Matrix(I, m, n)` (with default `eltype(I)` `Bool`)."), :eye) - return Matrix{Float64}(I, m, n) -end -function eye(::Type{T}, m::Integer, n::Integer) where T - depwarn(string("`eye(T::Type, m::Integer, n::Integer)` has been deprecated in favor of ", - "`I` and `Matrix` constructors. For a direct replacement, consider `Matrix{T}(I, m, n)`.", - "If `T` element type is not necessary, consider the shorter `Matrix(I, m, n)` ", - "(with default `eltype(I)` `Bool`)."), :eye) - return Matrix{T}(I, m, n) -end -function eye(A::AbstractMatrix{T}) where T - depwarn(string("`eye(A::AbstractMatrix{T}) where T` has been deprecated in favor of `I` and ", - "`Matrix` constructors. For a direct replacement, consider `Matrix{eltype(A)}(I, size(A))`.", - "If `eltype(A)` element type is not necessary, consider the shorter `Matrix(I, size(A))` ", - "(with default `eltype(I)` `Bool`)."), :eye) - return Matrix(one(T)I, size(A)) -end -function eye(::Type{Diagonal{T}}, n::Int) where T - depwarn(string("`eye(DT::Type{Diagonal{T}}, n::Int) where T` has been deprecated in favor of `I` ", - "and `Diagonal` constructors. For a direct replacement, consider `Diagonal{T}(I, n)`. ", - "If `T` element type is not necessary, consider the shorter `Diagonal(I, n)` ", - "(with default `eltype(I)` `Bool`)."), :eye) - return Diagonal{T}(I, n) -end -@eval Base.LinAlg import Base.eye - - export tic, toq, toc function tic() depwarn("`tic()` is deprecated, use `@time`, `@elapsed`, or calls to `time_ns()` instead.", :tic) @@ -960,43 +791,6 @@ end Broadcast.dotview(A::AbstractArray{<:AbstractArray}, args::Integer...) = getindex(A, args...) # Upon removing deprecations, also enable the @testset "scalar .=" in test/broadcast.jl -# PR #23816: deprecation of gradient -export gradient -@eval Base.LinAlg begin - export gradient - - function gradient(args...) - Base.depwarn("`gradient` is deprecated and will be removed in the next release.", :gradient) - return _gradient(args...) 
- end - - _gradient(F::BitVector) = _gradient(Array(F)) - _gradient(F::BitVector, h::Real) = _gradient(Array(F), h) - _gradient(F::Vector, h::BitVector) = _gradient(F, Array(h)) - _gradient(F::BitVector, h::Vector) = _gradient(Array(F), h) - _gradient(F::BitVector, h::BitVector) = _gradient(Array(F), Array(h)) - - function _gradient(F::AbstractVector, h::Vector) - n = length(F) - T = typeof(oneunit(eltype(F))/oneunit(eltype(h))) - g = similar(F, T) - if n == 1 - g[1] = zero(T) - elseif n > 1 - g[1] = (F[2] - F[1]) / (h[2] - h[1]) - g[n] = (F[n] - F[n-1]) / (h[end] - h[end-1]) - if n > 2 - h = h[3:n] - h[1:n-2] - g[2:n-1] = (F[3:n] - F[1:n-2]) ./ h - end - end - g - end - - _gradient(F::AbstractVector) = _gradient(F, [1:length(F);]) - _gradient(F::AbstractVector, h::Real) = _gradient(F, [h*(1:length(F));]) -end - @noinline function getaddrinfo(callback::Function, host::AbstractString) depwarn("`getaddrinfo` with a callback function is deprecated, wrap code in `@async` instead for deferred execution.", :getaddrinfo) @async begin @@ -1022,26 +816,6 @@ end # After deprecation is removed, enable the @testset "indexing by Bool values" in test/arrayops.jl # Also un-comment the new definition in base/indices.jl -# deprecate odd fill! methods -@deprecate fill!(D::Diagonal, x) LinAlg.fillstored!(D, x) -@deprecate fill!(A::Base.LinAlg.AbstractTriangular, x) LinAlg.fillstored!(A, x) - -# PR #25030 -@eval LinAlg @deprecate fillslots! fillstored! false - -function diagm(v::BitVector) - depwarn(string("`diagm(v::BitVector)` is deprecated, use `diagm(0 => v)` or ", - "`BitMatrix(Diagonal(v))` instead."), :diagm) - return BitMatrix(Diagonal(v)) -end -function diagm(v::AbstractVector) - depwarn(string("`diagm(v::AbstractVector)` is deprecated, use `diagm(0 => v)` or ", - "`Matrix(Diagonal(v))` instead."), :diagm) - return Matrix(Diagonal(v)) -end -@deprecate diagm(v::AbstractVector, k::Integer) diagm(k => v) -@deprecate diagm(x::Number) fill(x, 1, 1) - # deprecate BitArray{...}(shape...) constructors to BitArray{...}(uninitialized, shape...) equivalents @deprecate BitArray{N}(dims::Vararg{Int,N}) where {N} BitArray{N}(uninitialized, dims) @deprecate BitArray(dims::NTuple{N,Int}) where {N} BitArray(uninitialized, dims...) @@ -1062,123 +836,10 @@ function full(A::AbstractArray) return A end -# full for structured arrays -function full(A::Union{Diagonal,Bidiagonal,Tridiagonal,SymTridiagonal}) - mattypestr = isa(A, Diagonal) ? "Diagonal" : - isa(A, Bidiagonal) ? "Bidiagonal" : - isa(A, Tridiagonal) ? "Tridiagonal" : - isa(A, SymTridiagonal) ? "SymTridiagonal" : - error("should not be reachable!") - depwarn(string( - "`full(A::$(mattypestr))` (and `full` in general) has been deprecated. ", - "To replace `full(A::$(mattypestr))`, consider `Matrix(A)` or, if that ", - "option is too narrow, `Array(A)`. Also consider `SparseMatrixCSC(A)` ", - "or, if that option is too narrow, `sparse(A)`."), :full) - return Matrix(A) -end - -# full for factorizations -function full(F::Union{LinAlg.LU,LinAlg.LQ,LinAlg.QR,LinAlg.QRPivoted,LinAlg.QRCompactWY, - LinAlg.SVD,LinAlg.LDLt,LinAlg.Schur,LinAlg.Eigen,LinAlg.Hessenberg, - LinAlg.Cholesky,LinAlg.CholeskyPivoted}) - facttypestr = isa(F, LinAlg.LU) ? "LU" : - isa(F, LinAlg.LQ) ? "LQ" : - isa(F, LinAlg.QR) ? "QR" : - isa(F, LinAlg.QRPivoted) ? "QRPivoted" : - isa(F, LinAlg.QRCompactWY) ? "QRCompactWY" : - isa(F, LinAlg.SVD) ? "SVD" : - isa(F, LinAlg.LDLt) ? "LDLt" : - isa(F, LinAlg.Schur) ? "Schur" : - isa(F, LinAlg.Eigen) ? "Eigen" : - isa(F, LinAlg.Hessenberg) ? 
"Hessenberg" : - isa(F, LinAlg.Cholesky) ? "Cholesky" : - isa(F, LinAlg.CholeskyPivoted) ? "CholeskyPivoted" : - error("should not be reachable!") - depwarn(string( - "`full(F::$(facttypestr))` (and `full` in general) has been deprecated. ", - "To replace `full(F::$(facttypestr))`, consider `Matrix(F)`, `AbstractMatrix(F)` or, ", - "if those options are too narrow, `Array(F)` or `AbstractArray(F)`."), :full) - return AbstractMatrix(F) -end - -# full for implicit orthogonal factors -function full(Q::LinAlg.HessenbergQ) - depwarn(string( - "`full(Q::HessenbergQ)` (and `full` in general) has been deprecated. ", - "To replace `full(Q::HessenbergQ)`, consider `Matrix(Q)` or, ", - "if that option is too narrow, `Array(Q)`."), :full) - return Matrix(Q) -end -function full(Q::LinAlg.LQPackedQ; thin::Bool = true) - depwarn(string( - "`full(Q::LQPackedQ; thin::Bool = true)` (and `full` in general) ", - "has been deprecated. To replace `full(Q::LQPackedQ, true)`, ", - "consider `Matrix(Q)` or `Array(Q)`. To replace `full(Q::LQPackedQ, false)`, ", - "consider `Base.LinAlg.mul!(Q, Matrix{eltype(Q)}(I, size(Q.factors, 2), size(Q.factors, 2)))`."), :full) - return thin ? Array(Q) : Base.LinAlg.mul!(Q, Matrix{eltype(Q)}(I, size(Q.factors, 2), size(Q.factors, 2))) -end -function full(Q::Union{LinAlg.QRPackedQ,LinAlg.QRCompactWYQ}; thin::Bool = true) - qtypestr = isa(Q, LinAlg.QRPackedQ) ? "QRPackedQ" : - isa(Q, LinAlg.QRCompactWYQ) ? "QRCompactWYQ" : - error("should not be reachable!") - depwarn(string( - "`full(Q::$(qtypestr); thin::Bool = true)` (and `full` in general) ", - "has been deprecated. To replace `full(Q::$(qtypestr), true)`, ", - "consider `Matrix(Q)` or `Array(Q)`. To replace `full(Q::$(qtypestr), false)`, ", - "consider `Base.LinAlg.mul!(Q, Matrix{eltype(Q)}(I, size(Q.factors, 1), size(Q.factors, 1)))`."), :full) - return thin ? Array(Q) : Base.LinAlg.mul!(Q, Matrix{eltype(Q)}(I, size(Q.factors, 1), size(Q.factors, 1))) -end - -# full for symmetric / hermitian / triangular wrappers -function full(A::Symmetric) - depwarn(string( - "`full(A::Symmetric)` (and `full` in general) has been deprecated. ", - "To replace `full(A::Symmetric)`, as appropriate consider `Matrix(A)`, ", - "`Array(A)`, `SparseMatrixCSC(A)`, `sparse(A)`, `copyto!(similar(parent(A)), A)`, ", - "or `Base.LinAlg.copytri!(copy(parent(A)), A.uplo)`."), :full) - return Matrix(A) -end -function full(A::Hermitian) - depwarn(string( - "`full(A::Hermitian)` (and `full` in general) has been deprecated. ", - "To replace `full(A::Hermitian)`, as appropriate consider `Matrix(A)`, ", - "`Array(A)`, `SparseMatrixCSC(A)`, `sparse(A)`, `copyto!(similar(parent(A)), A)`, ", - "or `Base.LinAlg.copytri!(copy(parent(A)), A.uplo, true)`."), :full) - return Matrix(A) -end -function full(A::Union{UpperTriangular,LowerTriangular}) - (tritypestr, tri!str) = - isa(A, UpperTriangular) ? ("UpperTriangular", "triu!") : - isa(A, LowerTriangular) ? ("LowerTriangular", "tril!") : - error("should not be reachable!") - depwarn(string( - "`full(A::$(tritypestr))` (and `full` in general) has been deprecated. ", - "To replace `full(A::$(tritypestr))`, as appropriate consider `Matrix(A)`, ", - "`Array(A)`, `SparseMatrixCSC(A)`, `sparse(A)`, `copyto!(similar(parent(A)), A)`, ", - "or `$(tri!str)(copy(parent(A)))`."), :full) - return Matrix(A) -end -function full(A::Union{LinAlg.UnitUpperTriangular,LinAlg.UnitLowerTriangular}) - tritypestr = isa(A, LinAlg.UnitUpperTriangular) ? "LinAlg.UnitUpperTriangular" : - isa(A, LinAlg.UnitLowerTriangular) ? 
"LinAlg.UnitLowerTriangular" : - error("should not be reachable!") - depwarn(string( - "`full(A::$(tritypestr))` (and `full` in general) has been deprecated. ", - "To replace `full(A::$(tritypestr))`, as appropriate consider `Matrix(A)`, ", - "`Array(A)`, `SparseMatrixCSC(A)`, `sparse(A)`, or `copyto!(similar(parent(A)), A)`."), :full) - return Matrix(A) -end - - # issue #20816 @deprecate strwidth textwidth @deprecate charwidth textwidth -# TODO: after 0.7, remove thin keyword argument and associated logic from... -# (1) base/linalg/svd.jl -# (2) base/linalg/qr.jl -# (3) base/linalg/lq.jl - @deprecate find(x::Number) findall(!iszero, x) @deprecate findnext(A, v, i::Integer) findnext(equalto(v), A, i) @deprecate findfirst(A, v) findfirst(equalto(v), A) @@ -1236,14 +897,8 @@ end # issue #20899 # TODO: delete JULIA_HOME deprecation in src/init.c -@eval LinAlg begin - @deprecate chol!(x::Number, uplo) chol(x) false -end - @deprecate cumsum(A::AbstractArray) cumsum(A, 1) @deprecate cumprod(A::AbstractArray) cumprod(A, 1) -import .LinAlg: diff -@deprecate diff(A::AbstractMatrix) diff(A, 1) # issue #16307 @deprecate finalizer(o, f::Function) finalizer(f, o) @@ -1267,341 +922,6 @@ end ### deprecations for lazier, less jazzy linalg transition in the next several blocks ### - -# deprecate ConjArray -# TODO: between 0.7 and 1.0 remove -# 1) the type definitions in base/linalg/conjarray.jl -# 2) the include("base/linalg/conjarray.jl") from base/linalg/linalg.jl -# 3) the file base/linalg/conjarray.jl itself -@eval Base.LinAlg begin - export ConjArray, ConjVector, ConjMatrix - - function ConjArray(a::AbstractArray{T,N}) where {T,N} - Base.depwarn(_ConjArray_depstring(), :ConjArray) - return ConjArray{conj_type(T),N,typeof(a)}(a) - end - function ConjVector(v::AbstractVector{T}) where {T} - Base.depwarn(_ConjArray_depstring(), :ConjArray) - return ConjArray{conj_type(T),1,typeof(v)}(v) - end - function ConjMatrix(m::AbstractMatrix{T}) where {T} - Base.depwarn(_ConjArray_depstring(), :ConjArray) - return ConjArray{conj_type(T),2,typeof(m)}(m) - end - - _ConjArray_depstring() = string("`ConjRowVector` and `RowVector` have been deprecated in favor ", - "of `Adjoint` and `Transpose`, and, as part of the implementation of `ConjRowVector`", - "/`RowVector`, `ConjArray`s have been deprecated as well. Please see 0.7's NEWS.md ", - "for a more detailed explanation of the associated changes.") - - # This type can cause the element type to change under conjugation - e.g. an array of complex arrays. - @inline conj_type(x) = conj_type(typeof(x)) - @inline conj_type(::Type{T}) where {T} = promote_op(conj, T) - - @inline parent(c::ConjArray) = c.parent - @inline parent_type(c::ConjArray) = parent_type(typeof(c)) - @inline parent_type(::Type{ConjArray{T,N,A}}) where {T,N,A} = A - - @inline size(a::ConjArray) = size(a.parent) - IndexStyle(::CA) where {CA<:ConjArray} = IndexStyle(parent_type(CA)) - IndexStyle(::Type{CA}) where {CA<:ConjArray} = IndexStyle(parent_type(CA)) - - @propagate_inbounds getindex(a::ConjArray{T,N}, i::Int) where {T,N} = conj(getindex(a.parent, i)) - @propagate_inbounds getindex(a::ConjArray{T,N}, i::Vararg{Int,N}) where {T,N} = conj(getindex(a.parent, i...)) - @propagate_inbounds setindex!(a::ConjArray{T,N}, v, i::Int) where {T,N} = setindex!(a.parent, conj(v), i) - @propagate_inbounds setindex!(a::ConjArray{T,N}, v, i::Vararg{Int,N}) where {T,N} = setindex!(a.parent, conj(v), i...) 
- - @inline similar(a::ConjArray, ::Type{T}, dims::Dims{N}) where {T,N} = similar(parent(a), T, dims) - - # Currently, this is default behavior for RowVector only - @inline conj(a::ConjArray) = parent(a) - - # Helper functions, currently used by RowVector - @inline _conj(a::AbstractArray) = ConjArray(a) - @inline _conj(a::AbstractArray{T}) where {T<:Real} = a - @inline _conj(a::ConjArray) = parent(a) - @inline _conj(a::ConjArray{T}) where {T<:Real} = parent(a) -end -@eval Base begin - export ConjArray -end - -# deprecate ConjRowVector/RowVector -# TODO: between 0.7 and 1.0 remove -# 1) the type definitions in base/linalg/rowvector.jl -# 2) the include("base/linalg/rowvector.jl") from base/linalg/linalg.jl -# 3) the file base/linalg/rowvector.jl itself -# 4) the RowVectors in the Unions in base/sparse/sparsevector.jl around lines 995, 1010, 1011, and 1012 -@eval Base.LinAlg begin - export RowVector - - _RowVector_depstring() = string("`ConjRowVector` and `RowVector` have been deprecated in favor ", - "of `Adjoint` and `Transpose`. Please see 0.7's NEWS.md for a more detailed explanation ", - "of the associated changes.") - - @inline check_types(::Type{T1}, ::AbstractVector{T2}) where {T1,T2} = check_types(T1, T2) - @pure check_types(::Type{T1}, ::Type{T2}) where {T1,T2} = T1 === transpose_type(T2) ? nothing : - error("Element type mismatch. Tried to create a `RowVector{$T1}` from an `AbstractVector{$T2}`") - - # The element type may be transformed as transpose is recursive - @inline transpose_type(::Type{T}) where {T} = promote_op(transpose, T) - - # Constructors that take a vector - function RowVector(vec::AbstractVector{T}) where {T} - Base.depwarn(_RowVector_depstring(), :RowVector) - return RowVector{transpose_type(T),typeof(vec)}(vec) - end - function RowVector{T}(vec::AbstractVector{T}) where {T} - Base.depwarn(_RowVector_depstring(), :RowVector) - return RowVector{T,typeof(vec)}(vec) - end - - # Constructors that take a size and default to Array - function RowVector{T}(::Uninitialized, n::Int) where {T} - Base.depwarn(_RowVector_depstring(), :RowVector) - return RowVector{T}(Vector{transpose_type(T)}(uninitialized, n)) - end - function RowVector{T}(::Uninitialized, n1::Int, n2::Int) where {T} - Base.depwarn(_RowVector_depstring(), :RowVector) - return n1 == 1 ? RowVector{T}(Vector{transpose_type(T)}(uninitialized, n2)) : - error("RowVector expects 1×N size, got ($n1,$n2)") - end - function RowVector{T}(::Uninitialized, n::Tuple{Int}) where {T} - Base.depwarn(_RowVector_depstring(), :RowVector) - return RowVector{T}(Vector{transpose_type(T)}(uninitialized, n[1])) - end - function RowVector{T}(::Uninitialized, n::Tuple{Int,Int}) where {T} - Base.depwarn(_RowVector_depstring(), :RowVector) - return n[1] == 1 ? RowVector{T}(Vector{transpose_type(T)}(uninitialized, n[2])) : - error("RowVector expects 1×N size, got $n") - end - - # Conversion of underlying storage - convert(::Type{RowVector{T,V}}, rowvec::RowVector) where {T,V<:AbstractVector} = - RowVector{T,V}(convert(V,rowvec.vec)) - - # similar tries to maintain the RowVector wrapper and the parent type - @inline similar(rowvec::RowVector) = RowVector(similar(parent(rowvec))) - @inline similar(rowvec::RowVector, ::Type{T}) where {T} = RowVector(similar(parent(rowvec), transpose_type(T))) - - # Resizing similar currently loses its RowVector property. 
- @inline similar(rowvec::RowVector, ::Type{T}, dims::Dims{N}) where {T,N} = similar(parent(rowvec), T, dims) - - # Basic methods - - # replaced in the Adjoint/Transpose transition - # """ - # transpose(v::AbstractVector) - # - # The transposition operator (`.'`). - # - # # Examples - # ```jldoctest - # julia> v = [1,2,3] - # 3-element Array{Int64,1}: - # 1 - # 2 - # 3 - # - # julia> transpose(v) - # 1×3 RowVector{Int64,Array{Int64,1}}: - # 1 2 3 - # ``` - # """ - # @inline transpose(vec::AbstractVector) = RowVector(vec) - # @inline adjoint(vec::AbstractVector) = RowVector(_conj(vec)) - - # methods necessary to preserve RowVector's behavior through the Adjoint/Transpose transition - rvadjoint(v::AbstractVector) = RowVector(_conj(v)) - rvtranspose(v::AbstractVector) = RowVector(v) - rvadjoint(v::RowVector) = conj(v.vec) - rvadjoint(v::RowVector{<:Real}) = v.vec - rvtranspose(v::RowVector) = v.vec - rvtranspose(v::ConjRowVector) = copy(v.vec) - rvadjoint(x) = adjoint(x) - rvtranspose(x) = transpose(x) - - @inline transpose(rowvec::RowVector) = rowvec.vec - @inline transpose(rowvec::ConjRowVector) = copy(rowvec.vec) # remove the ConjArray wrapper from any raw vector - @inline adjoint(rowvec::RowVector) = conj(rowvec.vec) - @inline adjoint(rowvec::RowVector{<:Real}) = rowvec.vec - - parent(rowvec::RowVector) = rowvec.vec - vec(rowvec::RowVector) = rowvec.vec - - """ - conj(v::RowVector) - - Return a [`ConjArray`](@ref) lazy view of the input, where each element is conjugated. - - # Examples - ```jldoctest - julia> v = RowVector([1+im, 1-im]) - 1×2 RowVector{Complex{Int64},Array{Complex{Int64},1}}: - 1+1im 1-1im - - julia> conj(v) - 1×2 RowVector{Complex{Int64},ConjArray{Complex{Int64},1,Array{Complex{Int64},1}}}: - 1-1im 1+1im - ``` - """ - @inline conj(rowvec::RowVector) = RowVector(_conj(rowvec.vec)) - @inline conj(rowvec::RowVector{<:Real}) = rowvec - - # AbstractArray interface - @inline length(rowvec::RowVector) = length(rowvec.vec) - @inline size(rowvec::RowVector) = (1, length(rowvec.vec)) - @inline size(rowvec::RowVector, d) = ifelse(d==2, length(rowvec.vec), 1) - @inline axes(rowvec::RowVector) = (Base.OneTo(1), axes(rowvec.vec)[1]) - @inline axes(rowvec::RowVector, d) = ifelse(d == 2, axes(rowvec.vec)[1], Base.OneTo(1)) - IndexStyle(::RowVector) = IndexLinear() - IndexStyle(::Type{<:RowVector}) = IndexLinear() - - @propagate_inbounds getindex(rowvec::RowVector, i::Int) = rvtranspose(rowvec.vec[i]) - @propagate_inbounds setindex!(rowvec::RowVector, v, i::Int) = (setindex!(rowvec.vec, rvtranspose(v), i); rowvec) - - # Keep a RowVector where appropriate - @propagate_inbounds getindex(rowvec::RowVector, ::Colon, i::Int) = rvtranspose.(rowvec.vec[i:i]) - @propagate_inbounds getindex(rowvec::RowVector, ::Colon, inds::AbstractArray{Int}) = RowVector(rowvec.vec[inds]) - @propagate_inbounds getindex(rowvec::RowVector, ::Colon, ::Colon) = RowVector(rowvec.vec[:]) - - # helper function for below - @inline to_vec(rowvec::RowVector) = map(rvtranspose, rvtranspose(rowvec)) - @inline to_vec(x::Number) = x - @inline to_vecs(rowvecs...) = (map(to_vec, rowvecs)...,) - - # map: Preserve the RowVector by un-wrapping and re-wrapping, but note that `f` - # expects to operate within the transposed domain, so to_vec transposes the elements - @inline map(f, rowvecs::RowVector...) = RowVector(map(rvtranspose∘f, to_vecs(rowvecs...)...)) - - # broacast (other combinations default to higher-dimensional array) - @inline broadcast(f, rowvecs::Union{Number,RowVector}...) 
= - RowVector(broadcast(transpose∘f, to_vecs(rowvecs...)...)) - - # Horizontal concatenation # - - @inline hcat(X::RowVector...) = rvtranspose(vcat(map(rvtranspose, X)...)) - @inline hcat(X::Union{RowVector,Number}...) = rvtranspose(vcat(map(rvtranspose, X)...)) - - @inline typed_hcat(::Type{T}, X::RowVector...) where {T} = - rvtranspose(typed_vcat(T, map(rvtranspose, X)...)) - @inline typed_hcat(::Type{T}, X::Union{RowVector,Number}...) where {T} = - rvtranspose(typed_vcat(T, map(rvtranspose, X)...)) - - # Multiplication # - - # inner product -> dot product specializations - @inline *(rowvec::RowVector{T}, vec::AbstractVector{T}) where {T<:Real} = dot(parent(rowvec), vec) - @inline *(rowvec::ConjRowVector{T}, vec::AbstractVector{T}) where {T<:Real} = dot(rvadjoint(rowvec), vec) - @inline *(rowvec::ConjRowVector, vec::AbstractVector) = dot(rvadjoint(rowvec), vec) - - # Generic behavior - @inline function *(rowvec::RowVector, vec::AbstractVector) - if length(rowvec) != length(vec) - throw(DimensionMismatch("A has dimensions $(size(rowvec)) but B has dimensions $(size(vec))")) - end - sum(@inbounds(return rowvec[i]*vec[i]) for i = 1:length(vec)) - end - @inline *(rowvec::RowVector, mat::AbstractMatrix) = rvtranspose(transpose(mat) * rvtranspose(rowvec)) - *(::RowVector, ::RowVector) = throw(DimensionMismatch("Cannot multiply two transposed vectors")) - @inline *(vec::AbstractVector, rowvec::RowVector) = vec .* rowvec - *(vec::AbstractVector, rowvec::AbstractVector) = throw(DimensionMismatch("Cannot multiply two vectors")) - - # Transposed forms - *(::RowVector, ::Transpose{<:Any,<:AbstractVector}) = - throw(DimensionMismatch("Cannot multiply two transposed vectors")) - *(rowvec::RowVector, transmat::Transpose{<:Any,<:AbstractMatrix}) = - (mat = transmat.parent; rvtranspose(mat * rvtranspose(rowvec))) - *(rowvec1::RowVector, transrowvec2::Transpose{<:Any,<:RowVector}) = - (rowvec2 = transrowvec2.parent; rowvec1*rvtranspose(rowvec2)) - *(::AbstractVector, ::Transpose{<:Any,<:RowVector}) = - throw(DimensionMismatch("Cannot multiply two vectors")) - *(mat::AbstractMatrix, transrowvec::Transpose{<:Any,<:RowVector}) = - (rowvec = transrowvec.parent; mat * rvtranspose(rowvec)) - - *(transrowvec::Transpose{<:Any,<:RowVector}, transvec::Transpose{<:Any,<:AbstractVector}) = - rvtranspose(transrowvec.parent) * transpose(transvec.parent) - *(transrowvec1::Transpose{<:Any,<:RowVector}, transrowvec2::Transpose{<:Any,<:RowVector}) = - throw(DimensionMismatch("Cannot multiply two vectors")) - *(transvec::Transpose{<:Any,<:AbstractVector}, transrowvec::Transpose{<:Any,<:RowVector}) = - transpose(transvec.parent)*rvtranspose(transrowvec.parent) - *(transmat::Transpose{<:Any,<:AbstractMatrix}, transrowvec::Transpose{<:Any,<:RowVector}) = - transmat * rvtranspose(transrowvec.parent) - - *(::Transpose{<:Any,<:RowVector}, ::AbstractVector) = - throw(DimensionMismatch("Cannot multiply two vectors")) - *(transrowvec1::Transpose{<:Any,<:RowVector}, rowvec2::RowVector) = - rvtranspose(transrowvec1.parent) * rowvec2 - *(transvec::Transpose{<:Any,<:AbstractVector}, rowvec::RowVector) = - throw(DimensionMismatch("Cannot multiply two transposed vectors")) - - # Conjugated forms - *(::RowVector, ::Adjoint{<:Any,<:AbstractVector}) = - throw(DimensionMismatch("Cannot multiply two transposed vectors")) - *(rowvec::RowVector, adjmat::Adjoint{<:Any,<:AbstractMatrix}) = - rvadjoint(adjmat.parent * rvadjoint(rowvec)) - *(rowvec1::RowVector, adjrowvec2::Adjoint{<:Any,<:RowVector}) = - rowvec1 * rvadjoint(adjrowvec2.parent) - 
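# Illustrative sketch, not part of this diff: per the deprecation messages above,
# row-vector semantics now come from the lazy Adjoint/Transpose wrappers rather
# than RowVector/ConjRowVector.
using LinearAlgebra
v = [1 + im, 2 - im]
v'               # Adjoint wrapper (conjugating)
transpose(v)     # Transpose wrapper (non-conjugating)
v' * v           # inner product, still a scalar (here 7 + 0im)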
*(vec::AbstractVector, adjrowvec::Adjoint{<:Any,<:RowVector}) = - throw(DimensionMismatch("Cannot multiply two vectors")) - *(mat::AbstractMatrix, adjrowvec::Adjoint{<:Any,<:RowVector}) = - mat * rvadjoint(adjrowvec.parent) - - *(adjrowvec::Adjoint{<:Any,<:RowVector}, adjvec::Adjoint{<:Any,<:AbstractVector}) = - rvadjoint(adjrowvec.parent) * adjoint(adjvec.parent) - *(adjrowvec1::Adjoint{<:Any,<:RowVector}, adjrowvec2::Adjoint{<:Any,<:RowVector}) = - throw(DimensionMismatch("Cannot multiply two vectors")) - *(adjvec::Adjoint{<:Any,<:AbstractVector}, adjrowvec::Adjoint{<:Any,<:RowVector}) = - adjoint(adjvec.parent)*rvadjoint(adjrowvec.parent) - *(adjmat::Adjoint{<:Any,<:AbstractMatrix}, adjrowvec::Adjoint{<:Any,<:RowVector}) = - adjoint(adjmat.parent) * rvadjoint(adjrowvec.parent) - - *(::Adjoint{<:Any,<:RowVector}, ::AbstractVector) = throw(DimensionMismatch("Cannot multiply two vectors")) - *(adjrowvec1::Adjoint{<:Any,<:RowVector}, rowvec2::RowVector) = rvadjoint(adjrowvec1.parent) * rowvec2 - *(adjvec::Adjoint{<:Any,<:AbstractVector}, rowvec::RowVector) = throw(DimensionMismatch("Cannot multiply two transposed vectors")) - - # Pseudo-inverse - pinv(v::RowVector, tol::Real=0) = rvadjoint(pinv(rvadjoint(v), tol)) - - # Left Division # - - \(rowvec1::RowVector, rowvec2::RowVector) = pinv(rowvec1) * rowvec2 - \(mat::AbstractMatrix, rowvec::RowVector) = throw(DimensionMismatch("Cannot left-divide transposed vector by matrix")) - \(transmat::Transpose{<:Any,<:AbstractMatrix}, rowvec::RowVector) = - throw(DimensionMismatch("Cannot left-divide transposed vector by matrix")) - \(adjmat::Adjoint{<:Any,<:AbstractMatrix}, rowvec::RowVector) = - throw(DimensionMismatch("Cannot left-divide transposed vector by matrix")) - - # Right Division # - - @inline /(rowvec::RowVector, mat::AbstractMatrix) = rvtranspose(transpose(mat) \ rvtranspose(rowvec)) - /(rowvec::RowVector, transmat::Transpose{<:Any,<:AbstractMatrix}) = rvtranspose(transmat.parent \ rvtranspose(rowvec)) - /(rowvec::RowVector, adjmat::Adjoint{<:Any,<:AbstractMatrix}) = rvadjoint(adjmat.parent \ rvadjoint(rowvec)) - - - # definitions necessary for test/linalg/dense.jl to pass - # should be cleaned up / revised as necessary in the future - /(A::Number, B::Adjoint{<:Any,<:RowVector}) = /(A, rvadjoint(B.parent)) - /(A::Matrix, B::RowVector) = rvadjoint(rvadjoint(B) \ adjoint(A)) - - - # dismabiguation methods - *(A::Adjoint{<:Any,<:AbstractVector}, B::Transpose{<:Any,<:RowVector}) = adjoint(A.parent) * B - *(A::Adjoint{<:Any,<:AbstractMatrix}, B::Transpose{<:Any,<:RowVector}) = A * rvtranspose(B.parent) - *(A::Transpose{<:Any,<:AbstractVector}, B::Adjoint{<:Any,<:RowVector}) = transpose(A.parent) * B - *(A::Transpose{<:Any,<:AbstractMatrix}, B::Adjoint{<:Any,<:RowVector}) = A * rvadjoint(B.parent) -end -@eval Base begin - export RowVector -end -@eval Base.LinAlg begin - # deprecate RowVector{T}(shape...) constructors to RowVector{T}(uninitialized, shape...) equivalents - @deprecate RowVector{T}(n::Int) where {T} RowVector{T}(uninitialized, n) - @deprecate RowVector{T}(n1::Int, n2::Int) where {T} RowVector{T}(uninitialized, n1, n2) - @deprecate RowVector{T}(n::Tuple{Int}) where {T} RowVector{T}(uninitialized, n) - @deprecate RowVector{T}(n::Tuple{Int,Int}) where {T} RowVector{T}(uninitialized, n) -end - # TODOs re. .' deprecation # (1) remove .' deprecation from src/julia-syntax.scm around line 2346 # (2) remove .' 
documentation from base/docs/basedocs.jl around line 255 @@ -1629,695 +949,6 @@ end @deprecate Ac_mul_Bc(a,b) (*)(adjoint(a), adjoint(b)) @deprecate A_mul_Bc(a,b) (*)(a, adjoint(b)) @deprecate Ac_mul_B(a,b) (*)(adjoint(a), b) -# additionally, the following in-place ops were exported from Base -export A_mul_B!, - A_mul_Bt!, At_mul_B!, At_mul_Bt!, - A_mul_Bc!, Ac_mul_B!, Ac_mul_Bc!, - A_ldiv_B!, At_ldiv_B!, Ac_ldiv_B! - -# operations formerly exported from and imported/extended by Base.LinAlg -@eval Base.LinAlg begin - import Base: A_mul_Bt, At_ldiv_Bt, A_rdiv_Bc, At_ldiv_B, Ac_mul_Bc, A_mul_Bc, Ac_mul_B, - Ac_ldiv_B, Ac_ldiv_Bc, At_mul_Bt, A_rdiv_Bt, At_mul_B - # most of these explicit exports are of course obviated by the deprecations below - # but life is easier just leaving them for now... - export A_ldiv_B!, - A_ldiv_Bc, - A_ldiv_Bt, - A_mul_B!, - A_mul_Bc, - A_mul_Bc!, - A_mul_Bt, - A_mul_Bt!, - A_rdiv_Bc, - A_rdiv_Bt, - Ac_ldiv_B, - Ac_ldiv_Bc, - Ac_ldiv_B!, - Ac_mul_B, - Ac_mul_B!, - Ac_mul_Bc, - Ac_mul_Bc!, - Ac_rdiv_B, - Ac_rdiv_Bc, - At_ldiv_B, - At_ldiv_Bt, - At_ldiv_B!, - At_mul_B, - At_mul_B!, - At_mul_Bt, - At_mul_Bt!, - At_rdiv_B, - At_rdiv_Bt -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/bidiag.jl, to deprecate -@eval Base.LinAlg begin - @deprecate A_mul_B!(C::AbstractMatrix, A::SymTridiagonal, B::BiTriSym) mul!(C, A, B) - @deprecate A_mul_B!(C::AbstractMatrix, A::BiTri, B::BiTriSym) mul!(C, A, B) - @deprecate A_mul_B!(C::AbstractMatrix, A::BiTriSym, B::BiTriSym) mul!(C, A, B) - @deprecate A_mul_B!(C::AbstractMatrix, A::AbstractTriangular, B::BiTriSym) mul!(C, A, B) - @deprecate A_mul_B!(C::AbstractMatrix, A::AbstractMatrix, B::BiTriSym) mul!(C, A, B) - @deprecate A_mul_B!(C::AbstractMatrix, A::Diagonal, B::BiTriSym) mul!(C, A, B) - @deprecate A_mul_B!(C::AbstractVector, A::BiTri, B::AbstractVector) mul!(C, A, B) - @deprecate A_mul_B!(C::AbstractMatrix, A::BiTri, B::AbstractVecOrMat) mul!(C, A, B) - @deprecate A_mul_B!(C::AbstractVecOrMat, A::BiTri, B::AbstractVecOrMat) mul!(C, A, B) - @deprecate Ac_ldiv_B(A::Bidiagonal, v::RowVector) (\)(adjoint(A), v) - @deprecate At_ldiv_B(A::Bidiagonal, v::RowVector) (\)(transpose(A), v) - @deprecate Ac_ldiv_B(A::Bidiagonal{<:Number}, v::RowVector{<:Number}) (\)(adjoint(A), v) - @deprecate At_ldiv_B(A::Bidiagonal{<:Number}, v::RowVector{<:Number}) (\)(transpose(A), v) - @deprecate Ac_mul_B(A::Bidiagonal{T}, B::AbstractVector{T}) where {T} (*)(adjoint(A), B) - @deprecate A_mul_Bc(A::Bidiagonal{T}, B::AbstractVector{T}) where {T} (*)(A, adjoint(B)) - @deprecate A_rdiv_Bc(A::Bidiagonal{T}, B::AbstractVector{T}) where {T} (/)(A, adjoint(B)) - @deprecate A_ldiv_B!(A::Union{Bidiagonal, AbstractTriangular}, b::AbstractVector) ldiv!(A, b) - @deprecate At_ldiv_B!(A::Bidiagonal, b::AbstractVector) ldiv!(transpose(A), b) - @deprecate Ac_ldiv_B!(A::Bidiagonal, b::AbstractVector) ldiv!(adjoint(A), b) - @deprecate A_ldiv_B!(A::Union{Bidiagonal,AbstractTriangular}, B::AbstractMatrix) ldiv!(A, B) - @deprecate Ac_ldiv_B!(A::Union{Bidiagonal,AbstractTriangular}, B::AbstractMatrix) ldiv!(adjoint(A), B) - @deprecate At_ldiv_B!(A::Union{Bidiagonal,AbstractTriangular}, B::AbstractMatrix) ldiv!(transpose(A), B) - @deprecate At_ldiv_B(A::Bidiagonal, B::AbstractVecOrMat) (\)(transpose(A), B) - @deprecate Ac_ldiv_B(A::Bidiagonal, B::AbstractVecOrMat) ldiv!(adjoint(A), B) - @deprecate Ac_ldiv_B(A::Bidiagonal{TA}, B::AbstractVecOrMat{TB}) where {TA<:Number,TB<:Number} (\)(adjoint(A), B) - @deprecate At_ldiv_B(A::Bidiagonal{TA}, 
B::AbstractVecOrMat{TB}) where {TA<:Number,TB<:Number} (\)(transpose(A), B) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/tridiag.jl, to deprecate -@eval Base.LinAlg begin - @deprecate A_mul_B!(C::StridedVecOrMat, S::SymTridiagonal, B::StridedVecOrMat) mul!(C, S, B) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/diagonal.jl, to deprecate -@eval Base.LinAlg begin - @deprecate A_mul_B!(A::Union{LowerTriangular,UpperTriangular}, D::Diagonal) mul!(A, D) - @deprecate A_mul_B!(A::UnitLowerTriangular, D::Diagonal) mul!(A, D) - @deprecate A_mul_B!(A::UnitUpperTriangular, D::Diagonal) mul!(A, D) - @deprecate A_mul_B!(D::Diagonal, B::UnitLowerTriangular) mul!(D, B) - @deprecate A_mul_B!(D::Diagonal, B::UnitUpperTriangular) mul!(D, B) - @deprecate Ac_mul_B(D::Diagonal, B::Diagonal) (*)(adjoint(D), B) - @deprecate Ac_mul_B(A::AbstractTriangular, D::Diagonal) (*)(adjoint(A), D) - @deprecate Ac_mul_B(A::AbstractMatrix, D::Diagonal) (*)(adjoint(A), D) - @deprecate At_mul_B(D::Diagonal, B::Diagonal) (*)(transpose(D), B) - @deprecate At_mul_B(A::AbstractTriangular, D::Diagonal) (*)(transpose(A), D) - @deprecate At_mul_B(A::AbstractMatrix, D::Diagonal) (*)(transpose(A), D) - @deprecate A_mul_Bc(D::Diagonal, B::Diagonal) (*)(D, adjoint(B)) - @deprecate A_mul_Bc(D::Diagonal, B::AbstractTriangular) (*)(D, adjoint(B)) - @deprecate A_mul_Bc(D::Diagonal, Q::Union{QRCompactWYQ,QRPackedQ}) (*)(D, adjoint(Q)) - @deprecate A_mul_Bc(D::Diagonal, A::AbstractMatrix) (*)(D, adjoint(A)) - @deprecate A_mul_Bt(D::Diagonal, B::Diagonal) (*)(D, transpose(B)) - @deprecate A_mul_Bt(D::Diagonal, B::AbstractTriangular) (*)(D, transpose(B)) - @deprecate A_mul_Bt(D::Diagonal, A::AbstractMatrix) (*)(D, transpose(A)) - @deprecate Ac_mul_Bc(D::Diagonal, B::Diagonal) (*)(adjoint(D), adjoint(B)) - @deprecate At_mul_Bt(D::Diagonal, B::Diagonal) (*)(transpose(D), transpose(B)) - @deprecate A_mul_B!(A::Diagonal,B::Diagonal) mul!(A, B) - @deprecate At_mul_B!(A::Diagonal,B::Diagonal) mul!(transpose(A), B) - @deprecate Ac_mul_B!(A::Diagonal,B::Diagonal) mul!(adjoint(A), B) - @deprecate A_mul_B!(A::QRPackedQ, D::Diagonal) mul!(A, D) - @deprecate A_mul_B!(A::Diagonal,B::AbstractMatrix) mul!(A, B) - @deprecate At_mul_B!(A::Diagonal,B::AbstractMatrix) mul!(transpose(A), B) - @deprecate Ac_mul_B!(A::Diagonal,B::AbstractMatrix) mul!(adjoint(A), B) - @deprecate A_mul_B!(A::AbstractMatrix,B::Diagonal) mul!(A, B) - @deprecate A_mul_Bt!(A::AbstractMatrix,B::Diagonal) mul!(A, transpose(B)) - @deprecate A_mul_Bc!(A::AbstractMatrix,B::Diagonal) mul!(A, adjoint(B)) - @deprecate A_mul_B!(out::AbstractVector, A::Diagonal, in::AbstractVector) mul!(out, A, in) - @deprecate Ac_mul_B!(out::AbstractVector, A::Diagonal, in::AbstractVector) mul!(out, adjoint(A), in) - @deprecate At_mul_B!(out::AbstractVector, A::Diagonal, in::AbstractVector) mul!(out, transpose(A), in) - @deprecate A_mul_B!(out::AbstractMatrix, A::Diagonal, in::AbstractMatrix) mul!(out, A, in) - @deprecate Ac_mul_B!(out::AbstractMatrix, A::Diagonal, in::AbstractMatrix) mul!(out, adjoint(A), in) - @deprecate At_mul_B!(out::AbstractMatrix, A::Diagonal, in::AbstractMatrix) mul!(out, transpose(A), in) - @deprecate A_mul_Bt(A::Diagonal, B::RealHermSymComplexSym) (*)(A, transpose(B)) - @deprecate At_mul_B(A::RealHermSymComplexSym, B::Diagonal) (*)(transpose(A), B) - @deprecate A_mul_Bc(A::Diagonal, B::RealHermSymComplexHerm) (*)(A, adjoint(B)) - @deprecate Ac_mul_B(A::RealHermSymComplexHerm, B::Diagonal) (*)(adjoint(A), B) - @deprecate A_ldiv_B!(D::Diagonal{T}, 
v::AbstractVector{T}) where {T} ldiv!(D, v) - @deprecate A_ldiv_B!(D::Diagonal{T}, V::AbstractMatrix{T}) where {T} ldiv!(D, V) - @deprecate Ac_ldiv_B!(D::Diagonal{T}, B::AbstractVecOrMat{T}) where {T} ldiv!(adjoint(D), B) - @deprecate At_ldiv_B!(D::Diagonal{T}, B::AbstractVecOrMat{T}) where {T} ldiv!(transpose(D), B) - @deprecate A_rdiv_B!(A::AbstractMatrix{T}, D::Diagonal{T}) where {T} rdiv!(A, D) - @deprecate A_rdiv_Bc!(A::AbstractMatrix{T}, D::Diagonal{T}) where {T} rdiv!(A, adjoint(D)) - @deprecate A_rdiv_Bt!(A::AbstractMatrix{T}, D::Diagonal{T}) where {T} rdiv!(A, transpose(D)) - @deprecate Ac_ldiv_B(F::Factorization, D::Diagonal) (\)(adjoint(F), D) - @deprecate A_mul_Bt(D::Diagonal, rowvec::RowVector) (*)(D, transpose(rowvec)) - @deprecate A_mul_Bc(D::Diagonal, rowvec::RowVector) (*)(D, adjoint(rowvec)) - @deprecate A_ldiv_B!(D::Diagonal, B::StridedVecOrMat) ldiv!(D, B) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/special.jl, to deprecate -@eval Base.LinAlg begin - @deprecate A_mul_Bc!(A::AbstractTriangular, B::Union{QRCompactWYQ,QRPackedQ}) mul!(A, adjoint(B)) - @deprecate A_mul_Bc(A::AbstractTriangular, B::Union{QRCompactWYQ,QRPackedQ}) (*)(A, adjoint(B)) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/bunchkaufman.jl, to deprecate -@eval Base.LinAlg begin - @deprecate A_ldiv_B!(B::BunchKaufman{T}, R::StridedVecOrMat{T}) where {T<:BlasReal} ldiv!(B, R) - @deprecate A_ldiv_B!(B::BunchKaufman{T}, R::StridedVecOrMat{T}) where {T<:BlasComplex} ldiv!(B, R) - @deprecate A_ldiv_B!(B::BunchKaufman{T}, R::StridedVecOrMat{S}) where {T,S} ldiv!(B, R) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/cholesky.jl, to deprecate -@eval Base.LinAlg begin - @deprecate A_ldiv_B!(C::Cholesky{T,<:AbstractMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} ldiv!(C, B) - @deprecate A_ldiv_B!(C::Cholesky{<:Any,<:AbstractMatrix}, B::StridedVecOrMat) ldiv!(C, B) - @deprecate A_ldiv_B!(C::CholeskyPivoted{T}, B::StridedVector{T}) where {T<:BlasFloat} ldiv!(C, B) - @deprecate A_ldiv_B!(C::CholeskyPivoted{T}, B::StridedMatrix{T}) where {T<:BlasFloat} ldiv!(C, B) - @deprecate A_ldiv_B!(C::CholeskyPivoted, B::StridedVector) ldiv!(C, B) - @deprecate A_ldiv_B!(C::CholeskyPivoted, B::StridedMatrix) ldiv!(C, B) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/factorization.jl, to deprecate -@eval Base.LinAlg begin - @deprecate Ac_ldiv_B(F::Factorization, B::AbstractVecOrMat) (\)(adjoint(F), B) - @deprecate A_ldiv_B!(Y::AbstractVecOrMat, A::Factorization, B::AbstractVecOrMat) ldiv!(Y, A, B) - @deprecate Ac_ldiv_B!(Y::AbstractVecOrMat, A::Factorization, B::AbstractVecOrMat) ldiv!(Y, adjoint(A), B) - @deprecate At_ldiv_B!(Y::AbstractVecOrMat, A::Factorization, B::AbstractVecOrMat) ldiv!(Y, transpose(A), B) - @deprecate At_ldiv_B(F::Factorization{<:Real}, B::AbstractVecOrMat) (\)(transpose(F), B) - @deprecate At_ldiv_B(F::Factorization, B) (\)(transpose(F), B) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] 
methods from base/linalg/hessenberg.jl, to deprecate -@eval Base.LinAlg begin - @deprecate A_mul_B!(Q::HessenbergQ{T}, X::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(Q, X) - @deprecate A_mul_B!(X::StridedMatrix{T}, Q::HessenbergQ{T}) where {T<:BlasFloat} mul!(X, Q) - @deprecate Ac_mul_B!(Q::HessenbergQ{T}, X::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(adjoint(Q), X) - @deprecate A_mul_Bc!(X::StridedMatrix{T}, Q::HessenbergQ{T}) where {T<:BlasFloat} mul!(X, adjoint(Q)) - @deprecate Ac_mul_B(Q::HessenbergQ{T}, X::StridedVecOrMat{S}) where {T,S} (*)(adjoint(Q), X) - @deprecate A_mul_Bc(X::StridedVecOrMat{S}, Q::HessenbergQ{T}) where {T,S} (*)(X, adjoint(Q)) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/ldlt.jl, to deprecate -@eval Base.LinAlg begin - @deprecate A_ldiv_B!(S::LDLt{T,M}, B::AbstractVecOrMat{T}) where {T,M<:SymTridiagonal{T}} ldiv!(S, B) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/svd.jl, to deprecate -@eval Base.LinAlg begin - @deprecate A_ldiv_B!(A::SVD{T}, B::StridedVecOrMat) where {T} ldiv!(A, B) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/symmetric.jl, to deprecate -@eval Base.LinAlg begin - @deprecate A_mul_B!(y::StridedVector{T}, A::Symmetric{T,<:StridedMatrix}, x::StridedVector{T}) where {T<:BlasFloat} mul!(y, A, x) - @deprecate A_mul_B!(y::StridedVector{T}, A::Hermitian{T,<:StridedMatrix}, x::StridedVector{T}) where {T<:BlasReal} mul!(y, A, x) - @deprecate A_mul_B!(y::StridedVector{T}, A::Hermitian{T,<:StridedMatrix}, x::StridedVector{T}) where {T<:BlasComplex} mul!(y, A, x) - @deprecate A_mul_B!(C::StridedMatrix{T}, A::Symmetric{T,<:StridedMatrix}, B::StridedMatrix{T}) where {T<:BlasFloat} mul!(C, A, B) - @deprecate A_mul_B!(C::StridedMatrix{T}, A::StridedMatrix{T}, B::Symmetric{T,<:StridedMatrix}) where {T<:BlasFloat} mul!(C, A, B) - @deprecate A_mul_B!(C::StridedMatrix{T}, A::Hermitian{T,<:StridedMatrix}, B::StridedMatrix{T}) where {T<:BlasReal} mul!(C, A, B) - @deprecate A_mul_B!(C::StridedMatrix{T}, A::StridedMatrix{T}, B::Hermitian{T,<:StridedMatrix}) where {T<:BlasReal} mul!(C, A, B) - @deprecate A_mul_B!(C::StridedMatrix{T}, A::Hermitian{T,<:StridedMatrix}, B::StridedMatrix{T}) where {T<:BlasComplex} mul!(C, A, B) - @deprecate A_mul_B!(C::StridedMatrix{T}, A::StridedMatrix{T}, B::Hermitian{T,<:StridedMatrix}) where {T<:BlasComplex} mul!(C, A, B) - @deprecate At_mul_B(A::RealHermSymComplexSym, B::AbstractVector) (*)(transpose(A), B) - @deprecate At_mul_B(A::RealHermSymComplexSym, B::AbstractMatrix) (*)(transpose(A), B) - @deprecate A_mul_Bt(A::AbstractMatrix, B::RealHermSymComplexSym) (*)(A, transpose(B)) - @deprecate Ac_mul_B(A::RealHermSymComplexHerm, B::AbstractVector) (*)(adjoint(A), B) - @deprecate Ac_mul_B(A::RealHermSymComplexHerm, B::AbstractMatrix) (*)(adjoint(A), B) - @deprecate A_mul_Bc(A::AbstractMatrix, B::RealHermSymComplexHerm) (*)(A, adjoint(B)) - @deprecate A_mul_Bt(A::RowVector, B::RealHermSymComplexSym) (*)(A, transpose(B)) - @deprecate A_mul_Bc(A::RowVector, B::RealHermSymComplexHerm) (*)(A, adjoint(B)) - @deprecate At_mul_B(A::RealHermSymComplexSym, B::AbstractTriangular) (*)(transpose(A), B) - @deprecate A_mul_Bt(A::AbstractTriangular, B::RealHermSymComplexSym) (*)(A, transpose(B)) - @deprecate Ac_mul_B(A::RealHermSymComplexHerm, B::AbstractTriangular) (*)(adjoint(A), B) - @deprecate A_mul_Bc(A::AbstractTriangular, B::RealHermSymComplexHerm) (*)(A, adjoint(B)) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] 
methods from base/linalg/lu.jl, to deprecate -@eval Base.LinAlg begin - @deprecate A_ldiv_B!(A::LU{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} ldiv!(A, B) - @deprecate A_ldiv_B!(A::LU{<:Any,<:StridedMatrix}, B::StridedVecOrMat) ldiv!(A, B) - @deprecate At_ldiv_B!(A::LU{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} ldiv!(transpose(A), B) - @deprecate At_ldiv_B!(A::LU{<:Any,<:StridedMatrix}, B::StridedVecOrMat) ldiv!(transpose(A), B) - @deprecate Ac_ldiv_B!(F::LU{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:Real} ldiv!(adjoint(F), B) - @deprecate Ac_ldiv_B!(A::LU{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasComplex} ldiv!(adjoint(A), B) - @deprecate Ac_ldiv_B!(A::LU{<:Any,<:StridedMatrix}, B::StridedVecOrMat) ldiv!(adjoint(A), B) - @deprecate At_ldiv_Bt(A::LU{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} (\)(transpose(A), transpose(B)) - @deprecate At_ldiv_Bt(A::LU, B::StridedVecOrMat) (\)(transpose(A), transpose(B)) - @deprecate Ac_ldiv_Bc(A::LU{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasComplex} (\)(adjoint(A), adjoint(B)) - @deprecate Ac_ldiv_Bc(A::LU, B::StridedVecOrMat) (\)(adjoint(A), adjoint(B)) - @deprecate A_ldiv_B!(A::LU{T,Tridiagonal{T,V}}, B::AbstractVecOrMat) where {T,V} ldiv!(A, B) - @deprecate At_ldiv_B!(A::LU{T,Tridiagonal{T,V}}, B::AbstractVecOrMat) where {T,V} (\)(transpose(A), B) - @deprecate Ac_ldiv_B!(A::LU{T,Tridiagonal{T,V}}, B::AbstractVecOrMat) where {T,V} ldiv!(adjoint(A), B) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/lq.jl, to deprecate -@eval Base.LinAlg begin - @deprecate A_mul_B!(A::LQ{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(A, B) - @deprecate A_mul_B!(A::LQ{T}, B::QR{T}) where {T<:BlasFloat} mul!(A, B) - @deprecate A_mul_B!(A::QR{T}, B::LQ{T}) where {T<:BlasFloat} mul!(A, B) - @deprecate A_mul_B!(A::LQPackedQ{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(A, B) - @deprecate Ac_mul_B!(A::LQPackedQ{T}, B::StridedVecOrMat{T}) where {T<:BlasReal} mul!(adjoint(A), B) - @deprecate Ac_mul_B!(A::LQPackedQ{T}, B::StridedVecOrMat{T}) where {T<:BlasComplex} mul!(adjoint(A), B) - @deprecate Ac_mul_B(A::LQPackedQ, B::StridedVecOrMat) (*)(adjoint(A), B) - @deprecate A_mul_Bc(A::LQPackedQ, B::StridedVecOrMat) (*)(A, adjoint(B)) - @deprecate Ac_mul_Bc(A::LQPackedQ, B::StridedVecOrMat) (*)(adjoint(A), adjoint(B)) - @deprecate A_mul_B!(A::StridedMatrix{T}, B::LQPackedQ{T}) where {T<:BlasFloat} mul!(A, B) - @deprecate A_mul_Bc!(A::StridedMatrix{T}, B::LQPackedQ{T}) where {T<:BlasReal} mul!(A, adjoint(B)) - @deprecate A_mul_Bc!(A::StridedMatrix{T}, B::LQPackedQ{T}) where {T<:BlasComplex} mul!(A, adjoint(B)) - @deprecate A_mul_Bc(A::StridedVecOrMat, Q::LQPackedQ) (*)(A, adjoint(Q)) - @deprecate Ac_mul_Bc(A::StridedMatrix, Q::LQPackedQ) (*)(adjoint(A), adjoint(Q)) - @deprecate Ac_mul_B(A::StridedMatrix, Q::LQPackedQ) (*)(adjoint(A), Q) - @deprecate A_ldiv_B!(A::LQ{T}, B::StridedVecOrMat{T}) where {T} ldiv!(A, B) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] 
methods from base/linalg/qr.jl, to deprecate -@eval Base.LinAlg begin - @deprecate A_mul_B!(A::QRCompactWYQ{T,S}, B::StridedVecOrMat{T}) where {T<:BlasFloat, S<:StridedMatrix} mul!(A, B) - @deprecate A_mul_B!(A::QRPackedQ{T,S}, B::StridedVecOrMat{T}) where {T<:BlasFloat, S<:StridedMatrix} mul!(A, B) - @deprecate A_mul_B!(A::QRPackedQ, B::AbstractVecOrMat) mul!(A, B) - @deprecate Ac_mul_B!(A::QRCompactWYQ{T,S}, B::StridedVecOrMat{T}) where {T<:BlasReal,S<:StridedMatrix} mul!(adjoint(A), B) - @deprecate Ac_mul_B!(A::QRCompactWYQ{T,S}, B::StridedVecOrMat{T}) where {T<:BlasComplex,S<:StridedMatrix} mul!(adjoint(A), B) - @deprecate Ac_mul_B!(A::QRPackedQ{T,S}, B::StridedVecOrMat{T}) where {T<:BlasReal,S<:StridedMatrix} mul!(adjoint(A), B) - @deprecate Ac_mul_B!(A::QRPackedQ{T,S}, B::StridedVecOrMat{T}) where {T<:BlasComplex,S<:StridedMatrix} mul!(adjoint(A), B) - @deprecate Ac_mul_B!(A::QRPackedQ, B::AbstractVecOrMat) mul!(adjoint(A), B) - @deprecate Ac_mul_B(Q::AbstractQ, B::StridedVecOrMat) (*)(adjoint(Q), B) - @deprecate A_mul_Bc(Q::AbstractQ, B::StridedVecOrMat) (*)(Q, adjoint(B)) - @deprecate Ac_mul_Bc(Q::AbstractQ, B::StridedVecOrMat) (*)(adjoint(Q), adjoint(B)) - @deprecate A_mul_B!(A::StridedVecOrMat{T}, B::QRCompactWYQ{T,S}) where {T<:BlasFloat,S<:StridedMatrix} mul!(A, B) - @deprecate A_mul_B!(A::StridedVecOrMat{T}, B::QRPackedQ{T,S}) where {T<:BlasFloat,S<:StridedMatrix} mul!(A, B) - @deprecate A_mul_B!(A::StridedMatrix,Q::QRPackedQ) mul!(A, Q) - @deprecate A_mul_Bc!(A::StridedVecOrMat{T}, B::QRCompactWYQ{T}) where {T<:BlasReal} mul!(A, adjoint(B)) - @deprecate A_mul_Bc!(A::StridedVecOrMat{T}, B::QRCompactWYQ{T}) where {T<:BlasComplex} mul!(A, adjoint(B)) - @deprecate A_mul_Bc!(A::StridedVecOrMat{T}, B::QRPackedQ{T}) where {T<:BlasReal} mul!(A, adjoint(B)) - @deprecate A_mul_Bc!(A::StridedVecOrMat{T}, B::QRPackedQ{T}) where {T<:BlasComplex} mul!(A, adjoint(B)) - @deprecate A_mul_Bc!(A::StridedMatrix,Q::QRPackedQ) mul!(A, adjoint(Q)) - @deprecate A_mul_Bc(A::StridedMatrix, B::AbstractQ) (*)(A, adjoint(B)) - @deprecate A_mul_Bc(rowvec::RowVector, B::AbstractQ) (*)(rowvec, adjoint(B)) - @deprecate Ac_mul_B(A::StridedVecOrMat, Q::AbstractQ) (*)(adjoint(A), Q) - @deprecate Ac_mul_Bc(A::StridedVecOrMat, Q::AbstractQ) (*)(adjoint(A), adjoint(Q)) - @deprecate A_ldiv_B!(A::QRCompactWY{T}, b::StridedVector{T}) where {T<:BlasFloat} ldiv!(A, b) - @deprecate A_ldiv_B!(A::QRCompactWY{T}, B::StridedMatrix{T}) where {T<:BlasFloat} ldiv!(A, B) - @deprecate A_ldiv_B!(A::QRPivoted{T}, B::StridedMatrix{T}, rcond::Real) where {T<:BlasFloat} ldiv!(A, B, rcond) - @deprecate A_ldiv_B!(A::QRPivoted{T}, B::StridedVector{T}) where {T<:BlasFloat} ldiv!(A, B) - @deprecate A_ldiv_B!(A::QRPivoted{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} ldiv!(A, B) - @deprecate A_ldiv_B!(A::QR{T}, B::StridedMatrix{T}) where {T} ldiv!(A, B) - @deprecate A_ldiv_B!(A::QR, B::StridedVector) ldiv!(A, B) - @deprecate A_ldiv_B!(A::QRPivoted, b::StridedVector) ldiv!(A, b) - @deprecate A_ldiv_B!(A::QRPivoted, B::StridedMatrix) ldiv!(A, B) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] 
methods from base/linalg/matmul.jl, to deprecate -@eval Base.LinAlg begin - @deprecate Ac_mul_Bc(A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} (*)(adjoint(A), adjoint(B)) - @deprecate Ac_mul_Bc!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(C, adjoint(A), adjoint(B)) - @deprecate Ac_mul_Bc!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) mul!(C, adjoint(A), adjoint(B)) - @deprecate Ac_mul_Bt!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) mul!(C, adjoint(A), transpose(B)) - @deprecate A_mul_Bc!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) where {T<:BlasComplex} mul!(C, A, adjoint(B)) - @deprecate A_mul_Bc!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) mul!(C, A, adjoint(B)) - @deprecate A_mul_Bc(A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} (*)(A, adjoint(B)) - @deprecate A_mul_Bc(A::StridedMatrix{<:BlasFloat}, B::StridedMatrix{<:BlasReal}) (*)(A, adjoint(B)) - @deprecate A_mul_Bc!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{<:BlasReal}) where {T<:BlasFloat} mul!(C, A, adjoint(B)) - @deprecate Ac_mul_B!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) where {T<:BlasComplex} mul!(C, adjoint(A), B) - @deprecate Ac_mul_B!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) mul!(C, adjoint(A), B) - @deprecate Ac_mul_B(A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} (*)(adjoint(A), B) - @deprecate Ac_mul_B(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasReal} (*)(adjoint(A), B) - @deprecate Ac_mul_B!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) where {T<:BlasReal} mul!(C, adjoint(A), B) - @deprecate At_mul_Bt!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(C, transpose(A), transpose(B)) - @deprecate At_mul_Bt!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) mul!(C, transpose(A), transpose(B)) - @deprecate At_mul_Bt(A::AbstractMatrix{T}, B::AbstractVecOrMat{S}) where {T,S} (*)(transpose(A), transpose(B)) - @deprecate A_mul_Bt!(C::AbstractVecOrMat, A::AbstractVecOrMat, B::AbstractVecOrMat) mul!(C, A, transpose(B)) - @deprecate A_mul_Bt!(C::StridedMatrix{Complex{Float32}}, A::StridedVecOrMat{Complex{Float32}}, B::StridedVecOrMat{Float32}) mul!(C, A, transpose(B)) - @deprecate A_mul_Bt!(C::StridedMatrix{Complex{Float64}}, A::StridedVecOrMat{Complex{Float64}}, B::StridedVecOrMat{Float64}) mul!(C, A, transpose(B)) - @deprecate A_mul_Bt!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(C, A, transpose(B)) - @deprecate A_mul_Bt(A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} (*)(A, transpose(B)) - @deprecate At_mul_B!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(C, transpose(A), B) - @deprecate At_mul_B!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) mul!(C, transpose(A), B) - @deprecate At_mul_B(A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} (*)(transpose(A), B) - @deprecate A_mul_B!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) mul!(C, A, B) - @deprecate A_mul_B!(C::StridedMatrix{Complex{Float32}}, A::StridedVecOrMat{Complex{Float32}}, B::StridedVecOrMat{Float32}) mul!(C, A, B) - @deprecate A_mul_B!(C::StridedMatrix{Complex{Float64}}, A::StridedVecOrMat{Complex{Float64}}, B::StridedVecOrMat{Float64}) mul!(C, A, B) - @deprecate A_mul_B!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, 
B::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(C, A, B) - @deprecate Ac_mul_B!(y::StridedVector{T}, A::StridedVecOrMat{T}, x::StridedVector{T}) where {T<:BlasReal} mul!(y, adjoint(A), x) - @deprecate Ac_mul_B!(y::StridedVector{T}, A::StridedVecOrMat{T}, x::StridedVector{T}) where {T<:BlasComplex} mul!(y, adjoint(A), x) - @deprecate Ac_mul_B!(y::AbstractVector, A::AbstractVecOrMat, x::AbstractVector) mul!(y, adjoint(A), x) - @deprecate Ac_mul_B(A::StridedMatrix{T}, x::StridedVector{S}) where {T<:BlasFloat,S} (*)(adjoint(A), x) - @deprecate Ac_mul_B(A::AbstractMatrix{T}, x::AbstractVector{S}) where {T,S} (*)(adjoint(A), x) - @deprecate At_mul_B(A::StridedMatrix{T}, x::StridedVector{S}) where {T<:BlasFloat,S} (*)(transpose(A), x) - @deprecate At_mul_B(A::AbstractMatrix{T}, x::AbstractVector{S}) where {T,S} (*)(transpose(A), x) - @deprecate At_mul_B!(y::StridedVector{T}, A::StridedVecOrMat{T}, x::StridedVector{T}) where {T<:BlasFloat} mul!(y, transpose(A), x) - @deprecate At_mul_B!(y::AbstractVector, A::AbstractVecOrMat, x::AbstractVector) mul!(y, transpose(A), x) - @deprecate A_mul_B!(y::AbstractVector, A::AbstractVecOrMat, x::AbstractVector) mul!(y, A, x) - @deprecate A_mul_B!(y::StridedVector{Complex{Float32}}, A::StridedVecOrMat{Complex{Float32}}, x::StridedVector{Float32}) mul!(y, A, x) - @deprecate A_mul_B!(y::StridedVector{Complex{Float64}}, A::StridedVecOrMat{Complex{Float64}}, x::StridedVector{Float64}) mul!(y, A, x) - @deprecate A_mul_B!(y::StridedVector{T}, A::StridedVecOrMat{T}, x::StridedVector{T}) where {T<:BlasFloat} mul!(y, A, x) - @deprecate A_mul_Bt(a::AbstractVector, B::AbstractMatrix) (*)(a, transpose(B)) - @deprecate A_mul_Bt(A::AbstractMatrix, b::AbstractVector) (*)(A, transpose(b)) - @deprecate A_mul_Bc(a::AbstractVector, B::AbstractMatrix) (*)(a, adjoint(B)) - @deprecate A_mul_Bc(A::AbstractMatrix, b::AbstractVector) (*)(A, adjoint(b)) - @deprecate At_mul_B(x::StridedVector{T}, y::StridedVector{T}) where {T<:BlasComplex} (*)(transpose(x), y) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] 
methods from base/linalg/triangular.jl, to deprecate -@eval Base.LinAlg begin - @deprecate A_mul_Bc(A::AbstractTriangular, B::AbstractTriangular) (*)(A, adjoint(B)) - @deprecate A_mul_Bt(A::AbstractTriangular, B::AbstractTriangular) (*)(A, transpose(B)) - @deprecate Ac_mul_B(A::AbstractTriangular, B::AbstractTriangular) (*)(adjoint(A), B) - @deprecate At_mul_B(A::AbstractTriangular, B::AbstractTriangular) (*)(transpose(A), B) - @deprecate Ac_ldiv_B(A::Union{UpperTriangular,LowerTriangular}, B::RowVector) (\)(adjoint(A), B) - @deprecate Ac_ldiv_B(A::Union{UnitUpperTriangular,UnitLowerTriangular}, B::RowVector) (\)(adjoint(A), B) - @deprecate At_ldiv_B(A::Union{UpperTriangular,LowerTriangular}, B::RowVector) (\)(transpose(A), B) - @deprecate At_ldiv_B(A::Union{UnitUpperTriangular,UnitLowerTriangular}, B::RowVector) (\)(transpose(A), B) - @deprecate A_rdiv_Bc(rowvec::RowVector, A::Union{UpperTriangular,LowerTriangular}) (/)(rowvec, adjoint(A)) - @deprecate A_rdiv_Bc(rowvec::RowVector, A::Union{UnitUpperTriangular,UnitLowerTriangular}) (/)(rowvec, adjoint(A)) - @deprecate A_rdiv_Bt(rowvec::RowVector, A::Union{UpperTriangular,LowerTriangular}) (/)(rowvec, transpose(A)) - @deprecate A_rdiv_Bt(rowvec::RowVector, A::Union{UnitUpperTriangular,UnitLowerTriangular}) (/)(rowvec, transpose(A)) - @deprecate A_mul_Bt(rowvec::RowVector, A::AbstractTriangular) (*)(rowvec, transpose(A)) - @deprecate A_mul_Bt(A::AbstractTriangular, rowvec::RowVector) (*)(A, transpose(rowvec)) - @deprecate At_mul_Bt(A::AbstractTriangular, rowvec::RowVector) (*)(transpose(A), transpose(rowvec)) - @deprecate A_mul_Bc(rowvec::RowVector, A::AbstractTriangular) (*)(rowvec, adjoint(A)) - @deprecate A_mul_Bc(A::AbstractTriangular, rowvec::RowVector) (*)(A, adjoint(rowvec)) - @deprecate Ac_mul_Bc(A::AbstractTriangular, rowvec::RowVector) (*)(adjoint(A), adjoint(rowvec)) - @deprecate Ac_mul_B(A::AbstractMatrix, B::AbstractTriangular) (*)(adjoint(A), B) - @deprecate At_mul_B(A::AbstractMatrix, B::AbstractTriangular) (*)(transpose(A), B) - @deprecate A_mul_Bc(A::AbstractTriangular, B::AbstractMatrix) (*)(A, adjoint(B)) - @deprecate A_mul_Bt(A::AbstractTriangular, B::AbstractMatrix) (*)(A, transpose(B)) - @deprecate Ac_mul_Bc(A::AbstractTriangular, B::AbstractTriangular) (*)(adjoint(A), adjoint(B)) - @deprecate Ac_mul_Bc(A::AbstractTriangular, B::AbstractMatrix) (*)(adjoint(A), adjoint(B)) - @deprecate Ac_mul_Bc(A::AbstractMatrix, B::AbstractTriangular) (*)(adjoint(A), adjoint(B)) - @deprecate At_mul_Bt(A::AbstractTriangular, B::AbstractTriangular) (*)(transpose(A), transpose(B)) - @deprecate At_mul_Bt(A::AbstractTriangular, B::AbstractMatrix) (*)(transpose(A), transpose(B)) - @deprecate At_mul_Bt(A::AbstractMatrix, B::AbstractTriangular) (*)(transpose(A), transpose(B)) - @deprecate A_mul_Bc!(A::UpperTriangular, B::Union{LowerTriangular,UnitLowerTriangular}) mul!(A, adjoint(B)) - @deprecate A_mul_Bc!(A::LowerTriangular, B::Union{UpperTriangular,UnitUpperTriangular}) mul!(A, adjoint(B)) - @deprecate A_mul_Bt!(A::UpperTriangular, B::Union{LowerTriangular,UnitLowerTriangular}) mul!(A, transpose(B)) - @deprecate A_mul_Bt!(A::LowerTriangular, B::Union{UpperTriangular,UnitUpperTriangular}) mul!(A, transpose(B)) - @deprecate A_rdiv_Bc!(A::UpperTriangular, B::Union{LowerTriangular,UnitLowerTriangular}) rdiv!(A, adjoint(B)) - @deprecate A_rdiv_Bc!(A::LowerTriangular, B::Union{UpperTriangular,UnitUpperTriangular}) rdiv!(A, adjoint(B)) - @deprecate A_rdiv_Bt!(A::UpperTriangular, B::Union{LowerTriangular,UnitLowerTriangular}) rdiv!(A, transpose(B)) 
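# [Editorial aside, illustrative only; not part of the patch.] Every deprecation in these
# blocks follows the same rewrite rule: a fused name of the form A[ct]_(mul|ldiv|rdiv)_B[ct][!]
# is re-expressed as mul!/ldiv!/rdiv! (or *, \, /) applied to lazy adjoint(...)/transpose(...)
# wrappers, for example (argument types elided):
#     Ac_mul_B!(C, A, B)  ->  mul!(C, adjoint(A), B)
#     At_ldiv_B(F, B)     ->  transpose(F) \ B
#     A_rdiv_Bc!(A, B)    ->  rdiv!(A, adjoint(B))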
- @deprecate A_rdiv_Bt!(A::LowerTriangular, B::Union{UpperTriangular,UnitUpperTriangular}) rdiv!(A, transpose(B)) - @deprecate A_rdiv_B!(A::UpperTriangular, B::Union{UpperTriangular,UnitUpperTriangular}) rdiv!(A, B) - @deprecate A_rdiv_B!(A::LowerTriangular, B::Union{LowerTriangular,UnitLowerTriangular}) rdiv!(A, B) - @deprecate Ac_mul_B!(A::Union{LowerTriangular,UnitLowerTriangular}, B::UpperTriangular) mul!(adjoint(A), B) - @deprecate Ac_mul_B!(A::Union{UpperTriangular,UnitUpperTriangular}, B::LowerTriangular) mul!(adjoint(A), B) - @deprecate At_mul_B!(A::Union{LowerTriangular,UnitLowerTriangular}, B::UpperTriangular) mul!(transpose(A), B) - @deprecate At_mul_B!(A::Union{UpperTriangular,UnitUpperTriangular}, B::LowerTriangular) mul!(transpose(A), B) - @deprecate Ac_ldiv_B!(A::Union{LowerTriangular,UnitLowerTriangular}, B::UpperTriangular) ldiv!(adjoint(A), B) - @deprecate Ac_ldiv_B!(A::Union{UpperTriangular,UnitUpperTriangular}, B::LowerTriangular) ldiv!(adjoint(A), B) - @deprecate At_ldiv_B!(A::Union{LowerTriangular,UnitLowerTriangular}, B::UpperTriangular) ldiv!(transpose(A), B) - @deprecate At_ldiv_B!(A::Union{UpperTriangular,UnitUpperTriangular}, B::LowerTriangular) ldiv!(transpose(A), B) - @deprecate A_rdiv_Bt!(A::StridedMatrix, B::UnitLowerTriangular) rdiv!(A, transpose(B)) - @deprecate A_rdiv_Bt!(A::StridedMatrix, B::LowerTriangular) rdiv!(A, transpose(B)) - @deprecate A_rdiv_Bt!(A::StridedMatrix, B::UnitUpperTriangular) rdiv!(A, transpose(B)) - @deprecate A_rdiv_Bt!(A::StridedMatrix, B::UpperTriangular) rdiv!(A, transpose(B)) - @deprecate A_rdiv_Bc!(A::StridedMatrix, B::UnitLowerTriangular) rdiv!(A, adjoint(B)) - @deprecate A_rdiv_Bc!(A::StridedMatrix, B::LowerTriangular) rdiv!(A, adjoint(B)) - @deprecate A_rdiv_Bc!(A::StridedMatrix, B::UnitUpperTriangular) rdiv!(A, adjoint(B)) - @deprecate A_rdiv_Bc!(A::StridedMatrix, B::UpperTriangular) rdiv!(A, adjoint(B)) - @deprecate A_rdiv_B!(A::StridedMatrix, B::UnitLowerTriangular) rdiv!(A, B) - @deprecate A_rdiv_B!(A::StridedMatrix, B::LowerTriangular) rdiv!(A, B) - @deprecate A_rdiv_B!(A::StridedMatrix, B::UnitUpperTriangular) rdiv!(A, B) - @deprecate A_rdiv_B!(A::StridedMatrix, B::UpperTriangular) rdiv!(A, B) - @deprecate Ac_ldiv_B!(A::UnitUpperTriangular, b::AbstractVector, x::AbstractVector = b) ldiv!(adjoint(A), b, x) - @deprecate Ac_ldiv_B!(A::UpperTriangular, b::AbstractVector, x::AbstractVector = b) ldiv!(adjoint(A), b, x) - @deprecate Ac_ldiv_B!(A::UnitLowerTriangular, b::AbstractVector, x::AbstractVector = b) ldiv!(adjoint(A), b, x) - @deprecate Ac_ldiv_B!(A::LowerTriangular, b::AbstractVector, x::AbstractVector = b) ldiv!(adjoint(A), b, x) - @deprecate At_ldiv_B!(A::UnitUpperTriangular, b::AbstractVector, x::AbstractVector = b) ldiv!(transpose(A), b, x) - @deprecate At_ldiv_B!(A::UpperTriangular, b::AbstractVector, x::AbstractVector = b) ldiv!(transpose(A), b, x) - @deprecate At_ldiv_B!(A::UnitLowerTriangular, b::AbstractVector, x::AbstractVector = b) ldiv!(transpose(A), b, x) - @deprecate At_ldiv_B!(A::LowerTriangular, b::AbstractVector, x::AbstractVector = b) ldiv!(transpose(A), b, x) - @deprecate A_mul_Bt!(A::StridedMatrix, B::UnitLowerTriangular) mul!(A, transpose(B)) - @deprecate A_mul_Bt!(A::StridedMatrix, B::LowerTriangular) mul!(A, transpose(B)) - @deprecate A_mul_Bt!(A::StridedMatrix, B::UnitUpperTriangular) mul!(A, transpose(B)) - @deprecate A_mul_Bt!(A::StridedMatrix, B::UpperTriangular) mul!(A, transpose(B)) - @deprecate A_mul_Bc!(A::StridedMatrix, B::UnitLowerTriangular) mul!(A, adjoint(B)) - @deprecate 
A_mul_Bc!(A::StridedMatrix, B::LowerTriangular) mul!(A, adjoint(B)) - @deprecate A_mul_Bc!(A::StridedMatrix, B::UnitUpperTriangular) mul!(A, adjoint(B)) - @deprecate A_mul_Bc!(A::StridedMatrix, B::UpperTriangular) mul!(A, adjoint(B)) - @deprecate A_mul_B!(A::StridedMatrix, B::UnitLowerTriangular) mul!(A, B) - @deprecate A_mul_B!(A::StridedMatrix, B::LowerTriangular) mul!(A, B) - @deprecate A_mul_B!(A::StridedMatrix, B::UnitUpperTriangular) mul!(A, B) - @deprecate A_mul_B!(A::StridedMatrix, B::UpperTriangular) mul!(A, B) - @deprecate At_mul_B!(A::UnitLowerTriangular, B::StridedVecOrMat) mul!(transpose(A), B) - @deprecate At_mul_B!(A::LowerTriangular, B::StridedVecOrMat) mul!(transpose(A), B) - @deprecate At_mul_B!(A::UnitUpperTriangular, B::StridedVecOrMat) mul!(transpose(A), B) - @deprecate At_mul_B!(A::UpperTriangular, B::StridedVecOrMat) mul!(transpose(A), B) - @deprecate Ac_mul_B!(A::UnitLowerTriangular, B::StridedVecOrMat) mul!(adjoint(A), B) - @deprecate Ac_mul_B!(A::LowerTriangular, B::StridedVecOrMat) mul!(adjoint(A), B) - @deprecate Ac_mul_B!(A::UnitUpperTriangular, B::StridedVecOrMat) mul!(adjoint(A), B) - @deprecate Ac_mul_B!(A::UpperTriangular, B::StridedVecOrMat) mul!(adjoint(A), B) - @deprecate A_mul_B!(A::UnitLowerTriangular, B::StridedVecOrMat) mul!(A, B) - @deprecate A_mul_B!(A::LowerTriangular, B::StridedVecOrMat) mul!(A, B) - @deprecate A_mul_B!(A::UnitUpperTriangular, B::StridedVecOrMat) mul!(A, B) - @deprecate A_mul_B!(A::UpperTriangular, B::StridedVecOrMat) mul!(A, B) - @deprecate A_mul_B!(C::AbstractVector , A::AbstractTriangular, B::AbstractVector) mul!(C, A, B) - @deprecate A_mul_B!(C::AbstractMatrix , A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, A, B) - @deprecate A_mul_B!(C::AbstractVecOrMat, A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, A, B) - @deprecate Ac_mul_B!(C::AbstractVector , A::AbstractTriangular, B::AbstractVector) mul!(C, adjoint(A), B) - @deprecate Ac_mul_B!(C::AbstractMatrix , A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, adjoint(A), B) - @deprecate Ac_mul_B!(C::AbstractVecOrMat, A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, adjoint(A), B) - @deprecate At_mul_B!(C::AbstractVector , A::AbstractTriangular, B::AbstractVector) mul!(C, transpose(A), B) - @deprecate At_mul_B!(C::AbstractMatrix , A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, transpose(A), B) - @deprecate At_mul_B!(C::AbstractVecOrMat, A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, transpose(A), B) - @deprecate A_mul_B!(A::Tridiagonal, B::AbstractTriangular) mul!(A, B) - @deprecate A_mul_B!(C::AbstractMatrix, A::AbstractTriangular, B::Tridiagonal) mul!(C, A, B) - @deprecate A_mul_B!(C::AbstractMatrix, A::Tridiagonal, B::AbstractTriangular) mul!(C, A, B) - @deprecate A_mul_Bt!(C::AbstractVecOrMat, A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, A, transpose(B)) - @deprecate A_mul_Bc!(C::AbstractMatrix, A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, A, adjoint(B)) - @deprecate A_mul_Bc!(C::AbstractVecOrMat, A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, A, adjoint(B)) -end -for mat in (:AbstractVector, :AbstractMatrix) - @eval Base.LinAlg begin - @deprecate Ac_mul_B(A::AbstractTriangular, B::$mat) (*)(adjoint(A), B) - @deprecate At_mul_B(A::AbstractTriangular, B::$mat) (*)(transpose(A), B) - @deprecate Ac_ldiv_B(A::Union{UnitUpperTriangular,UnitLowerTriangular}, B::$mat) (\)(adjoint(A), B) - @deprecate At_ldiv_B(A::Union{UnitUpperTriangular,UnitLowerTriangular}, B::$mat) (\)(transpose(A), B) - @deprecate 
Ac_ldiv_B(A::Union{UpperTriangular,LowerTriangular}, B::$mat) (\)(adjoint(A), B) - @deprecate At_ldiv_B(A::Union{UpperTriangular,LowerTriangular}, B::$mat) (\)(transpose(A), B) - @deprecate A_rdiv_Bc(A::$mat, B::Union{UnitUpperTriangular, UnitLowerTriangular}) (/)(A, adjoint(B)) - @deprecate A_rdiv_Bt(A::$mat, B::Union{UnitUpperTriangular, UnitLowerTriangular}) (/)(A, transpose(B)) - @deprecate A_rdiv_Bc(A::$mat, B::Union{UpperTriangular,LowerTriangular}) (/)(A, adjoint(B)) - @deprecate A_rdiv_Bt(A::$mat, B::Union{UpperTriangular,LowerTriangular}) (/)(A, transpose(B)) - end -end -@eval Base.LinAlg begin - @deprecate A_mul_Bc(A::AbstractMatrix, B::AbstractTriangular) (*)(A, adjoint(B)) - @deprecate A_mul_Bt(A::AbstractMatrix, B::AbstractTriangular) (*)(A, transpose(B)) -end -for (f, op, transform) in ( - (:A_mul_Bc, :*, :adjoint), - (:A_mul_Bt, :*, :transpose), - (:A_rdiv_Bc, :/, :adjoint), - (:A_rdiv_Bt, :/, :transpose)) - @eval Base.LinAlg begin - @deprecate $f(A::LowerTriangular, B::UpperTriangular) ($op)(A, ($transform)(B)) - @deprecate $f(A::LowerTriangular, B::UnitUpperTriangular) ($op)(A, ($transform)(B)) - @deprecate $f(A::UpperTriangular, B::LowerTriangular) ($op)(A, ($transform)(B)) - @deprecate $f(A::UpperTriangular, B::UnitLowerTriangular) ($op)(A, ($transform)(B)) - end -end -for (f, op, transform) in ( - (:Ac_mul_B, :*, :adjoint), - (:At_mul_B, :*, :transpose), - (:Ac_ldiv_B, :\, :adjoint), - (:At_ldiv_B, :\, :transpose)) - @eval Base.LinAlg begin - @deprecate ($f)(A::UpperTriangular, B::LowerTriangular) ($op)(($transform)(A), B) - @deprecate ($f)(A::UnitUpperTriangular, B::LowerTriangular) ($op)(($transform)(A), B) - @deprecate ($f)(A::LowerTriangular, B::UpperTriangular) ($op)(($transform)(A), B) - @deprecate ($f)(A::UnitLowerTriangular, B::UpperTriangular) ($op)(($transform)(A), B) - end -end -for (t, uploc, isunitc) in ((:LowerTriangular, 'L', 'N'), - (:UnitLowerTriangular, 'L', 'U'), - (:UpperTriangular, 'U', 'N'), - (:UnitUpperTriangular, 'U', 'U')) - @eval Base.LinAlg begin - # Vector multiplication - @deprecate A_mul_B!(A::$t{T,<:StridedMatrix}, b::StridedVector{T}) where {T<:BlasFloat} mul!(A, b) - @deprecate At_mul_B!(A::$t{T,<:StridedMatrix}, b::StridedVector{T}) where {T<:BlasFloat} mul!(transpose(A), b) - @deprecate Ac_mul_B!(A::$t{T,<:StridedMatrix}, b::StridedVector{T}) where {T<:BlasReal} mul!(adjoint(A), b) - @deprecate Ac_mul_B!(A::$t{T,<:StridedMatrix}, b::StridedVector{T}) where {T<:BlasComplex} mul!(adjoint(A), b) - - # Matrix multiplication - @deprecate A_mul_B!(A::$t{T,<:StridedMatrix}, B::StridedMatrix{T}) where {T<:BlasFloat} mul!(A, B) - @deprecate A_mul_B!(A::StridedMatrix{T}, B::$t{T,<:StridedMatrix}) where {T<:BlasFloat} mul!(A, B) - - @deprecate At_mul_B!(A::$t{T,<:StridedMatrix}, B::StridedMatrix{T}) where {T<:BlasFloat} mul!(transpose(A), B) - @deprecate Ac_mul_B!(A::$t{T,<:StridedMatrix}, B::StridedMatrix{T}) where {T<:BlasComplex} mul!(adjoint(A), B) - @deprecate Ac_mul_B!(A::$t{T,<:StridedMatrix}, B::StridedMatrix{T}) where {T<:BlasReal} mul!(adjoint(A), B) - - @deprecate A_mul_Bt!(A::StridedMatrix{T}, B::$t{T,<:StridedMatrix}) where {T<:BlasFloat} mul!(A, transpose(B)) - @deprecate A_mul_Bc!(A::StridedMatrix{T}, B::$t{T,<:StridedMatrix}) where {T<:BlasComplex} mul!(A, adjoint(B)) - @deprecate A_mul_Bc!(A::StridedMatrix{T}, B::$t{T,<:StridedMatrix}) where {T<:BlasReal} mul!(A, adjoint(B)) - - # Left division - @deprecate A_ldiv_B!(A::$t{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} ldiv!(A, B) - @deprecate 
At_ldiv_B!(A::$t{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} ldiv!(transpose(A), B) - @deprecate Ac_ldiv_B!(A::$t{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasReal} ldiv!(adjoint(A), B) - @deprecate Ac_ldiv_B!(A::$t{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasComplex} ldiv!(adjoint(A), B) - - # Right division - @deprecate A_rdiv_B!(A::StridedMatrix{T}, B::$t{T,<:StridedMatrix}) where {T<:BlasFloat} rdiv!(A, B) - @deprecate A_rdiv_Bt!(A::StridedMatrix{T}, B::$t{T,<:StridedMatrix}) where {T<:BlasFloat} rdiv!(A, transpose(B)) - @deprecate A_rdiv_Bc!(A::StridedMatrix{T}, B::$t{T,<:StridedMatrix}) where {T<:BlasReal} rdiv!(A, adjoint(B)) - @deprecate A_rdiv_Bc!(A::StridedMatrix{T}, B::$t{T,<:StridedMatrix}) where {T<:BlasComplex} rdiv!(A, adjoint(B)) - end -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/rowvector.jl, to deprecate -@eval Base.LinAlg begin - @deprecate A_rdiv_Bt(rowvec::RowVector, mat::AbstractMatrix) (/)(rowvec, transpose(mat)) - @deprecate A_rdiv_Bc(rowvec::RowVector, mat::AbstractMatrix) (/)(rowvec, adjoint(mat)) - @deprecate At_ldiv_B(mat::AbstractMatrix, rowvec::RowVector) (\)(transpose(mat), rowvec) - @deprecate Ac_ldiv_B(mat::AbstractMatrix, rowvec::RowVector) (\)(adjoint(mat), rowvec) - @deprecate Ac_mul_B(u::RowVector, v::AbstractVector) (*)(adjoint(u), v) - @deprecate Ac_mul_B(vec::AbstractVector, mat::AbstractMatrix) (*)(adjoint(vec), mat) - @deprecate Ac_mul_B(rowvec1::RowVector, rowvec2::RowVector) (*)(adjoint(rowvec1), rowvec2) - @deprecate Ac_mul_B(vec::AbstractVector, rowvec::RowVector) (*)(adjoint(vec), rowvec) - @deprecate Ac_mul_B(vec1::AbstractVector, vec2::AbstractVector) (*)(adjoint(vec1), vec2) - @deprecate Ac_mul_Bc(rowvec::RowVector, vec::AbstractVector) (*)(adjoint(rowvec), adjoint(vec)) - @deprecate Ac_mul_Bc(vec::AbstractVector, mat::AbstractMatrix) (*)(adjoint(vec), adjoint(mat)) - @deprecate Ac_mul_Bc(rowvec1::RowVector, rowvec2::RowVector) (*)(adjoint(rowvec1), adjoint(rowvec2)) - @deprecate Ac_mul_Bc(vec::AbstractVector, rowvec::RowVector) (*)(adjoint(vec), adjoint(rowvec)) - @deprecate Ac_mul_Bc(vec::AbstractVector, rowvec::AbstractVector) (*)(adjoint(vec), adjoint(rowvec)) - @deprecate Ac_mul_Bc(mat::AbstractMatrix, rowvec::RowVector) (*)(adjoint(mat), adjoint(rowvec)) - @deprecate A_mul_Bc(u::RowVector, v::AbstractVector) (*)(u, adjoint(v)) - @deprecate A_mul_Bc(rowvec::RowVector, mat::AbstractMatrix) (*)(rowvec, adjoint(mat)) - @deprecate A_mul_Bc(rowvec1::RowVector, rowvec2::RowVector) (*)(rowvec1, adjoint(rowvec2)) - @deprecate A_mul_Bc(vec::AbstractVector, rowvec::RowVector) (*)(vec, adjoint(rowvec)) - @deprecate A_mul_Bc(vec1::AbstractVector, vec2::AbstractVector) (*)(vec1, adjoint(vec2)) - @deprecate A_mul_Bc(mat::AbstractMatrix, rowvec::RowVector) (*)(mat, adjoint(rowvec)) - @deprecate At_mul_B(v::RowVector, u::AbstractVector) (*)(transpose(v), u) - @deprecate At_mul_B(vec::AbstractVector, mat::AbstractMatrix) (*)(transpose(vec), mat) - @deprecate At_mul_B(rowvec1::RowVector, rowvec2::RowVector) (*)(transpose(rowvec1), rowvec2) - @deprecate At_mul_B(vec::AbstractVector, rowvec::RowVector) (*)(transpose(vec), rowvec) - @deprecate At_mul_B(vec1::AbstractVector{T}, vec2::AbstractVector{T}) where {T<:Real} (*)(transpose(vec1), vec2) - @deprecate At_mul_B(vec1::AbstractVector, vec2::AbstractVector) (*)(transpose(vec1), vec2) - @deprecate At_mul_Bt(rowvec::RowVector, vec::AbstractVector) (*)(transpose(rowvec), transpose(vec)) - @deprecate At_mul_Bt(vec::AbstractVector, 
mat::AbstractMatrix) (*)(transpose(vec), transpose(mat)) - @deprecate At_mul_Bt(rowvec1::RowVector, rowvec2::RowVector) (*)(transpose(rowvec1), transpose(rowvec2)) - @deprecate At_mul_Bt(vec::AbstractVector, rowvec::RowVector) (*)(transpose(vec), transpose(rowvec)) - @deprecate At_mul_Bt(vec::AbstractVector, rowvec::AbstractVector) (*)(transpose(vec), transpose(rowvec)) - @deprecate At_mul_Bt(mat::AbstractMatrix, rowvec::RowVector) (*)(transpose(mat), transpose(rowvec)) - @deprecate A_mul_Bt(v::RowVector, A::AbstractVector) (*)(v, transpose(A)) - @deprecate A_mul_Bt(rowvec::RowVector, mat::AbstractMatrix) (*)(rowvec, transpose(mat)) - @deprecate A_mul_Bt(rowvec1::RowVector, rowvec2::RowVector) (*)(rowvec1, transpose(rowvec2)) - @deprecate A_mul_Bt(vec::AbstractVector, rowvec::RowVector) (*)(vec, transpose(rowvec)) - @deprecate A_mul_Bt(vec1::AbstractVector, vec2::AbstractVector) (*)(vec1, transpose(vec2)) - @deprecate A_mul_Bt(mat::AbstractMatrix, rowvec::RowVector) (*)(mat, transpose(rowvec)) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/givens.jl, to deprecate -@eval Base.LinAlg begin - @deprecate A_mul_Bc!(A::AbstractMatrix, R::Rotation) mul!(A, adjoint(R)) - @deprecate A_mul_B!(R::Rotation, A::AbstractMatrix) mul!(R, A) - @deprecate A_mul_B!(G::Givens, R::Rotation) mul!(G, R) - @deprecate A_mul_Bc!(A::AbstractMatrix, G::Givens) mul!(A, adjoint(G)) - @deprecate A_mul_B!(G::Givens, A::AbstractVecOrMat) mul!(G, A) - @deprecate A_mul_B!(G1::Givens, G2::Givens) mul!(G1, G2) - @deprecate A_mul_Bc(A::AbstractVecOrMat{T}, R::AbstractRotation{S}) where {T,S} (*)(A, adjoint(R)) -end - - -# methods involving RowVector from base/linalg/bidiag.jl, to deprecate -@eval Base.LinAlg begin - \(::Diagonal, ::RowVector) = _mat_ldiv_rowvec_error() - \(::Bidiagonal, ::RowVector) = _mat_ldiv_rowvec_error() - \(::Bidiagonal{<:Number}, ::RowVector{<:Number}) = _mat_ldiv_rowvec_error() - \(::Adjoint{<:Any,<:Bidiagonal}, ::RowVector) = _mat_ldiv_rowvec_error() - \(::Transpose{<:Any,<:Bidiagonal}, ::RowVector) = _mat_ldiv_rowvec_error() - \(::Adjoint{<:Number,<:Bidiagonal{<:Number}}, ::RowVector{<:Number}) = _mat_ldiv_rowvec_error() - \(::Transpose{<:Number,<:Bidiagonal{<:Number}}, ::RowVector{<:Number}) = _mat_ldiv_rowvec_error() - _mat_ldiv_rowvec_error() = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) -end - -# methods involving RowVector from base/linalg/diagonal.jl, to deprecate -@eval Base.LinAlg begin - *(rowvec::RowVector, D::Diagonal) = rvtranspose(D * rvtranspose(rowvec)) # seems potentially incorrect without also transposing D? - *(D::Diagonal, transrowvec::Transpose{<:Any,<:RowVector}) = (rowvec = transrowvec.parent; D*rvtranspose(rowvec)) - *(D::Diagonal, adjrowvec::Adjoint{<:Any,<:RowVector}) = (rowvec = adjrowvec.parent; D*rvadjoint(rowvec)) -end - -# methods involving RowVector from base/linalg/qr.jl, to deprecate -@eval Base.LinAlg begin - *(rowvec::RowVector, adjB::Adjoint{<:Any,<:AbstractQ}) = (B = adjB.parent; rvadjoint(B*rvadjoint(rowvec))) -end - -# methods involving RowVector from base/linalg/qr.jl, to deprecate -@eval Base.LinAlg begin - *(A::RowVector, B::Adjoint{<:Any,<:AbstractRotation}) = A * adjoint(B.parent) -end - -# methods involving RowVector from base/linalg/generic.jl, to deprecate -@eval Base.LinAlg begin - """ - norm(A::RowVector, q::Real=2) - - For row vectors, return the ``q``-norm of `A`, which is equivalent to the p-norm with - value `p = q/(q-1)`. They coincide at `p = q = 2`. 
- - The difference in norm between a vector space and its dual arises to preserve - the relationship between duality and the inner product, and the result is - consistent with the p-norm of `1 × n` matrix. - - # Examples - ```jldoctest - julia> v = [1; im]; - - julia> vc = RowVector(v); - - julia> norm(vc, 1) - 1.0 - - julia> norm(v, 1) - 2.0 - - julia> norm(vc, 2) - 1.4142135623730951 - - julia> norm(v, 2) - 1.4142135623730951 - - julia> norm(vc, Inf) - 2.0 - - julia> norm(v, Inf) - 1.0 - ``` - """ - norm(tv::RowVector, q::Real) = q == Inf ? norm(rvtranspose(tv), 1) : norm(rvtranspose(tv), q/(q-1)) - norm(tv::RowVector) = norm(rvtranspose(tv)) -end - -# methods involving RowVector from base/linalg/factorization.jl, to deprecate -@eval Base.LinAlg begin - \(A::Adjoint{<:Any,<:Factorization}, B::RowVector) = adjoint(A.parent) \ B - \(A::Transpose{<:Any,<:Factorization}, B::RowVector) = transpose(A.parent) \ B - \(A::Transpose{<:Any,<:Factorization{<:Real}}, B::RowVector) = transpose(A.parent) \ B -end - -# methods involving RowVector from base/linalg/symmetric.jl, to deprecate -@eval Base.LinAlg begin - *(A::RowVector, transB::Transpose{<:Any,<:RealHermSymComplexSym}) = A * transB.parent - *(A::RowVector, adjB::Adjoint{<:Any,<:RealHermSymComplexHerm}) = A * adjB.parent - \(A::HermOrSym{<:Any,<:StridedMatrix}, B::RowVector) = invoke(\, Tuple{AbstractMatrix, RowVector}, A, B) - *(A::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::Adjoint{<:Any,<:RowVector}) = A.parent * B - *(A::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::Transpose{<:Any,<:RowVector}) = A.parent * B - *(A::Transpose{<:Any,<:RealHermSymComplexSym}, B::Adjoint{<:Any,<:RowVector}) = A.parent * B - *(A::Transpose{<:Any,<:RealHermSymComplexSym}, B::Transpose{<:Any,<:RowVector}) = A.parent * B -end - -# methods involving RowVector from base/linalg/triangular.jl, to deprecate -@eval Base.LinAlg begin - *(rowvec::RowVector, A::AbstractTriangular) = rvtranspose(transpose(A) * rvtranspose(rowvec)) - *(rowvec::RowVector, transA::Transpose{<:Any,<:AbstractTriangular}) = rvtranspose(transA.parent * rvtranspose(rowvec)) - *(A::AbstractTriangular, transrowvec::Transpose{<:Any,<:RowVector}) = A * rvtranspose(transrowvec.parent) - *(transA::Transpose{<:Any,<:AbstractTriangular}, transrowvec::Transpose{<:Any,<:RowVector}) = transA * rvtranspose(transrowvec.parent) - *(rowvec::RowVector, adjA::Adjoint{<:Any,<:AbstractTriangular}) = rvadjoint(adjA.parent * rvadjoint(rowvec)) - *(A::AbstractTriangular, adjrowvec::Adjoint{<:Any,<:RowVector}) = A * rvadjoint(adjrowvec.parent) - *(adjA::Adjoint{<:Any,<:AbstractTriangular}, adjrowvec::Adjoint{<:Any,<:RowVector}) = adjA * rvadjoint(adjrowvec.parent) - \(::Union{UpperTriangular,LowerTriangular}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) - \(::Union{UnitUpperTriangular,UnitLowerTriangular}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) - \(::Adjoint{<:Any,<:Union{UpperTriangular,LowerTriangular}}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) - \(::Adjoint{<:Any,<:Union{UnitUpperTriangular,UnitLowerTriangular}}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) - \(::Transpose{<:Any,<:Union{UpperTriangular,LowerTriangular}}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) - \(::Transpose{<:Any,<:Union{UnitUpperTriangular,UnitLowerTriangular}}, ::RowVector) = throw(DimensionMismatch("Cannot 
left-divide matrix by transposed vector")) - /(rowvec::RowVector, A::Union{UpperTriangular,LowerTriangular}) = rvtranspose(transpose(A) \ rvtranspose(rowvec)) - /(rowvec::RowVector, A::Union{UnitUpperTriangular,UnitLowerTriangular}) = rvtranspose(transpose(A) \ rvtranspose(rowvec)) - /(rowvec::RowVector, transA::Transpose{<:Any,<:Union{UpperTriangular,LowerTriangular}}) = rvtranspose(transA.parent \ rvtranspose(rowvec)) - /(rowvec::RowVector, transA::Transpose{<:Any,<:Union{UnitUpperTriangular,UnitLowerTriangular}}) = rvtranspose(transA.parent \ rvtranspose(rowvec)) - /(rowvec::RowVector, adjA::Adjoint{<:Any,<:Union{UpperTriangular,LowerTriangular}}) = /(rowvec, adjoint(adjA.parent)) - /(rowvec::RowVector, adjA::Adjoint{<:Any,<:Union{UnitUpperTriangular,UnitLowerTriangular}}) = /(rowvec, adjoint(adjA.parent)) - *(A::Adjoint{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:RowVector}) = A * rvtranspose(B.parent) - *(A::Transpose{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:RowVector}) = A * rvadjoint(B.parent) -end # issue #24822 @deprecate_binding Display AbstractDisplay @@ -2615,6 +1246,153 @@ end @deprecate_moved spones "SparseArrays" true true @deprecate_moved speye "SparseArrays" true true +# PR #25571: LinearAlgebra to stdlib +## the LinearAlgebra module itself is deprecated base/sysimg.jl + +## functions that where re-exported from Base +@deprecate_moved bkfact! "LinearAlgebra" true true +@deprecate_moved bkfact "LinearAlgebra" true true +@deprecate_moved chol "LinearAlgebra" true true +@deprecate_moved cholfact! "LinearAlgebra" true true +@deprecate_moved cholfact "LinearAlgebra" true true +@deprecate_moved cond "LinearAlgebra" true true +@deprecate_moved condskeel "LinearAlgebra" true true +@deprecate_moved cross "LinearAlgebra" true true +@deprecate_moved adjoint! "LinearAlgebra" true true +# @deprecate_moved adjoint "LinearAlgebra" true true +@deprecate_moved det "LinearAlgebra" true true +@deprecate_moved diag "LinearAlgebra" true true +@deprecate_moved diagind "LinearAlgebra" true true +@deprecate_moved diagm "LinearAlgebra" true true +@deprecate_moved diff "LinearAlgebra" true true +@deprecate_moved dot "LinearAlgebra" true true +@deprecate_moved eig "LinearAlgebra" true true +@deprecate_moved eigfact! "LinearAlgebra" true true +@deprecate_moved eigfact "LinearAlgebra" true true +@deprecate_moved eigmax "LinearAlgebra" true true +@deprecate_moved eigmin "LinearAlgebra" true true +@deprecate_moved eigvals "LinearAlgebra" true true +@deprecate_moved eigvals! "LinearAlgebra" true true +@deprecate_moved eigvecs "LinearAlgebra" true true +@deprecate_moved factorize "LinearAlgebra" true true +@deprecate_moved givens "LinearAlgebra" true true +@deprecate_moved hessfact! "LinearAlgebra" true true +@deprecate_moved hessfact "LinearAlgebra" true true +@deprecate_moved isdiag "LinearAlgebra" true true +@deprecate_moved ishermitian "LinearAlgebra" true true +@deprecate_moved isposdef! "LinearAlgebra" true true +@deprecate_moved isposdef "LinearAlgebra" true true +@deprecate_moved issymmetric "LinearAlgebra" true true +@deprecate_moved istril "LinearAlgebra" true true +@deprecate_moved istriu "LinearAlgebra" true true +# @deprecate_moved kron "LinearAlgebra" true true +@deprecate_moved ldltfact "LinearAlgebra" true true +@deprecate_moved ldltfact! 
"LinearAlgebra" true true +@deprecate_moved linreg "LinearAlgebra" true true +@deprecate_moved logabsdet "LinearAlgebra" true true +@deprecate_moved logdet "LinearAlgebra" true true +@deprecate_moved lu "LinearAlgebra" true true +@deprecate_moved lufact! "LinearAlgebra" true true +@deprecate_moved lufact "LinearAlgebra" true true +@deprecate_moved lyap "LinearAlgebra" true true +@deprecate_moved norm "LinearAlgebra" true true +@deprecate_moved normalize "LinearAlgebra" true true +@deprecate_moved normalize! "LinearAlgebra" true true +@deprecate_moved nullspace "LinearAlgebra" true true +@deprecate_moved ordschur! "LinearAlgebra" true true +@deprecate_moved ordschur "LinearAlgebra" true true +@deprecate_moved peakflops "LinearAlgebra" true true +@deprecate_moved pinv "LinearAlgebra" true true +@deprecate_moved qr "LinearAlgebra" true true +@deprecate_moved qrfact! "LinearAlgebra" true true +@deprecate_moved qrfact "LinearAlgebra" true true +@deprecate_moved lq "LinearAlgebra" true true +@deprecate_moved lqfact! "LinearAlgebra" true true +@deprecate_moved lqfact "LinearAlgebra" true true +@deprecate_moved rank "LinearAlgebra" true true +@deprecate_moved scale! "LinearAlgebra" true true +@deprecate_moved schur "LinearAlgebra" true true +@deprecate_moved schurfact! "LinearAlgebra" true true +@deprecate_moved schurfact "LinearAlgebra" true true +@deprecate_moved svd "LinearAlgebra" true true +@deprecate_moved svdfact! "LinearAlgebra" true true +@deprecate_moved svdfact "LinearAlgebra" true true +@deprecate_moved svdvals! "LinearAlgebra" true true +@deprecate_moved svdvals "LinearAlgebra" true true +@deprecate_moved sylvester "LinearAlgebra" true true +@deprecate_moved trace "LinearAlgebra" true true +@deprecate_moved transpose! "LinearAlgebra" true true +# @deprecate_moved transpose "LinearAlgebra" true true +@deprecate_moved tril! "LinearAlgebra" true true +@deprecate_moved tril "LinearAlgebra" true true +@deprecate_moved triu! "LinearAlgebra" true true +@deprecate_moved triu "LinearAlgebra" true true +@deprecate_moved vecdot "LinearAlgebra" true true +@deprecate_moved vecnorm "LinearAlgebra" true true +# @deprecate_moved ⋅ "LinearAlgebra" true true +# @deprecate_moved × "LinearAlgebra" true true + +## types that where re-exported from Base +@deprecate_moved Diagonal "LinearAlgebra" true true +@deprecate_moved Bidiagonal "LinearAlgebra" true true +@deprecate_moved Tridiagonal "LinearAlgebra" true true +@deprecate_moved SymTridiagonal "LinearAlgebra" true true +@deprecate_moved UpperTriangular "LinearAlgebra" true true +@deprecate_moved LowerTriangular "LinearAlgebra" true true +@deprecate_moved Symmetric "LinearAlgebra" true true +@deprecate_moved Hermitian "LinearAlgebra" true true +@deprecate_moved Factorization "LinearAlgebra" true true +@deprecate_moved UniformScaling "LinearAlgebra" true true +@deprecate_moved Adjoint "LinearAlgebra" true true +@deprecate_moved Transpose "LinearAlgebra" true true +@deprecate_moved I "LinearAlgebra" true true +@deprecate_moved A_mul_B! "LinearAlgebra" true true +@deprecate_moved A_mul_Bt! "LinearAlgebra" true true +@deprecate_moved At_mul_B! "LinearAlgebra" true true +@deprecate_moved At_mul_Bt! "LinearAlgebra" true true +@deprecate_moved A_mul_Bc! "LinearAlgebra" true true +@deprecate_moved Ac_mul_B! "LinearAlgebra" true true +@deprecate_moved Ac_mul_Bc! "LinearAlgebra" true true +@deprecate_moved A_ldiv_B! "LinearAlgebra" true true +@deprecate_moved At_ldiv_B! "LinearAlgebra" true true +@deprecate_moved Ac_ldiv_B! 
"LinearAlgebra" true true + +## functions that where exported from Base.LinAlg but not from Base +@deprecate_moved axpy! "LinearAlgebra" false true +@deprecate_moved axpby! "LinearAlgebra" false true +@deprecate_moved copy_transpose! "LinearAlgebra" false true +@deprecate_moved issuccess "LinearAlgebra" false true +@deprecate_moved transpose_type "LinearAlgebra" false true + +## types that where exported from Base.LinAlg but not from Base +@deprecate_moved BunchKaufman "LinearAlgebra" false true +@deprecate_moved Cholesky "LinearAlgebra" false true +@deprecate_moved CholeskyPivoted "LinearAlgebra" false true +@deprecate_moved Eigen "LinearAlgebra" false true +@deprecate_moved GeneralizedEigen "LinearAlgebra" false true +@deprecate_moved GeneralizedSVD "LinearAlgebra" false true +@deprecate_moved GeneralizedSchur "LinearAlgebra" false true +@deprecate_moved Hessenberg "LinearAlgebra" false true +@deprecate_moved LU "LinearAlgebra" false true +@deprecate_moved LDLt "LinearAlgebra" false true +@deprecate_moved QR "LinearAlgebra" false true +@deprecate_moved QRPivoted "LinearAlgebra" false true +@deprecate_moved LQ "LinearAlgebra" false true +@deprecate_moved Schur "LinearAlgebra" false true +@deprecate_moved SVD "LinearAlgebra" false true + +## deprecated functions that are moved to stdlib/LinearAlgebra/src/deprecated.jl +@deprecate_moved eye "LinearAlgebra" true true +@deprecate_moved sqrtm "LinearAlgebra" true true +@deprecate_moved expm "LinearAlgebra" true true +@deprecate_moved expm! "LinearAlgebra" true true +@deprecate_moved logm "LinearAlgebra" true true +@deprecate_moved gradient "LinearAlgebra" true true +@deprecate_moved ConjArray "LinearAlgebra" true true +@deprecate_moved ConjVector "LinearAlgebra" true true +@deprecate_moved ConjMatrix "LinearAlgebra" true true +@deprecate_moved RowVector "LinearAlgebra" true true + # PR #25021 @deprecate_moved normalize_string "Unicode" true true @@ -2698,15 +1476,6 @@ workspace() = error("`workspace()` is discontinued, consider Revise.jl for an al @deprecate Ref(x::Ptr) Ref(x, 1) @deprecate Ref(x::Ref) x # or perhaps, `convert(Ref, x)` -# PR #25184. Use getproperty instead of getindex for Factorizations -function getindex(F::Factorization, s::Symbol) - depwarn("`F[:$s]` is deprecated, use `F.$s` instead.", :getindex) - return getproperty(F, s) -end -@eval Base.LinAlg begin - @deprecate getq(F::Factorization) F.Q -end - # Issues #17812 Remove default stride implementation function strides(a::AbstractArray) depwarn(""" diff --git a/base/essentials.jl b/base/essentials.jl index fb4608fdaa2a3b..351496d6ba04cf 100644 --- a/base/essentials.jl +++ b/base/essentials.jl @@ -371,7 +371,7 @@ julia> sizeof(ComplexF64) If `T` does not have a specific size, an error is thrown. ```jldoctest -julia> sizeof(Base.LinAlg.LU) +julia> sizeof(Base.SubArray) ERROR: argument is an abstract type; size is indeterminate Stacktrace: [...] 
diff --git a/base/exports.jl b/base/exports.jl index 0bafae38c9349f..a349ac7c8a3061 100644 --- a/base/exports.jl +++ b/base/exports.jl @@ -8,9 +8,6 @@ export StackTraces, Sys, Libc, - LinAlg, - BLAS, - LAPACK, Serializer, Docs, Markdown, @@ -29,7 +26,6 @@ export AbstractVecOrMat, Array, AbstractDict, - Bidiagonal, BigFloat, BigInt, BitArray, @@ -46,22 +42,16 @@ export ComplexF64, ComplexF32, ComplexF16, - ConjVector, - ConjMatrix, DenseMatrix, DenseVecOrMat, DenseVector, DevNull, - Diagonal, Dict, Dims, EachLine, Enum, Enumerate, ExponentialBackOff, - Factorization, - Hermitian, - UniformScaling, IndexCartesian, IndexLinear, IndexStyle, @@ -70,7 +60,6 @@ export IOBuffer, IOStream, LinSpace, - LowerTriangular, Irrational, Matrix, MergeSort, @@ -94,8 +83,6 @@ export RoundNearestTiesUp, RoundToZero, RoundUp, - Adjoint, - Transpose, AbstractSerializer, SerializationState, Set, @@ -108,12 +95,8 @@ export StridedVector, SubArray, SubString, - Symmetric, - SymTridiagonal, Timer, - Tridiagonal, UnitRange, - UpperTriangular, Val, VecOrMat, Vector, @@ -178,7 +161,6 @@ export im, π, pi, ℯ, - I, # Operators !, @@ -501,87 +483,9 @@ export startswith, # linear algebra - bkfact!, - bkfact, - chol, - cholfact!, - cholfact, - cond, - condskeel, - cross, - adjoint!, adjoint, - det, - diag, - diagind, - diagm, - diff, - dot, - eig, - eigfact!, - eigfact, - eigmax, - eigmin, - eigvals, - eigvals!, - eigvecs, - factorize, - givens, - hessfact!, - hessfact, - isdiag, - ishermitian, - isposdef!, - isposdef, - issymmetric, - istril, - istriu, - kron, - ldltfact, - ldltfact!, - linreg, - logabsdet, - logdet, - lu, - lufact!, - lufact, - lyap, - norm, - normalize, - normalize!, - nullspace, - ordschur!, - ordschur, - peakflops, - pinv, - qr, - qrfact!, - qrfact, - lq, - lqfact!, - lqfact, - rank, - scale!, - schur, - schurfact!, - schurfact, - svd, - svdfact!, - svdfact, - svdvals!, - svdvals, - sylvester, - trace, - transpose!, transpose, - tril!, - tril, - triu!, - triu, - vecdot, - vecnorm, - ⋅, - ×, + kron, # bitarrays falses, diff --git a/base/interactiveutil.jl b/base/interactiveutil.jl index 68a92561c2cae9..88edf8e679f2c0 100644 --- a/base/interactiveutil.jl +++ b/base/interactiveutil.jl @@ -322,13 +322,13 @@ function versioninfo(io::IO=STDOUT; verbose::Bool=false, packages::Bool=false) println(io) end println(io, " WORD_SIZE: ", Sys.WORD_SIZE) - if Base.libblas_name == "libopenblas" || BLAS.vendor() == :openblas || BLAS.vendor() == :openblas64 - openblas_config = BLAS.openblas_get_config() - println(io, " BLAS: libopenblas (", openblas_config, ")") - else - println(io, " BLAS: ",libblas_name) - end - println(io, " LAPACK: ",liblapack_name) + # if Base.libblas_name == "libopenblas" || BLAS.vendor() == :openblas || BLAS.vendor() == :openblas64 + # openblas_config = BLAS.openblas_get_config() + # println(io, " BLAS: libopenblas (", openblas_config, ")") + # else + # println(io, " BLAS: ",libblas_name) + # end + # println(io, " LAPACK: ",liblapack_name) println(io, " LIBM: ",libm_name) println(io, " LLVM: libLLVM-",libllvm_version," (", Sys.JIT, ", ", Sys.CPU_NAME, ")") diff --git a/base/math.jl b/base/math.jl index dc0d1b276300cc..1aece123751ffc 100644 --- a/base/math.jl +++ b/base/math.jl @@ -506,7 +506,7 @@ end Compute the hypotenuse ``\\sqrt{\\sum x_i^2}`` avoiding overflow and underflow. """ -hypot(x::Number...) = vecnorm(x) +hypot(x::Number...) 
= sqrt(sum(abs2(y) for y in x)) """ atan2(y, x) diff --git a/base/precompile.jl b/base/precompile.jl index 64f076324ff7b1..3d2c0f32399ae7 100644 --- a/base/precompile.jl +++ b/base/precompile.jl @@ -1234,7 +1234,7 @@ precompile(Tuple{typeof(Base.join), Base.GenericIOBuffer{Array{UInt8, 1}}, Tuple precompile(Tuple{typeof(Base.setindex!), Base.Dict{Int64, Nothing}, Nothing, Int64}) precompile(Tuple{Type{Array{Union{Tuple{Any, Int64}, Tuple{Tuple{}, Any, Bool}}, 1}}, Tuple{Int64}}) precompile(Tuple{typeof(Base.eachindex), Array{Union{Tuple{Any, Int64}, Tuple{Tuple{}, Any, Bool}}, 1}}) -precompile(Tuple{typeof(Base.LinAlg.BLAS.set_num_threads), Int64}) +# precompile(Tuple{typeof(Base.LinearAlgebra.BLAS.set_num_threads), Int64}) precompile(Tuple{typeof(Base.eltype), Type{Base.Union{IO, Nothing}}}) precompile(Tuple{Type{Base.Union{IO, Nothing}}}) precompile(Tuple{typeof(Base.eltype), Type{Base.Union{AbstractString, Nothing}}}) diff --git a/base/reflection.jl b/base/reflection.jl index 97e6ba7eca6ff4..072167c645785c 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -9,8 +9,8 @@ Get the name of a `Module` as a `Symbol`. # Examples ```jldoctest -julia> module_name(Base.LinAlg) -:LinAlg +julia> module_name(Base.Broadcast) +:Broadcast ``` """ module_name(m::Module) = ccall(:jl_module_name, Ref{Symbol}, (Any,), m) @@ -25,8 +25,8 @@ Get a module's enclosing `Module`. `Main` is its own parent. julia> module_parent(Main) Main -julia> module_parent(Base.LinAlg.BLAS) -Base.LinAlg +julia> module_parent(Base.Broadcast) +Base ``` """ module_parent(m::Module) = ccall(:jl_module_parent, Ref{Module}, (Any,), m) diff --git a/base/statistics.jl b/base/statistics.jl index 80dff1d94e93dc..08cc5eb865733d 100644 --- a/base/statistics.jl +++ b/base/statistics.jl @@ -59,7 +59,8 @@ julia> mean!([1. 1.], v) """ function mean!(R::AbstractArray, A::AbstractArray) sum!(R, A; init=true) - scale!(R, max(1, _length(R)) // _length(A)) + x = max(1, _length(R)) // _length(A) + R .= R .* x return R end @@ -175,7 +176,8 @@ function varm!(R::AbstractArray{S}, A::AbstractArray, m::AbstractArray; correcte fill!(R, convert(S, NaN)) else rn = div(_length(A), _length(R)) - Int(corrected) - scale!(centralize_sumabs2!(R, A, m), 1//rn) + centralize_sumabs2!(R, A, m) + R .= R .* (1 // rn) end return R end @@ -328,7 +330,7 @@ unscaled_covzm(x::AbstractVector{<:Number}) = sum(abs2, x) unscaled_covzm(x::AbstractVector) = sum(t -> t*t', x) unscaled_covzm(x::AbstractMatrix, vardim::Int) = (vardim == 1 ? _conj(x'x) : x * x') -unscaled_covzm(x::AbstractVector, y::AbstractVector) = dot(y, x) +unscaled_covzm(x::AbstractVector, y::AbstractVector) = sum(conj(y[i])*x[i] for i in eachindex(y, x)) unscaled_covzm(x::AbstractVector, y::AbstractMatrix, vardim::Int) = (vardim == 1 ? 
*(transpose(x), _conj(y)) : *(transpose(x), transpose(_conj(y)))) unscaled_covzm(x::AbstractMatrix, y::AbstractVector, vardim::Int) = @@ -342,14 +344,20 @@ covzm(x::AbstractVector; corrected::Bool=true) = unscaled_covzm(x) / (_length(x) function covzm(x::AbstractMatrix, vardim::Int=1; corrected::Bool=true) C = unscaled_covzm(x, vardim) T = promote_type(typeof(first(C) / 1), eltype(C)) - return scale!(convert(AbstractMatrix{T}, C), 1//(size(x, vardim) - corrected)) + A = convert(AbstractMatrix{T}, C) + b = 1//(size(x, vardim) - corrected) + A .= A .* b + return A end covzm(x::AbstractVector, y::AbstractVector; corrected::Bool=true) = unscaled_covzm(x, y) / (_length(x) - Int(corrected)) function covzm(x::AbstractVecOrMat, y::AbstractVecOrMat, vardim::Int=1; corrected::Bool=true) C = unscaled_covzm(x, y, vardim) T = promote_type(typeof(first(C) / 1), eltype(C)) - return scale!(convert(AbstractArray{T}, C), 1//(_getnobs(x, y, vardim) - corrected)) + A = convert(AbstractArray{T}, C) + b = 1//(_getnobs(x, y, vardim) - corrected) + A .= A .* b + return A end # covm (with provided mean) @@ -467,7 +475,7 @@ end corzm(x::AbstractVector{T}) where {T} = one(real(T)) function corzm(x::AbstractMatrix, vardim::Int=1) c = unscaled_covzm(x, vardim) - return cov2cor!(c, sqrt!(diag(c))) + return cov2cor!(c, collect(sqrt(c[i,i]) for i in 1:min(size(c)...))) end corzm(x::AbstractVector, y::AbstractMatrix, vardim::Int=1) = cov2cor!(unscaled_covzm(x, y, vardim), sqrt(sum(abs2, x)), sqrt!(sum(abs2, y, vardim))) diff --git a/base/sysimg.jl b/base/sysimg.jl index c986891029ceed..af387fc2a47ece 100644 --- a/base/sysimg.jl +++ b/base/sysimg.jl @@ -460,12 +460,6 @@ import Base64 INCLUDE_STATE = 2 -# dense linear algebra -include("linalg/linalg.jl") -using .LinAlg -const ⋅ = dot -const × = cross - include("asyncmap.jl") include("multimedia.jl") @@ -520,6 +514,7 @@ Base.require(:FileWatching) Base.require(:Future) Base.require(:IterativeEigensolvers) Base.require(:Libdl) +Base.require(:LinearAlgebra) Base.require(:Logging) Base.require(:Mmap) Base.require(:Printf) @@ -551,10 +546,13 @@ Base.require(:Unicode) ", run `using SparseArrays` to load sparse array functionality") @deprecate_binding(SparseVector, root_module(:SparseArrays).SparseVector, true, ", run `using SparseArrays` to load sparse array functionality") + + # PR #25571 + @deprecate_binding LinAlg root_module(:LinearAlgebra) true ", run `using LinearAlgebra` instead" end empty!(LOAD_PATH) Base.isfile("userimg.jl") && Base.include(Main, "userimg.jl") -Base.include(Base, "precompile.jl") +# Base.include(Base, "precompile.jl") diff --git a/doc/REQUIRE b/doc/REQUIRE index f712c56ad6a54e..cda2edfe8f2021 100644 --- a/doc/REQUIRE +++ b/doc/REQUIRE @@ -1,3 +1,3 @@ -Compat 0.39.0 0.39.0+ -DocStringExtensions 0.4.1 0.4.1+ -Documenter 0.12.4 0.12.4+ +Compat +DocStringExtensions +Documenter diff --git a/doc/make.jl b/doc/make.jl index 5c6c812ea22006..8ffc3f40f57c93 100644 --- a/doc/make.jl +++ b/doc/make.jl @@ -4,6 +4,10 @@ Pkg.init() cp(joinpath(@__DIR__, "REQUIRE"), Pkg.dir("REQUIRE"); remove_destination = true) Pkg.update() Pkg.resolve() +Pkg.checkout("Documenter") +# Pkg.checkout("Documenter", "fe/deprecations") # can't push my branch to JuliaDocs... 
+Pkg.checkout("Compat") +Pkg.checkout("Compat", "fe/linalg") # to make Compat load using Documenter @@ -68,7 +72,6 @@ const PAGES = [ "manual/documentation.md", "manual/metaprogramming.md", "manual/arrays.md", - "manual/linear-algebra.md", "manual/missing.md", "manual/networking-and-streams.md", "manual/parallel-computing.md", @@ -98,7 +101,6 @@ const PAGES = [ "base/arrays.md", "base/parallel.md", "base/multi-threading.md", - "base/linalg.md", "base/constants.md", "base/file.md", "base/io-network.md", diff --git a/doc/src/base/arrays.md b/doc/src/base/arrays.md index c379ab6aa4ae4b..b5b89221506759 100644 --- a/doc/src/base/arrays.md +++ b/doc/src/base/arrays.md @@ -50,7 +50,6 @@ Base.IndexStyle Base.conj! Base.stride Base.strides -Base.LinAlg.checksquare ``` ## Broadcast and vectorization @@ -148,7 +147,7 @@ Base.cumprod Base.cumprod! Base.cumsum Base.cumsum! -Base.LinAlg.diff +LinearAlgebra.diff Base.repeat(::AbstractArray) Base.rot180 Base.rotl90 diff --git a/doc/src/base/base.md b/doc/src/base/base.md index 6d60c33900e943..3a1e14544f5d8e 100644 --- a/doc/src/base/base.md +++ b/doc/src/base/base.md @@ -86,13 +86,10 @@ primitive type ## Base Modules ```@docs -Base.BLAS Base.Docs Base.Iterators -Base.LAPACK Base.LibGit2 Base.Libc -Base.LinAlg Base.Markdown Base.Meta Base.Pkg diff --git a/doc/src/base/index.md b/doc/src/base/index.md index 7e19cb52132958..637a62a1c4c0c7 100644 --- a/doc/src/base/index.md +++ b/doc/src/base/index.md @@ -10,7 +10,6 @@ * [Distributed Computing](@ref) * [Shared Arrays](@ref) * [Multi-Threading](@ref) - * [Linear Algebra](@ref) * [Constants](@ref lib-constants) * [Filesystem](@ref) * [Delimited Files](@ref) diff --git a/doc/src/base/linalg.md b/doc/src/base/linalg.md deleted file mode 100644 index e6d500201aba2f..00000000000000 --- a/doc/src/base/linalg.md +++ /dev/null @@ -1,340 +0,0 @@ -# Linear Algebra - -## Standard Functions - -Linear algebra functions in Julia are largely implemented by calling functions from [LAPACK](http://www.netlib.org/lapack/). - Sparse factorizations call functions from [SuiteSparse](http://faculty.cse.tamu.edu/davis/suitesparse.html). - -```@docs -Base.:*(::AbstractMatrix, ::AbstractMatrix) -Base.:\(::AbstractMatrix, ::AbstractVecOrMat) -Base.LinAlg.dot -Base.LinAlg.vecdot -Base.LinAlg.cross -Base.LinAlg.factorize -Base.LinAlg.Diagonal -Base.LinAlg.Bidiagonal -Base.LinAlg.SymTridiagonal -Base.LinAlg.Tridiagonal -Base.LinAlg.Symmetric -Base.LinAlg.Hermitian -Base.LinAlg.LowerTriangular -Base.LinAlg.UpperTriangular -Base.LinAlg.UniformScaling -Base.LinAlg.lu -Base.LinAlg.lufact -Base.LinAlg.lufact! -Base.LinAlg.chol -Base.LinAlg.cholfact -Base.LinAlg.cholfact! -Base.LinAlg.lowrankupdate -Base.LinAlg.lowrankdowndate -Base.LinAlg.lowrankupdate! -Base.LinAlg.lowrankdowndate! -Base.LinAlg.ldltfact -Base.LinAlg.ldltfact! -Base.LinAlg.qr -Base.LinAlg.qr! -Base.LinAlg.qrfact -Base.LinAlg.qrfact! -Base.LinAlg.QR -Base.LinAlg.QRCompactWY -Base.LinAlg.QRPivoted -Base.LinAlg.lqfact! -Base.LinAlg.lqfact -Base.LinAlg.lq -Base.LinAlg.bkfact -Base.LinAlg.bkfact! -Base.LinAlg.eig -Base.LinAlg.eigvals -Base.LinAlg.eigvals! -Base.LinAlg.eigmax -Base.LinAlg.eigmin -Base.LinAlg.eigvecs -Base.LinAlg.eigfact -Base.LinAlg.eigfact! -Base.LinAlg.hessfact -Base.LinAlg.hessfact! -Base.LinAlg.schurfact -Base.LinAlg.schurfact! -Base.LinAlg.schur -Base.LinAlg.ordschur -Base.LinAlg.ordschur! -Base.LinAlg.svdfact -Base.LinAlg.svdfact! -Base.LinAlg.svd -Base.LinAlg.svdvals -Base.LinAlg.svdvals! 
-Base.LinAlg.Givens -Base.LinAlg.givens -Base.LinAlg.triu -Base.LinAlg.triu! -Base.LinAlg.tril -Base.LinAlg.tril! -Base.LinAlg.diagind -Base.LinAlg.diag -Base.LinAlg.diagm -Base.LinAlg.scale! -Base.LinAlg.rank -Base.LinAlg.norm -Base.LinAlg.vecnorm -Base.LinAlg.normalize! -Base.LinAlg.normalize -Base.LinAlg.cond -Base.LinAlg.condskeel -Base.LinAlg.trace -Base.LinAlg.det -Base.LinAlg.logdet -Base.LinAlg.logabsdet -Base.inv(::AbstractMatrix) -Base.LinAlg.pinv -Base.LinAlg.nullspace -Base.repmat -Base.kron -Base.LinAlg.linreg -Base.LinAlg.exp(::StridedMatrix{<:Base.LinAlg.BlasFloat}) -Base.LinAlg.log(::StridedMatrix) -Base.LinAlg.sqrt(::StridedMatrix{<:Real}) -Base.LinAlg.cos(::StridedMatrix{<:Real}) -Base.LinAlg.sin(::StridedMatrix{<:Real}) -Base.LinAlg.sincos(::StridedMatrix{<:Real}) -Base.LinAlg.tan(::StridedMatrix{<:Real}) -Base.LinAlg.sec(::StridedMatrix) -Base.LinAlg.csc(::StridedMatrix) -Base.LinAlg.cot(::StridedMatrix) -Base.LinAlg.cosh(::StridedMatrix) -Base.LinAlg.sinh(::StridedMatrix) -Base.LinAlg.tanh(::StridedMatrix) -Base.LinAlg.sech(::StridedMatrix) -Base.LinAlg.csch(::StridedMatrix) -Base.LinAlg.coth(::StridedMatrix) -Base.LinAlg.acos(::StridedMatrix) -Base.LinAlg.asin(::StridedMatrix) -Base.LinAlg.atan(::StridedMatrix) -Base.LinAlg.asec(::StridedMatrix) -Base.LinAlg.acsc(::StridedMatrix) -Base.LinAlg.acot(::StridedMatrix) -Base.LinAlg.acosh(::StridedMatrix) -Base.LinAlg.asinh(::StridedMatrix) -Base.LinAlg.atanh(::StridedMatrix) -Base.LinAlg.asech(::StridedMatrix) -Base.LinAlg.acsch(::StridedMatrix) -Base.LinAlg.acoth(::StridedMatrix) -Base.LinAlg.lyap -Base.LinAlg.sylvester -Base.LinAlg.issuccess -Base.LinAlg.issymmetric -Base.LinAlg.isposdef -Base.LinAlg.isposdef! -Base.LinAlg.istril -Base.LinAlg.istriu -Base.LinAlg.isdiag -Base.LinAlg.ishermitian -Base.LinAlg.RowVector -Base.LinAlg.ConjArray -Base.transpose -Base.transpose! -Base.adjoint -Base.adjoint! -Base.LinAlg.peakflops -Base.LinAlg.stride1 -``` - -## Low-level matrix operations - -In many cases there are in-place versions of matrix operations that allow you to supply -a pre-allocated output vector or matrix. This is useful when optimizing critical code in order -to avoid the overhead of repeated allocations. These in-place operations are suffixed with `!` -below (e.g. `mul!`) according to the usual Julia convention. - -```@docs -Base.LinAlg.mul! -Base.LinAlg.ldiv! -Base.LinAlg.rdiv! -``` - -## BLAS Functions - -In Julia (as in much of scientific computation), dense linear-algebra operations are based on -the [LAPACK library](http://www.netlib.org/lapack/), which in turn is built on top of basic linear-algebra -building-blocks known as the [BLAS](http://www.netlib.org/blas/). There are highly optimized -implementations of BLAS available for every computer architecture, and sometimes in high-performance -linear algebra routines it is useful to call the BLAS functions directly. - -`Base.LinAlg.BLAS` provides wrappers for some of the BLAS functions. Those BLAS functions -that overwrite one of the input arrays have names ending in `'!'`. Usually, a BLAS function has -four methods defined, for [`Float64`](@ref), [`Float32`](@ref), `ComplexF64`, and `ComplexF32` arrays. 
- -### [BLAS Character Arguments](@id stdlib-blas-chars) -Many BLAS functions accept arguments that determine whether to transpose an argument (`trans`), -which triangle of a matrix to reference (`uplo` or `ul`), -whether the diagonal of a triangular matrix can be assumed to -be all ones (`dA`) or which side of a matrix multiplication -the input argument belongs on (`side`). The possiblities are: - -#### [Multplication Order](@id stdlib-blas-side) -| `side` | Meaning | -|:-------|:--------------------------------------------------------------------| -| `'L'` | The argument goes on the *left* side of a matrix-matrix operation. | -| `'R'` | The argument goes on the *right* side of a matrix-matrix operation. | - -#### [Triangle Referencing](@id stdlib-blas-uplo) -| `uplo`/`ul` | Meaning | -|:------------|:------------------------------------------------------| -| `'U'` | Only the *upper* triangle of the matrix will be used. | -| `'L'` | Only the *lower* triangle of the matrix will be used. | - -#### [Transposition Operation](@id stdlib-blas-trans) -| `trans`/`tX` | Meaning | -|:-------------|:--------------------------------------------------------| -| `'N'` | The input matrix `X` is not transposed or conjugated. | -| `'T'` | The input matrix `X` will be transposed. | -| `'C'` | The input matrix `X` will be conjugated and transposed. | - -#### [Unit Diagonal](@id stdlib-blas-diag) -| `diag`/`dX` | Meaning | -|:------------|:----------------------------------------------------------| -| `'N'` | The diagonal values of the matrix `X` will be read. | -| `'U'` | The diagonal of the matrix `X` is assumed to be all ones. | - -```@docs -Base.LinAlg.BLAS.dotu -Base.LinAlg.BLAS.dotc -Base.LinAlg.BLAS.blascopy! -Base.LinAlg.BLAS.nrm2 -Base.LinAlg.BLAS.asum -Base.LinAlg.axpy! -Base.LinAlg.BLAS.scal! -Base.LinAlg.BLAS.scal -Base.LinAlg.BLAS.ger! -Base.LinAlg.BLAS.syr! -Base.LinAlg.BLAS.syrk! -Base.LinAlg.BLAS.syrk -Base.LinAlg.BLAS.her! -Base.LinAlg.BLAS.herk! -Base.LinAlg.BLAS.herk -Base.LinAlg.BLAS.gbmv! -Base.LinAlg.BLAS.gbmv -Base.LinAlg.BLAS.sbmv! -Base.LinAlg.BLAS.sbmv(::Any, ::Any, ::Any, ::Any, ::Any) -Base.LinAlg.BLAS.sbmv(::Any, ::Any, ::Any, ::Any) -Base.LinAlg.BLAS.gemm! -Base.LinAlg.BLAS.gemm(::Any, ::Any, ::Any, ::Any, ::Any) -Base.LinAlg.BLAS.gemm(::Any, ::Any, ::Any, ::Any) -Base.LinAlg.BLAS.gemv! -Base.LinAlg.BLAS.gemv(::Any, ::Any, ::Any, ::Any) -Base.LinAlg.BLAS.gemv(::Any, ::Any, ::Any) -Base.LinAlg.BLAS.symm! -Base.LinAlg.BLAS.symm(::Any, ::Any, ::Any, ::Any, ::Any) -Base.LinAlg.BLAS.symm(::Any, ::Any, ::Any, ::Any) -Base.LinAlg.BLAS.symv! -Base.LinAlg.BLAS.symv(::Any, ::Any, ::Any, ::Any) -Base.LinAlg.BLAS.symv(::Any, ::Any, ::Any) -Base.LinAlg.BLAS.trmm! -Base.LinAlg.BLAS.trmm -Base.LinAlg.BLAS.trsm! -Base.LinAlg.BLAS.trsm -Base.LinAlg.BLAS.trmv! -Base.LinAlg.BLAS.trmv -Base.LinAlg.BLAS.trsv! -Base.LinAlg.BLAS.trsv -Base.LinAlg.BLAS.set_num_threads -Base.LinAlg.I -``` - -## LAPACK Functions - -`Base.LinAlg.LAPACK` provides wrappers for some of the LAPACK functions for linear algebra. - Those functions that overwrite one of the input arrays have names ending in `'!'`. - -Usually a function has 4 methods defined, one each for [`Float64`](@ref), [`Float32`](@ref), -`ComplexF64` and `ComplexF32` arrays. - -Note that the LAPACK API provided by Julia can and will change in the future. Since this API is -not user-facing, there is no commitment to support/deprecate this specific set of functions in -future releases. - -```@docs -Base.LinAlg.LAPACK.gbtrf! -Base.LinAlg.LAPACK.gbtrs! 
-Base.LinAlg.LAPACK.gebal! -Base.LinAlg.LAPACK.gebak! -Base.LinAlg.LAPACK.gebrd! -Base.LinAlg.LAPACK.gelqf! -Base.LinAlg.LAPACK.geqlf! -Base.LinAlg.LAPACK.geqrf! -Base.LinAlg.LAPACK.geqp3! -Base.LinAlg.LAPACK.gerqf! -Base.LinAlg.LAPACK.geqrt! -Base.LinAlg.LAPACK.geqrt3! -Base.LinAlg.LAPACK.getrf! -Base.LinAlg.LAPACK.tzrzf! -Base.LinAlg.LAPACK.ormrz! -Base.LinAlg.LAPACK.gels! -Base.LinAlg.LAPACK.gesv! -Base.LinAlg.LAPACK.getrs! -Base.LinAlg.LAPACK.getri! -Base.LinAlg.LAPACK.gesvx! -Base.LinAlg.LAPACK.gelsd! -Base.LinAlg.LAPACK.gelsy! -Base.LinAlg.LAPACK.gglse! -Base.LinAlg.LAPACK.geev! -Base.LinAlg.LAPACK.gesdd! -Base.LinAlg.LAPACK.gesvd! -Base.LinAlg.LAPACK.ggsvd! -Base.LinAlg.LAPACK.ggsvd3! -Base.LinAlg.LAPACK.geevx! -Base.LinAlg.LAPACK.ggev! -Base.LinAlg.LAPACK.gtsv! -Base.LinAlg.LAPACK.gttrf! -Base.LinAlg.LAPACK.gttrs! -Base.LinAlg.LAPACK.orglq! -Base.LinAlg.LAPACK.orgqr! -Base.LinAlg.LAPACK.orgql! -Base.LinAlg.LAPACK.orgrq! -Base.LinAlg.LAPACK.ormlq! -Base.LinAlg.LAPACK.ormqr! -Base.LinAlg.LAPACK.ormql! -Base.LinAlg.LAPACK.ormrq! -Base.LinAlg.LAPACK.gemqrt! -Base.LinAlg.LAPACK.posv! -Base.LinAlg.LAPACK.potrf! -Base.LinAlg.LAPACK.potri! -Base.LinAlg.LAPACK.potrs! -Base.LinAlg.LAPACK.pstrf! -Base.LinAlg.LAPACK.ptsv! -Base.LinAlg.LAPACK.pttrf! -Base.LinAlg.LAPACK.pttrs! -Base.LinAlg.LAPACK.trtri! -Base.LinAlg.LAPACK.trtrs! -Base.LinAlg.LAPACK.trcon! -Base.LinAlg.LAPACK.trevc! -Base.LinAlg.LAPACK.trrfs! -Base.LinAlg.LAPACK.stev! -Base.LinAlg.LAPACK.stebz! -Base.LinAlg.LAPACK.stegr! -Base.LinAlg.LAPACK.stein! -Base.LinAlg.LAPACK.syconv! -Base.LinAlg.LAPACK.sysv! -Base.LinAlg.LAPACK.sytrf! -Base.LinAlg.LAPACK.sytri! -Base.LinAlg.LAPACK.sytrs! -Base.LinAlg.LAPACK.hesv! -Base.LinAlg.LAPACK.hetrf! -Base.LinAlg.LAPACK.hetri! -Base.LinAlg.LAPACK.hetrs! -Base.LinAlg.LAPACK.syev! -Base.LinAlg.LAPACK.syevr! -Base.LinAlg.LAPACK.sygvd! -Base.LinAlg.LAPACK.bdsqr! -Base.LinAlg.LAPACK.bdsdc! -Base.LinAlg.LAPACK.gecon! -Base.LinAlg.LAPACK.gehrd! -Base.LinAlg.LAPACK.orghr! -Base.LinAlg.LAPACK.gees! -Base.LinAlg.LAPACK.gges! -Base.LinAlg.LAPACK.trexc! -Base.LinAlg.LAPACK.trsen! -Base.LinAlg.LAPACK.tgsen! -Base.LinAlg.LAPACK.trsyl! 
-``` diff --git a/doc/src/index.md b/doc/src/index.md index 7e2882ee8a6246..11af7ef880d41d 100644 --- a/doc/src/index.md +++ b/doc/src/index.md @@ -30,7 +30,6 @@ Please read the [release notes](NEWS.md) to see what has changed since the last * [Documentation](@ref) * [Metaprogramming](@ref) * [Multi-dimensional Arrays](@ref man-multi-dim-arrays) - * [Linear Algebra](@ref) * [Missing Values](@ref missing) * [Networking and Streams](@ref) * [Parallel Computing](@ref) @@ -63,7 +62,6 @@ Please read the [release notes](NEWS.md) to see what has changed since the last * [Distributed Computing](@ref) * [Multi-Threading](@ref) * [Shared Arrays](@ref) - * [Linear Algebra](@ref) * [Constants](@ref lib-constants) * [Filesystem](@ref) * [I/O and Network](@ref) @@ -91,6 +89,7 @@ Please read the [release notes](NEWS.md) to see what has changed since the last * [Profiling](@ref lib-profiling) * [Random Numbers](@ref) * [Shared Arrays](@ref) + * [Linear Algebra](@ref) * [Sparse Arrays](@ref) * [Unicode](@ref) * [Unit Testing](@ref) diff --git a/doc/src/manual/index.md b/doc/src/manual/index.md index 0eb4406168086a..a23117dd2c1e01 100644 --- a/doc/src/manual/index.md +++ b/doc/src/manual/index.md @@ -19,7 +19,6 @@ * [Documentation](@ref) * [Metaprogramming](@ref) * [Multi-dimensional Arrays](@ref man-multi-dim-arrays) - * [Linear algebra](@ref) * [Missing Values](@ref missing) * [Networking and Streams](@ref) * [Parallel Computing](@ref) diff --git a/stdlib/Distributed/src/cluster.jl b/stdlib/Distributed/src/cluster.jl index b4dc463a040c32..e0ac4cc66e6440 100644 --- a/stdlib/Distributed/src/cluster.jl +++ b/stdlib/Distributed/src/cluster.jl @@ -163,8 +163,9 @@ mutable struct LocalProcess end +import LinearAlgebra function disable_threaded_libs() - BLAS.set_num_threads(1) + LinearAlgebra.BLAS.set_num_threads(1) end worker_timeout() = parse(Float64, get(ENV, "JULIA_WORKER_TIMEOUT", "60.0")) diff --git a/stdlib/Distributed/test/distributed_exec.jl b/stdlib/Distributed/test/distributed_exec.jl index 35586c9ce0844d..05489890b7866d 100644 --- a/stdlib/Distributed/test/distributed_exec.jl +++ b/stdlib/Distributed/test/distributed_exec.jl @@ -18,7 +18,7 @@ include(joinpath(Sys.BINDIR, "..", "share", "julia", "test", "testenv.jl")) addprocs_with_testenv(4) @test nprocs() == 5 -@everywhere using Test, Random +@everywhere using Test, Random, LinearAlgebra id_me = myid() id_other = filter(x -> x != id_me, procs())[rand(1:(nprocs()-1))] @@ -916,7 +916,7 @@ end # Test addprocs enable_threaded_blas parameter const get_num_threads = function() # anonymous so it will be serialized when called - blas = BLAS.vendor() + blas = LinearAlgebra.BLAS.vendor() # Wrap in a try to catch unsupported blas versions try if blas == :openblas diff --git a/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl b/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl index 6bee43e55d1ab0..a9138bf66fb3ec 100644 --- a/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl +++ b/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl @@ -7,7 +7,10 @@ Arnoldi and Lanczos iteration for computing eigenvalues """ module IterativeEigensolvers -using Base.LinAlg: BlasFloat, BlasInt, SVD, checksquare, mul! 
+using LinearAlgebra: BlasFloat, BlasInt, SVD, checksquare, mul!, + UniformScaling, issymmetric, ishermitian, + factorize, I, scale!, qr +import LinearAlgebra export eigs, svds @@ -205,14 +208,14 @@ function SVDAugmented(A::AbstractMatrix{T}) where T SVDAugmented{Tnew,typeof(Anew)}(Anew) end -function Base.LinAlg.mul!(y::StridedVector{T}, A::SVDAugmented{T}, x::StridedVector{T}) where T +function LinearAlgebra.mul!(y::StridedVector{T}, A::SVDAugmented{T}, x::StridedVector{T}) where T m, mn = size(A.X, 1), length(x) mul!( view(y, 1:m), A.X, view(x, m + 1:mn)) # left singular vector mul!(view(y, m + 1:mn), adjoint(A.X), view(x, 1:m)) # right singular vector return y end Base.size(A::SVDAugmented) = ((+)(size(A.X)...), (+)(size(A.X)...)) -Base.ishermitian(A::SVDAugmented) = true +LinearAlgebra.ishermitian(A::SVDAugmented) = true struct AtA_or_AAt{T,S} <: AbstractArray{T, 2} A::S @@ -225,7 +228,7 @@ function AtA_or_AAt(A::AbstractMatrix{T}) where T AtA_or_AAt{Tnew,typeof(Anew)}(Anew, Vector{Tnew}(uninitialized, max(size(A)...))) end -function Base.LinAlg.mul!(y::StridedVector{T}, A::AtA_or_AAt{T}, x::StridedVector{T}) where T +function LinearAlgebra.mul!(y::StridedVector{T}, A::AtA_or_AAt{T}, x::StridedVector{T}) where T if size(A.A, 1) >= size(A.A, 2) mul!(A.buffer, A.A, x) return mul!(y, adjoint(A.A), A.buffer) @@ -235,7 +238,7 @@ function Base.LinAlg.mul!(y::StridedVector{T}, A::AtA_or_AAt{T}, x::StridedVecto end end Base.size(A::AtA_or_AAt) = ntuple(i -> min(size(A.A)...), Val(2)) -Base.ishermitian(s::AtA_or_AAt) = true +LinearAlgebra.ishermitian(s::AtA_or_AAt) = true svds(A::AbstractMatrix{<:BlasFloat}; kwargs...) = _svds(A; kwargs...) diff --git a/stdlib/IterativeEigensolvers/src/arpack.jl b/stdlib/IterativeEigensolvers/src/arpack.jl index ced0680b8d199d..98b69c1ecdde06 100644 --- a/stdlib/IterativeEigensolvers/src/arpack.jl +++ b/stdlib/IterativeEigensolvers/src/arpack.jl @@ -2,7 +2,7 @@ module ARPACK -import ..LinAlg: BlasInt, ARPACKException +import LinearAlgebra: BlasInt, ARPACKException ## aupd and eupd wrappers diff --git a/stdlib/IterativeEigensolvers/src/deprecated.jl b/stdlib/IterativeEigensolvers/src/deprecated.jl index dd98ce051db62d..e48331e7ae75b5 100644 --- a/stdlib/IterativeEigensolvers/src/deprecated.jl +++ b/stdlib/IterativeEigensolvers/src/deprecated.jl @@ -1,6 +1,4 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -@eval IterativeEigensolvers begin - Base.A_mul_B!(y::StridedVector{T}, A::AtA_or_AAt{T}, x::StridedVector{T}) where {T} = Base.LinAlg.mul!(y, A, x) - Base.A_mul_B!(y::StridedVector{T}, A::SVDAugmented{T}, x::StridedVector{T}) where {T} = Base.LinAlg.mul!(y, A, x) -end +LinearAlgebra.A_mul_B!(y::StridedVector{T}, A::AtA_or_AAt{T}, x::StridedVector{T}) where {T} = LinearAlgebra.mul!(y, A, x) +LinearAlgebra.A_mul_B!(y::StridedVector{T}, A::SVDAugmented{T}, x::StridedVector{T}) where {T} = LinearAlgebra.mul!(y, A, x) diff --git a/stdlib/IterativeEigensolvers/test/runtests.jl b/stdlib/IterativeEigensolvers/test/runtests.jl index c61d875aeadb46..6e8e5508669274 100644 --- a/stdlib/IterativeEigensolvers/test/runtests.jl +++ b/stdlib/IterativeEigensolvers/test/runtests.jl @@ -1,7 +1,7 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license using IterativeEigensolvers -using Test, SparseArrays, Random +using Test, LinearAlgebra, SparseArrays, Random @testset "eigs" begin srand(1234) @@ -33,7 +33,7 @@ using Test, SparseArrays, Random (d,v) = eigs(a, nev=3) @test a*v[:,2] ≈ d[2]*v[:,2] @test norm(v) > testtol # eigenvectors cannot be null vectors - (d,v) = eigs(a, I, nev=3) # test eigs(A, B; kwargs...) + (d,v) = eigs(a, LinearAlgebra.I, nev=3) # test eigs(A, B; kwargs...) @test a*v[:,2] ≈ d[2]*v[:,2] @test norm(v) > testtol # eigenvectors cannot be null vectors @test_logs (:warn,"Use symbols instead of strings for specifying which eigenvalues to compute") eigs(a, which="LM") @@ -42,7 +42,7 @@ using Test, SparseArrays, Random # (d,v) = eigs(a, b, nev=3, tol=1e-8) # not handled yet # @test a*v[:,2] ≈ d[2]*b*v[:,2] atol=testtol # @test norm(v) > testtol # eigenvectors cannot be null vectors - if elty <: Base.LinAlg.BlasComplex + if elty <: LinearAlgebra.BlasComplex sr_ind = indmin(real.(a_evs)) (d, v) = eigs(a, nev=1, which=:SR) @test d[1] ≈ a_evs[sr_ind] @@ -130,15 +130,15 @@ let A6965 = [ end # Example from Quantum Information Theory -import Base: size, issymmetric, ishermitian +import Base: size -mutable struct CPM{T<:Base.LinAlg.BlasFloat} <: AbstractMatrix{T} # completely positive map +mutable struct CPM{T<:LinearAlgebra.BlasFloat} <: AbstractMatrix{T} # completely positive map kraus::Array{T,3} # kraus operator representation end size(Phi::CPM) = (size(Phi.kraus,1)^2,size(Phi.kraus,3)^2) -issymmetric(Phi::CPM) = false -ishermitian(Phi::CPM) = false -function Base.LinAlg.mul!(rho2::StridedVector{T},Phi::CPM{T},rho::StridedVector{T}) where {T<:Base.LinAlg.BlasFloat} +LinearAlgebra.issymmetric(Phi::CPM) = false +LinearAlgebra.ishermitian(Phi::CPM) = false +function LinearAlgebra.mul!(rho2::StridedVector{T},Phi::CPM{T},rho::StridedVector{T}) where {T<:LinearAlgebra.BlasFloat} rho = reshape(rho,(size(Phi.kraus,3),size(Phi.kraus,3))) rho1 = zeros(T,(size(Phi.kraus,1),size(Phi.kraus,1))) for s = 1:size(Phi.kraus,2) @@ -147,7 +147,7 @@ function Base.LinAlg.mul!(rho2::StridedVector{T},Phi::CPM{T},rho::StridedVector{ end return copyto!(rho2,rho1) end -Base.LinAlg.A_mul_B!(rho2::StridedVector{T},Phi::CPM{T},rho::StridedVector{T}) where {T<:Base.LinAlg.BlasFloat} = Base.LinAlg.mul!(rho2, Phi, rho) +LinearAlgebra.A_mul_B!(rho2::StridedVector{T},Phi::CPM{T},rho::StridedVector{T}) where {T<:LinearAlgebra.BlasFloat} = LinearAlgebra.mul!(rho2, Phi, rho) # after the A_mul_B! deprecation, remove this A_mul_B! def let diff --git a/doc/src/manual/linear-algebra.md b/stdlib/LinearAlgebra/docs/src/index.md similarity index 53% rename from doc/src/manual/linear-algebra.md rename to stdlib/LinearAlgebra/docs/src/index.md index 549fd74507e052..2bdd8028c95790 100644 --- a/doc/src/manual/linear-algebra.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -1,4 +1,4 @@ -# Linear algebra +# Linear Algebra In addition to (and as part of) its support for multi-dimensional arrays, Julia provides native implementations of many common and useful linear algebra operations. 
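Since this page now documents a separate stdlib rather than `Base.LinAlg`, everything below assumes the module has been loaded first. A minimal sketch, assuming a build that already contains this PR (the `factorize` return types are shown in the diff just below):

```julia
using LinearAlgebra   # after this PR the linear algebra names are no longer exported from Base

A = [1.5 2 -4; 3 -1 -6; -10 2.3 4]

det(A)        # determinant, provided by LinearAlgebra
factorize(A)  # picks an LU factorization for this general square matrix
```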
Basic operations, such as [`trace`](@ref), [`det`](@ref), @@ -56,7 +56,7 @@ julia> A = [1.5 2 -4; 3 -1 -6; -10 2.3 4] -10.0 2.3 4.0 julia> factorize(A) -Base.LinAlg.LU{Float64,Array{Float64,2}} with factors L and U: +LinearAlgebra.LU{Float64,Array{Float64,2}} with factors L and U: [1.0 0.0 0.0; -0.15 1.0 0.0; -0.3 -0.132196 1.0] [-10.0 2.3 4.0; 0.0 2.345 -3.4; 0.0 0.0 -5.24947] ``` @@ -72,14 +72,14 @@ julia> B = [1.5 2 -4; 2 -1 -3; -4 -3 5] -4.0 -3.0 5.0 julia> factorize(B) -Base.LinAlg.BunchKaufman{Float64,Array{Float64,2}} +LinearAlgebra.BunchKaufman{Float64,Array{Float64,2}} D factor: 3×3 Tridiagonal{Float64,Array{Float64,1}}: -1.64286 0.0 ⋅ 0.0 -2.8 0.0 ⋅ 0.0 5.0 U factor: -3×3 Base.LinAlg.UnitUpperTriangular{Float64,Array{Float64,2}}: +3×3 LinearAlgebra.UnitUpperTriangular{Float64,Array{Float64,2}}: 1.0 0.142857 -0.8 0.0 1.0 -0.6 0.0 0.0 1.0 @@ -260,8 +260,8 @@ compute the factorization of a matrix into a product of matrices, and are one of in linear algebra. The following table summarizes the types of matrix factorizations that have been implemented in -Julia. Details of their associated methods can be found in the [Linear Algebra](@ref) section -of the Julia Base documentation. +Julia. Details of their associated methods can be found in the [Standard Functions](@ref) section +of the Linear Algebra documentation. | Type | Description | |:----------------- |:-------------------------------------------------------------------------------------------------------------- | @@ -276,3 +276,348 @@ of the Julia Base documentation. | `Eigen` | [Spectral decomposition](https://en.wikipedia.org/wiki/Eigendecomposition_(matrix)) | | `SVD` | [Singular value decomposition](https://en.wikipedia.org/wiki/Singular_value_decomposition) | | `GeneralizedSVD` | [Generalized SVD](https://en.wikipedia.org/wiki/Generalized_singular_value_decomposition#Higher_order_version) | + + + + +## Standard Functions + +Linear algebra functions in Julia are largely implemented by calling functions from [LAPACK](http://www.netlib.org/lapack/). + Sparse factorizations call functions from [SuiteSparse](http://faculty.cse.tamu.edu/davis/suitesparse.html). + +```@docs +Base.:*(::AbstractMatrix, ::AbstractMatrix) +Base.:\(::AbstractMatrix, ::AbstractVecOrMat) +LinearAlgebra.dot +LinearAlgebra.vecdot +LinearAlgebra.cross +LinearAlgebra.factorize +LinearAlgebra.Diagonal +LinearAlgebra.Bidiagonal +LinearAlgebra.SymTridiagonal +LinearAlgebra.Tridiagonal +LinearAlgebra.Symmetric +LinearAlgebra.Hermitian +LinearAlgebra.LowerTriangular +LinearAlgebra.UpperTriangular +LinearAlgebra.UniformScaling +LinearAlgebra.lu +LinearAlgebra.lufact +LinearAlgebra.lufact! +LinearAlgebra.chol +LinearAlgebra.cholfact +LinearAlgebra.cholfact! +LinearAlgebra.lowrankupdate +LinearAlgebra.lowrankdowndate +LinearAlgebra.lowrankupdate! +LinearAlgebra.lowrankdowndate! +LinearAlgebra.ldltfact +LinearAlgebra.ldltfact! +LinearAlgebra.qr +LinearAlgebra.qr! +LinearAlgebra.qrfact +LinearAlgebra.qrfact! +LinearAlgebra.QR +LinearAlgebra.QRCompactWY +LinearAlgebra.QRPivoted +LinearAlgebra.lqfact! +LinearAlgebra.lqfact +LinearAlgebra.lq +LinearAlgebra.bkfact +LinearAlgebra.bkfact! +LinearAlgebra.eig +LinearAlgebra.eigvals +LinearAlgebra.eigvals! +LinearAlgebra.eigmax +LinearAlgebra.eigmin +LinearAlgebra.eigvecs +LinearAlgebra.eigfact +LinearAlgebra.eigfact! +LinearAlgebra.hessfact +LinearAlgebra.hessfact! +LinearAlgebra.schurfact +LinearAlgebra.schurfact! +LinearAlgebra.schur +LinearAlgebra.ordschur +LinearAlgebra.ordschur! 
+LinearAlgebra.svdfact +LinearAlgebra.svdfact! +LinearAlgebra.svd +LinearAlgebra.svdvals +LinearAlgebra.svdvals! +LinearAlgebra.Givens +LinearAlgebra.givens +LinearAlgebra.triu +LinearAlgebra.triu! +LinearAlgebra.tril +LinearAlgebra.tril! +LinearAlgebra.diagind +LinearAlgebra.diag +LinearAlgebra.diagm +LinearAlgebra.scale! +LinearAlgebra.rank +LinearAlgebra.norm +LinearAlgebra.vecnorm +LinearAlgebra.normalize! +LinearAlgebra.normalize +LinearAlgebra.cond +LinearAlgebra.condskeel +LinearAlgebra.trace +LinearAlgebra.det +LinearAlgebra.logdet +LinearAlgebra.logabsdet +Base.inv(::AbstractMatrix) +LinearAlgebra.pinv +LinearAlgebra.nullspace +Base.repmat +Base.kron +LinearAlgebra.linreg +LinearAlgebra.exp(::StridedMatrix{<:LinearAlgebra.BlasFloat}) +LinearAlgebra.log(::StridedMatrix) +LinearAlgebra.sqrt(::StridedMatrix{<:Real}) +LinearAlgebra.cos(::StridedMatrix{<:Real}) +LinearAlgebra.sin(::StridedMatrix{<:Real}) +LinearAlgebra.sincos(::StridedMatrix{<:Real}) +LinearAlgebra.tan(::StridedMatrix{<:Real}) +LinearAlgebra.sec(::StridedMatrix) +LinearAlgebra.csc(::StridedMatrix) +LinearAlgebra.cot(::StridedMatrix) +LinearAlgebra.cosh(::StridedMatrix) +LinearAlgebra.sinh(::StridedMatrix) +LinearAlgebra.tanh(::StridedMatrix) +LinearAlgebra.sech(::StridedMatrix) +LinearAlgebra.csch(::StridedMatrix) +LinearAlgebra.coth(::StridedMatrix) +LinearAlgebra.acos(::StridedMatrix) +LinearAlgebra.asin(::StridedMatrix) +LinearAlgebra.atan(::StridedMatrix) +LinearAlgebra.asec(::StridedMatrix) +LinearAlgebra.acsc(::StridedMatrix) +LinearAlgebra.acot(::StridedMatrix) +LinearAlgebra.acosh(::StridedMatrix) +LinearAlgebra.asinh(::StridedMatrix) +LinearAlgebra.atanh(::StridedMatrix) +LinearAlgebra.asech(::StridedMatrix) +LinearAlgebra.acsch(::StridedMatrix) +LinearAlgebra.acoth(::StridedMatrix) +LinearAlgebra.lyap +LinearAlgebra.sylvester +LinearAlgebra.issuccess +LinearAlgebra.issymmetric +LinearAlgebra.isposdef +LinearAlgebra.isposdef! +LinearAlgebra.istril +LinearAlgebra.istriu +LinearAlgebra.isdiag +LinearAlgebra.ishermitian +LinearAlgebra.RowVector +LinearAlgebra.ConjArray +Base.transpose +LinearAlgebra.transpose! +Base.adjoint +LinearAlgebra.adjoint! +LinearAlgebra.peakflops +LinearAlgebra.stride1 +LinearAlgebra.checksquare +``` + +## Low-level matrix operations + +In many cases there are in-place versions of matrix operations that allow you to supply +a pre-allocated output vector or matrix. This is useful when optimizing critical code in order +to avoid the overhead of repeated allocations. These in-place operations are suffixed with `!` +below (e.g. `mul!`) according to the usual Julia convention. + +```@docs +LinearAlgebra.mul! +LinearAlgebra.ldiv! +LinearAlgebra.rdiv! +``` + +## BLAS Functions + +In Julia (as in much of scientific computation), dense linear-algebra operations are based on +the [LAPACK library](http://www.netlib.org/lapack/), which in turn is built on top of basic linear-algebra +building-blocks known as the [BLAS](http://www.netlib.org/blas/). There are highly optimized +implementations of BLAS available for every computer architecture, and sometimes in high-performance +linear algebra routines it is useful to call the BLAS functions directly. + +`LinearAlgebra.BLAS` provides wrappers for some of the BLAS functions. Those BLAS functions +that overwrite one of the input arrays have names ending in `'!'`. Usually, a BLAS function has +four methods defined, for [`Float64`](@ref), [`Float32`](@ref), `ComplexF64`, and `ComplexF32` arrays. 
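For orientation, a small hedged example of calling two of the renamed wrappers directly; `gemv` and `axpy!` are documented in the `@docs` block below, and the sizes and scalars here are arbitrary:

```julia
using LinearAlgebra: BLAS   # formerly Base.LinAlg.BLAS

A = rand(Float64, 3, 3)
x = rand(Float64, 3)

# y = 2.0 * A * x; 'N' means "do not transpose A" (see the character tables below).
y = BLAS.gemv('N', 2.0, A, x)

# In-place Level-1 update: y .= 3.0 .* x .+ y.
BLAS.axpy!(3.0, x, y)
```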
+
+### [BLAS Character Arguments](@id stdlib-blas-chars)
+Many BLAS functions accept arguments that determine whether to transpose an argument (`trans`),
+which triangle of a matrix to reference (`uplo` or `ul`),
+whether the diagonal of a triangular matrix can be assumed to
+be all ones (`dA`) or which side of a matrix multiplication
+the input argument belongs on (`side`). The possibilities are:
+
+#### [Multiplication Order](@id stdlib-blas-side)
+| `side` | Meaning |
+|:-------|:--------------------------------------------------------------------|
+| `'L'` | The argument goes on the *left* side of a matrix-matrix operation. |
+| `'R'` | The argument goes on the *right* side of a matrix-matrix operation. |
+
+#### [Triangle Referencing](@id stdlib-blas-uplo)
+| `uplo`/`ul` | Meaning |
+|:------------|:------------------------------------------------------|
+| `'U'` | Only the *upper* triangle of the matrix will be used. |
+| `'L'` | Only the *lower* triangle of the matrix will be used. |
+
+#### [Transposition Operation](@id stdlib-blas-trans)
+| `trans`/`tX` | Meaning |
+|:-------------|:--------------------------------------------------------|
+| `'N'` | The input matrix `X` is not transposed or conjugated. |
+| `'T'` | The input matrix `X` will be transposed. |
+| `'C'` | The input matrix `X` will be conjugated and transposed. |
+
+#### [Unit Diagonal](@id stdlib-blas-diag)
+| `diag`/`dX` | Meaning |
+|:------------|:----------------------------------------------------------|
+| `'N'` | The diagonal values of the matrix `X` will be read. |
+| `'U'` | The diagonal of the matrix `X` is assumed to be all ones. |
+
+```@docs
+LinearAlgebra.BLAS
+LinearAlgebra.BLAS.dotu
+LinearAlgebra.BLAS.dotc
+LinearAlgebra.BLAS.blascopy!
+LinearAlgebra.BLAS.nrm2
+LinearAlgebra.BLAS.asum
+LinearAlgebra.axpy!
+LinearAlgebra.BLAS.scal!
+LinearAlgebra.BLAS.scal
+LinearAlgebra.BLAS.ger!
+LinearAlgebra.BLAS.syr!
+LinearAlgebra.BLAS.syrk!
+LinearAlgebra.BLAS.syrk
+LinearAlgebra.BLAS.her!
+LinearAlgebra.BLAS.herk!
+LinearAlgebra.BLAS.herk
+LinearAlgebra.BLAS.gbmv!
+LinearAlgebra.BLAS.gbmv
+LinearAlgebra.BLAS.sbmv!
+LinearAlgebra.BLAS.sbmv(::Any, ::Any, ::Any, ::Any, ::Any)
+LinearAlgebra.BLAS.sbmv(::Any, ::Any, ::Any, ::Any)
+LinearAlgebra.BLAS.gemm!
+LinearAlgebra.BLAS.gemm(::Any, ::Any, ::Any, ::Any, ::Any)
+LinearAlgebra.BLAS.gemm(::Any, ::Any, ::Any, ::Any)
+LinearAlgebra.BLAS.gemv!
+LinearAlgebra.BLAS.gemv(::Any, ::Any, ::Any, ::Any)
+LinearAlgebra.BLAS.gemv(::Any, ::Any, ::Any)
+LinearAlgebra.BLAS.symm!
+LinearAlgebra.BLAS.symm(::Any, ::Any, ::Any, ::Any, ::Any)
+LinearAlgebra.BLAS.symm(::Any, ::Any, ::Any, ::Any)
+LinearAlgebra.BLAS.symv!
+LinearAlgebra.BLAS.symv(::Any, ::Any, ::Any, ::Any)
+LinearAlgebra.BLAS.symv(::Any, ::Any, ::Any)
+LinearAlgebra.BLAS.trmm!
+LinearAlgebra.BLAS.trmm
+LinearAlgebra.BLAS.trsm!
+LinearAlgebra.BLAS.trsm
+LinearAlgebra.BLAS.trmv!
+LinearAlgebra.BLAS.trmv
+LinearAlgebra.BLAS.trsv!
+LinearAlgebra.BLAS.trsv
+LinearAlgebra.BLAS.set_num_threads
+LinearAlgebra.I
+```
+
+## LAPACK Functions
+
+`LinearAlgebra.LAPACK` provides wrappers for some of the LAPACK functions for linear algebra.
+ Those functions that overwrite one of the input arrays have names ending in `'!'`.
+
+Usually a function has 4 methods defined, one each for [`Float64`](@ref), [`Float32`](@ref),
+`ComplexF64` and `ComplexF32` arrays.
+
+Note that the LAPACK API provided by Julia can and will change in the future.
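With that caveat in mind, a hedged sketch of calling two of the wrappers documented below; both overwrite their input, hence the `copy`:

```julia
using LinearAlgebra: LAPACK   # formerly Base.LinAlg.LAPACK

A = [4.0 2.0; 2.0 3.0]

# LU factorization in place; returns the packed factors, the pivot vector and an info code.
F, ipiv, info = LAPACK.getrf!(copy(A))

# Cholesky factor of a positive definite matrix; 'U' stores the upper triangle.
C, info = LAPACK.potrf!('U', copy(A))
```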
Since this API is +not user-facing, there is no commitment to support/deprecate this specific set of functions in +future releases. + +```@docs +LinearAlgebra.LAPACK +LinearAlgebra.LAPACK.gbtrf! +LinearAlgebra.LAPACK.gbtrs! +LinearAlgebra.LAPACK.gebal! +LinearAlgebra.LAPACK.gebak! +LinearAlgebra.LAPACK.gebrd! +LinearAlgebra.LAPACK.gelqf! +LinearAlgebra.LAPACK.geqlf! +LinearAlgebra.LAPACK.geqrf! +LinearAlgebra.LAPACK.geqp3! +LinearAlgebra.LAPACK.gerqf! +LinearAlgebra.LAPACK.geqrt! +LinearAlgebra.LAPACK.geqrt3! +LinearAlgebra.LAPACK.getrf! +LinearAlgebra.LAPACK.tzrzf! +LinearAlgebra.LAPACK.ormrz! +LinearAlgebra.LAPACK.gels! +LinearAlgebra.LAPACK.gesv! +LinearAlgebra.LAPACK.getrs! +LinearAlgebra.LAPACK.getri! +LinearAlgebra.LAPACK.gesvx! +LinearAlgebra.LAPACK.gelsd! +LinearAlgebra.LAPACK.gelsy! +LinearAlgebra.LAPACK.gglse! +LinearAlgebra.LAPACK.geev! +LinearAlgebra.LAPACK.gesdd! +LinearAlgebra.LAPACK.gesvd! +LinearAlgebra.LAPACK.ggsvd! +LinearAlgebra.LAPACK.ggsvd3! +LinearAlgebra.LAPACK.geevx! +LinearAlgebra.LAPACK.ggev! +LinearAlgebra.LAPACK.gtsv! +LinearAlgebra.LAPACK.gttrf! +LinearAlgebra.LAPACK.gttrs! +LinearAlgebra.LAPACK.orglq! +LinearAlgebra.LAPACK.orgqr! +LinearAlgebra.LAPACK.orgql! +LinearAlgebra.LAPACK.orgrq! +LinearAlgebra.LAPACK.ormlq! +LinearAlgebra.LAPACK.ormqr! +LinearAlgebra.LAPACK.ormql! +LinearAlgebra.LAPACK.ormrq! +LinearAlgebra.LAPACK.gemqrt! +LinearAlgebra.LAPACK.posv! +LinearAlgebra.LAPACK.potrf! +LinearAlgebra.LAPACK.potri! +LinearAlgebra.LAPACK.potrs! +LinearAlgebra.LAPACK.pstrf! +LinearAlgebra.LAPACK.ptsv! +LinearAlgebra.LAPACK.pttrf! +LinearAlgebra.LAPACK.pttrs! +LinearAlgebra.LAPACK.trtri! +LinearAlgebra.LAPACK.trtrs! +LinearAlgebra.LAPACK.trcon! +LinearAlgebra.LAPACK.trevc! +LinearAlgebra.LAPACK.trrfs! +LinearAlgebra.LAPACK.stev! +LinearAlgebra.LAPACK.stebz! +LinearAlgebra.LAPACK.stegr! +LinearAlgebra.LAPACK.stein! +LinearAlgebra.LAPACK.syconv! +LinearAlgebra.LAPACK.sysv! +LinearAlgebra.LAPACK.sytrf! +LinearAlgebra.LAPACK.sytri! +LinearAlgebra.LAPACK.sytrs! +LinearAlgebra.LAPACK.hesv! +LinearAlgebra.LAPACK.hetrf! +LinearAlgebra.LAPACK.hetri! +LinearAlgebra.LAPACK.hetrs! +LinearAlgebra.LAPACK.syev! +LinearAlgebra.LAPACK.syevr! +LinearAlgebra.LAPACK.sygvd! +LinearAlgebra.LAPACK.bdsqr! +LinearAlgebra.LAPACK.bdsdc! +LinearAlgebra.LAPACK.gecon! +LinearAlgebra.LAPACK.gehrd! +LinearAlgebra.LAPACK.orghr! +LinearAlgebra.LAPACK.gees! +LinearAlgebra.LAPACK.gges! +LinearAlgebra.LAPACK.trexc! +LinearAlgebra.LAPACK.trsen! +LinearAlgebra.LAPACK.tgsen! +LinearAlgebra.LAPACK.trsyl! +``` diff --git a/base/linalg/linalg.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl similarity index 90% rename from base/linalg/linalg.jl rename to stdlib/LinearAlgebra/src/LinearAlgebra.jl index 99d308b764426e..c82d6f67d8b9a3 100644 --- a/base/linalg/linalg.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -1,11 +1,13 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +__precompile__(true) + """ Linear algebra module. Provides array arithmetic, matrix factorizations and other linear algebra related functionality. 
""" -module LinAlg +module LinearAlgebra import Base: \, /, *, ^, +, -, == import Base: USE_BLAS64, abs, acos, acosh, acot, acoth, acsc, acsch, adjoint, asec, asech, @@ -176,7 +178,7 @@ julia> A = [1,2,3,4] 3 4 -julia> Base.LinAlg.stride1(A) +julia> LinearAlgebra.stride1(A) 1 julia> B = view(A, 2:2:4) @@ -184,7 +186,7 @@ julia> B = view(A, 2:2:4) 2 4 -julia> Base.LinAlg.stride1(B) +julia> LinearAlgebra.stride1(B) 2 ``` """ @@ -197,7 +199,7 @@ stride1(x::DenseArray) = stride(x, 1)::Int @inline _chkstride1(ok::Bool, A, B...) = _chkstride1(ok & (stride1(A) == 1), B...) """ - LinAlg.checksquare(A) + LinearAlgebra.checksquare(A) Check that a matrix is square, then return its common dimension. For multiple arguments, return a vector. @@ -206,7 +208,7 @@ For multiple arguments, return a vector. ```jldoctest julia> A = fill(1, (4,4)); B = fill(1, (5,5)); -julia> LinAlg.checksquare(A, B) +julia> LinearAlgebra.checksquare(A, B) 2-element Array{Int64,1}: 4 5 @@ -306,7 +308,22 @@ include("special.jl") include("bitarray.jl") include("ldlt.jl") include("schur.jl") +include("deprecated.jl") + +const ⋅ = dot +const × = cross +export ⋅, × + +function versioninfo(io::IO=STDOUT) + if Base.libblas_name == "libopenblas" || BLAS.vendor() == :openblas || BLAS.vendor() == :openblas64 + openblas_config = BLAS.openblas_get_config() + println(io, "BLAS: libopenblas (", openblas_config, ")") + else + println(io, "BLAS: ",Base.libblas_name) + end + println(io, "LAPACK: ",Base.liblapack_name) +end function __init__() try @@ -316,8 +333,8 @@ function __init__() end catch ex Base.showerror_nostdio(ex, - "WARNING: Error during initialization of module LinAlg") + "WARNING: Error during initialization of module LinearAlgebra") end end -end # module LinAlg +end # module LinearAlgebra diff --git a/base/linalg/adjtrans.jl b/stdlib/LinearAlgebra/src/adjtrans.jl similarity index 100% rename from base/linalg/adjtrans.jl rename to stdlib/LinearAlgebra/src/adjtrans.jl diff --git a/base/linalg/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl similarity index 100% rename from base/linalg/bidiag.jl rename to stdlib/LinearAlgebra/src/bidiag.jl diff --git a/base/linalg/bitarray.jl b/stdlib/LinearAlgebra/src/bitarray.jl similarity index 100% rename from base/linalg/bitarray.jl rename to stdlib/LinearAlgebra/src/bitarray.jl diff --git a/base/linalg/blas.jl b/stdlib/LinearAlgebra/src/blas.jl similarity index 99% rename from base/linalg/blas.jl rename to stdlib/LinearAlgebra/src/blas.jl index 36a39ae9895b5a..6b551b245402f1 100644 --- a/base/linalg/blas.jl +++ b/stdlib/LinearAlgebra/src/blas.jl @@ -63,7 +63,8 @@ export const libblas = Base.libblas_name const liblapack = Base.liblapack_name -import ..LinAlg: BlasReal, BlasComplex, BlasFloat, BlasInt, DimensionMismatch, checksquare, stride1, chkstride1, axpy! +import LinearAlgebra +import LinearAlgebra: BlasReal, BlasComplex, BlasFloat, BlasInt, DimensionMismatch, checksquare, stride1, chkstride1, axpy! import Libdl @@ -149,7 +150,7 @@ function check() # # Check if BlasInt is the expected bitsize, by triggering an error # - (_, info) = LinAlg.LAPACK.potrf!('U', [1.0 0.0; 0.0 -1.0]) + (_, info) = LinearAlgebra.LAPACK.potrf!('U', [1.0 0.0; 0.0 -1.0]) if info != 2 # mangled info code if info == 2^33 error("BLAS and LAPACK are compiled with 32-bit integer support, but Julia expects 64-bit integers. 
Please build Julia with USE_BLAS64=0.") diff --git a/base/linalg/bunchkaufman.jl b/stdlib/LinearAlgebra/src/bunchkaufman.jl similarity index 97% rename from base/linalg/bunchkaufman.jl rename to stdlib/LinearAlgebra/src/bunchkaufman.jl index fc36a1e888c254..57ad722f99efd2 100644 --- a/base/linalg/bunchkaufman.jl +++ b/stdlib/LinearAlgebra/src/bunchkaufman.jl @@ -59,13 +59,13 @@ julia> A = [1 2; 2 3] 2 3 julia> bkfact(A) -Base.LinAlg.BunchKaufman{Float64,Array{Float64,2}} +LinearAlgebra.BunchKaufman{Float64,Array{Float64,2}} D factor: 2×2 Tridiagonal{Float64,Array{Float64,1}}: -0.333333 0.0 0.0 3.0 U factor: -2×2 Base.LinAlg.UnitUpperTriangular{Float64,Array{Float64,2}}: +2×2 LinearAlgebra.UnitUpperTriangular{Float64,Array{Float64,2}}: 1.0 0.666667 0.0 1.0 permutation: @@ -135,14 +135,14 @@ julia> A = [1 2 3; 2 1 2; 3 2 1] 3 2 1 julia> F = bkfact(Symmetric(A, :L)) -Base.LinAlg.BunchKaufman{Float64,Array{Float64,2}} +LinearAlgebra.BunchKaufman{Float64,Array{Float64,2}} D factor: 3×3 Tridiagonal{Float64,Array{Float64,1}}: 1.0 3.0 ⋅ 3.0 1.0 0.0 ⋅ 0.0 -1.0 L factor: -3×3 Base.LinAlg.UnitLowerTriangular{Float64,Array{Float64,2}}: +3×3 LinearAlgebra.UnitLowerTriangular{Float64,Array{Float64,2}}: 1.0 0.0 0.0 0.0 1.0 0.0 0.5 0.5 1.0 diff --git a/base/linalg/cholesky.jl b/stdlib/LinearAlgebra/src/cholesky.jl similarity index 98% rename from base/linalg/cholesky.jl rename to stdlib/LinearAlgebra/src/cholesky.jl index 6ed5cdf6b2fc44..ee83fac8e5942c 100644 --- a/base/linalg/cholesky.jl +++ b/stdlib/LinearAlgebra/src/cholesky.jl @@ -135,7 +135,7 @@ end # chol!. Destructive methods for computing Cholesky factor of real symmetric or Hermitian # matrix function chol!(A::RealHermSymComplexHerm{<:Real,<:StridedMatrix}) - C, info = _chol!(A.uplo == 'U' ? A.data : LinAlg.copytri!(A.data, 'L', true), UpperTriangular) + C, info = _chol!(A.uplo == 'U' ? A.data : LinearAlgebra.copytri!(A.data, 'L', true), UpperTriangular) @assertposdef C info end function chol!(A::StridedMatrix) @@ -153,7 +153,7 @@ function chol(A::RealHermSymComplexHerm) if A.uplo == 'U' copyto!(AA, A.data) else - Base.adjoint!(AA, A.data) + adjoint!(AA, A.data) end chol!(Hermitian(AA, :U)) end @@ -298,7 +298,7 @@ julia> A = [4. 12. -16.; 12. 37. -43.; -16. -43. 98.] -16.0 -43.0 98.0 julia> C = cholfact(A) -Base.LinAlg.Cholesky{Float64,Array{Float64,2}} +LinearAlgebra.Cholesky{Float64,Array{Float64,2}} U factor: 3×3 UpperTriangular{Float64,Array{Float64,2}}: 2.0 6.0 -8.0 @@ -616,7 +616,7 @@ function lowrankdowndate!(C::Cholesky, v::StridedVector) s = conj(v[i]/Aii) s2 = abs2(s) if s2 > 1 - throw(LinAlg.PosDefException(i)) + throw(LinearAlgebra.PosDefException(i)) end c = sqrt(1 - abs2(s)) diff --git a/base/linalg/conjarray.jl b/stdlib/LinearAlgebra/src/conjarray.jl similarity index 100% rename from base/linalg/conjarray.jl rename to stdlib/LinearAlgebra/src/conjarray.jl diff --git a/base/linalg/dense.jl b/stdlib/LinearAlgebra/src/dense.jl similarity index 100% rename from base/linalg/dense.jl rename to stdlib/LinearAlgebra/src/dense.jl diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl new file mode 100644 index 00000000000000..6e63a0d3530169 --- /dev/null +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -0,0 +1,1264 @@ +# This file is a part of Julia. 
License is MIT: https://julialang.org/license + +using Base: @deprecate, depwarn + +# BEGIN 0.7 deprecations + +@deprecate cond(F::LinearAlgebra.LU, p::Integer) cond(convert(AbstractArray, F), p) + +# PR #22188 +@deprecate cholfact!(A::StridedMatrix, uplo::Symbol, ::Type{Val{false}}) cholfact!(Hermitian(A, uplo), Val(false)) +@deprecate cholfact!(A::StridedMatrix, uplo::Symbol) cholfact!(Hermitian(A, uplo)) +@deprecate cholfact(A::StridedMatrix, uplo::Symbol, ::Type{Val{false}}) cholfact(Hermitian(A, uplo), Val(false)) +@deprecate cholfact(A::StridedMatrix, uplo::Symbol) cholfact(Hermitian(A, uplo)) +@deprecate cholfact!(A::StridedMatrix, uplo::Symbol, ::Type{Val{true}}; tol = 0.0) cholfact!(Hermitian(A, uplo), Val(true), tol = tol) +@deprecate cholfact(A::StridedMatrix, uplo::Symbol, ::Type{Val{true}}; tol = 0.0) cholfact(Hermitian(A, uplo), Val(true), tol = tol) + +# PR #22245 +@deprecate isposdef(A::AbstractMatrix, UL::Symbol) isposdef(Hermitian(A, UL)) +@deprecate isposdef!(A::StridedMatrix, UL::Symbol) isposdef!(Hermitian(A, UL)) + +# bkfact +import .LinearAlgebra: bkfact, bkfact! +function bkfact(A::StridedMatrix, uplo::Symbol, symmetric::Bool = issymmetric(A), rook::Bool = false) + depwarn(string("`bkfact` with uplo and symmetric arguments is deprecated, ", + "use `bkfact($(symmetric ? "Symmetric(" : "Hermitian(")A, :$uplo))` instead."), + :bkfact) + return bkfact(symmetric ? Symmetric(A, uplo) : Hermitian(A, uplo), rook) +end +function bkfact!(A::StridedMatrix, uplo::Symbol, symmetric::Bool = issymmetric(A), rook::Bool = false) + depwarn(string("`bkfact!` with uplo and symmetric arguments is deprecated, ", + "use `bkfact!($(symmetric ? "Symmetric(" : "Hermitian(")A, :$uplo))` instead."), + :bkfact!) + return bkfact!(symmetric ? Symmetric(A, uplo) : Hermitian(A, uplo), rook) +end + +@deprecate sqrtm(A::UpperTriangular{T},::Type{Val{realmatrix}}) where {T,realmatrix} sqrtm(A, Val(realmatrix)) +@deprecate lufact(A::AbstractMatrix, ::Type{Val{false}}) lufact(A, Val(false)) +@deprecate lufact(A::AbstractMatrix, ::Type{Val{true}}) lufact(A, Val(true)) +@deprecate lufact!(A::AbstractMatrix, ::Type{Val{false}}) lufact!(A, Val(false)) +@deprecate lufact!(A::AbstractMatrix, ::Type{Val{true}}) lufact!(A, Val(true)) +@deprecate qrfact(A::AbstractMatrix, ::Type{Val{false}}) qrfact(A, Val(false)) +@deprecate qrfact(A::AbstractMatrix, ::Type{Val{true}}) qrfact(A, Val(true)) +@deprecate qrfact!(A::AbstractMatrix, ::Type{Val{false}}) qrfact!(A, Val(false)) +@deprecate qrfact!(A::AbstractMatrix, ::Type{Val{true}}) qrfact!(A, Val(true)) +@deprecate cholfact(A::AbstractMatrix, ::Type{Val{false}}) cholfact(A, Val(false)) +@deprecate cholfact(A::AbstractMatrix, ::Type{Val{true}}; tol = 0.0) cholfact(A, Val(true); tol = tol) +@deprecate cholfact!(A::AbstractMatrix, ::Type{Val{false}}) cholfact!(A, Val(false)) +@deprecate cholfact!(A::AbstractMatrix, ::Type{Val{true}}; tol = 0.0) cholfact!(A, Val(true); tol = tol) + +# PR #22703 +@deprecate Bidiagonal(dv::AbstractVector, ev::AbstractVector, isupper::Bool) Bidiagonal(dv, ev, ifelse(isupper, :U, :L)) +@deprecate Bidiagonal(dv::AbstractVector, ev::AbstractVector, uplo::Char) Bidiagonal(dv, ev, ifelse(uplo == 'U', :U, :L)) +@deprecate Bidiagonal(A::AbstractMatrix, isupper::Bool) Bidiagonal(A, ifelse(isupper, :U, :L)) + +# PR #22925 +# also uncomment constructor tests in test/linalg/bidiag.jl +function Bidiagonal(dv::AbstractVector{T}, ev::AbstractVector{S}, uplo::Symbol) where {T,S} + depwarn(string("`Bidiagonal(dv::AbstractVector{T}, ev::AbstractVector{S}, 
uplo::Symbol) where {T, S}`", + " is deprecated, manually convert both vectors to the same type instead."), :Bidiagonal) + R = promote_type(T, S) + Bidiagonal(convert(Vector{R}, dv), convert(Vector{R}, ev), uplo) +end + +# PR #23035 +# also uncomment constructor tests in test/linalg/tridiag.jl +function SymTridiagonal(dv::AbstractVector{T}, ev::AbstractVector{S}) where {T,S} + depwarn(string("`SymTridiagonal(dv::AbstractVector{T}, ev::AbstractVector{S}) ", + "where {T, S}` is deprecated, convert both vectors to the same type instead."), :SymTridiagonal) + R = promote_type(T, S) + SymTridiagonal(convert(Vector{R}, dv), convert(Vector{R}, ev)) +end + +# PR #23154 +# also uncomment constructor tests in test/linalg/tridiag.jl +function Tridiagonal(dl::AbstractVector{Tl}, d::AbstractVector{Td}, du::AbstractVector{Tu}) where {Tl,Td,Tu} + depwarn(string("`Tridiagonal(dl::AbstractVector{Tl}, d::AbstractVector{Td}, du::AbstractVector{Tu}) ", + "where {Tl, Td, Tu}` is deprecated, convert all vectors to the same type instead."), :Tridiagonal) + Tridiagonal(map(v->convert(Vector{promote_type(Tl,Td,Tu)}, v), (dl, d, du))...) +end + +# deprecate sqrtm in favor of sqrt +@deprecate sqrtm sqrt + +# deprecate expm in favor of exp +@deprecate expm! exp! +@deprecate expm exp + +# deprecate logm in favor of log +@deprecate logm log + +# PR #23373 +@deprecate diagm(A::BitMatrix) BitMatrix(Diagonal(vec(A))) + +# PR 23341 +@eval LinearAlgebra.LAPACK @deprecate laver() version() false + +# deprecate zeros(D::Diagonal[, opts...]) +import Base: zeros +function zeros(D::Diagonal) + depwarn(string("`zeros(D::Diagonal)` is deprecated, use ", + "`Diagonal(fill!(similar(D.diag), 0))` instead, or ", + "`Diagonal(fill!(similar(D.diag), zero(eltype(D.diag))))` where necessary."), :zeros) + return Diagonal(fill!(similar(D.diag), zero(eltype(D.diag)))) +end +function zeros(D::Diagonal, ::Type{T}) where {T} + depwarn(string("`zeros(D::Diagonal, ::Type{T}) where T` is deprecated, use ", + "`Diagonal(fill!(similar(D.diag, T), 0))` instead, or ", + "`Diagonal(fill!(similar(D.diag, T), zero(T)))` where necessary."), :zeros) + return Diagonal(fill!(similar(D.diag, T), zero(T))) +end +function zeros(D::Diagonal, ::Type{T}, dims::Dims) where {T} + depwarn(string("`zeros(D::Diagonal, ::Type{T}, dims::Dims) where T` is deprecated, ", + "use `fill!(similar(D, T, dims), 0)` instead, or ", + "`fill!(similar(D, T, dims), zero(T))` where necessary."), :zeros) + return fill!(similar(D, T, dims), zero(T)) +end +function zeros(D::Diagonal, ::Type{T}, dims::Integer...) where {T} + depwarn(string("`zeros(D::Diagonal, ::Type{T}, dims::Integer...) where T` is deprecated, ", + "use `fill!(similar(D, T, dims), 0)` instead, or ", + "`fill!(similar(D, T, dims), zero(T))` where necessary."), :zeros) + return fill!(similar(D, T, dims), zero(T)) +end + +## goodbeye, eye! +export eye +function eye(m::Integer) + depwarn(string("`eye(m::Integer)` has been deprecated in favor of `I` and `Matrix` ", + "constructors. For a direct replacement, consider `Matrix(1.0I, m, m)` or ", + "`Matrix{Float64}(I, m, m)`. If `Float64` element type is not necessary, ", + "consider the shorter `Matrix(I, m, m)` (with default `eltype(I)` `Bool`)."), :eye) + return Matrix{Float64}(I, m, m) +end +function eye(::Type{T}, m::Integer) where T + depwarn(string("`eye(T::Type, m::Integer)` has been deprecated in favor of `I` and ", + "`Matrix` constructors. For a direct replacement, consider `Matrix{T}(I, m, m)`. 
If ", + "`T` element type is not necessary, consider the shorter `Matrix(I, m, m)`", + "(with default `eltype(I)` `Bool`)"), :eye) + return Matrix{T}(I, m, m) +end +function eye(m::Integer, n::Integer) + depwarn(string("`eye(m::Integer, n::Integer)` has been deprecated in favor of `I` and ", + "`Matrix` constructors. For a direct replacement, consider `Matrix(1.0I, m, n)` ", + "or `Matrix{Float64}(I, m, n)`. If `Float64` element type is not necessary, ", + "consider the shorter `Matrix(I, m, n)` (with default `eltype(I)` `Bool`)."), :eye) + return Matrix{Float64}(I, m, n) +end +function eye(::Type{T}, m::Integer, n::Integer) where T + depwarn(string("`eye(T::Type, m::Integer, n::Integer)` has been deprecated in favor of ", + "`I` and `Matrix` constructors. For a direct replacement, consider `Matrix{T}(I, m, n)`.", + "If `T` element type is not necessary, consider the shorter `Matrix(I, m, n)` ", + "(with default `eltype(I)` `Bool`)."), :eye) + return Matrix{T}(I, m, n) +end +function eye(A::AbstractMatrix{T}) where T + depwarn(string("`eye(A::AbstractMatrix{T}) where T` has been deprecated in favor of `I` and ", + "`Matrix` constructors. For a direct replacement, consider `Matrix{eltype(A)}(I, size(A))`.", + "If `eltype(A)` element type is not necessary, consider the shorter `Matrix(I, size(A))` ", + "(with default `eltype(I)` `Bool`)."), :eye) + return Matrix(one(T)I, size(A)) +end +function eye(::Type{Diagonal{T}}, n::Int) where T + depwarn(string("`eye(DT::Type{Diagonal{T}}, n::Int) where T` has been deprecated in favor of `I` ", + "and `Diagonal` constructors. For a direct replacement, consider `Diagonal{T}(I, n)`. ", + "If `T` element type is not necessary, consider the shorter `Diagonal(I, n)` ", + "(with default `eltype(I)` `Bool`)."), :eye) + return Diagonal{T}(I, n) +end + +# PR #23816: deprecation of gradient +export gradient +function gradient(args...) + Base.depwarn("`gradient` is deprecated and will be removed in the next release.", :gradient) + return _gradient(args...) +end +_gradient(F::BitVector) = _gradient(Array(F)) +_gradient(F::BitVector, h::Real) = _gradient(Array(F), h) +_gradient(F::Vector, h::BitVector) = _gradient(F, Array(h)) +_gradient(F::BitVector, h::Vector) = _gradient(Array(F), h) +_gradient(F::BitVector, h::BitVector) = _gradient(Array(F), Array(h)) +function _gradient(F::AbstractVector, h::Vector) + n = length(F) + T = typeof(oneunit(eltype(F))/oneunit(eltype(h))) + g = similar(F, T) + if n == 1 + g[1] = zero(T) + elseif n > 1 + g[1] = (F[2] - F[1]) / (h[2] - h[1]) + g[n] = (F[n] - F[n-1]) / (h[end] - h[end-1]) + if n > 2 + h = h[3:n] - h[1:n-2] + g[2:n-1] = (F[3:n] - F[1:n-2]) ./ h + end + end + g +end +_gradient(F::AbstractVector) = _gradient(F, [1:length(F);]) +_gradient(F::AbstractVector, h::Real) = _gradient(F, [h*(1:length(F));]) + +# deprecate odd fill! methods +@deprecate fill!(D::Diagonal, x) LinearAlgebra.fillstored!(D, x) +@deprecate fill!(A::AbstractTriangular, x) LinearAlgebra.fillstored!(A, x) + +# PR #25030 +@deprecate fillslots! fillstored! 
false + +function diagm(v::BitVector) + depwarn(string("`diagm(v::BitVector)` is deprecated, use `diagm(0 => v)` or ", + "`BitMatrix(Diagonal(v))` instead."), :diagm) + return BitMatrix(Diagonal(v)) +end +function diagm(v::AbstractVector) + depwarn(string("`diagm(v::AbstractVector)` is deprecated, use `diagm(0 => v)` or ", + "`Matrix(Diagonal(v))` instead."), :diagm) + return Matrix(Diagonal(v)) +end +@deprecate diagm(v::AbstractVector, k::Integer) diagm(k => v) +@deprecate diagm(x::Number) fill(x, 1, 1) + +## deprecate full +import Base: full +# full for structured arrays +function full(A::Union{Diagonal,Bidiagonal,Tridiagonal,SymTridiagonal}) + mattypestr = isa(A, Diagonal) ? "Diagonal" : + isa(A, Bidiagonal) ? "Bidiagonal" : + isa(A, Tridiagonal) ? "Tridiagonal" : + isa(A, SymTridiagonal) ? "SymTridiagonal" : + error("should not be reachable!") + depwarn(string( + "`full(A::$(mattypestr))` (and `full` in general) has been deprecated. ", + "To replace `full(A::$(mattypestr))`, consider `Matrix(A)` or, if that ", + "option is too narrow, `Array(A)`. Also consider `SparseMatrixCSC(A)` ", + "or, if that option is too narrow, `sparse(A)`."), :full) + return Matrix(A) +end + +# full for factorizations +function full(F::Union{LinearAlgebra.LU,LinearAlgebra.LQ,LinearAlgebra.QR,LinearAlgebra.QRPivoted,LinearAlgebra.QRCompactWY, + LinearAlgebra.SVD,LinearAlgebra.LDLt,LinearAlgebra.Schur,LinearAlgebra.Eigen,LinearAlgebra.Hessenberg, + LinearAlgebra.Cholesky,LinearAlgebra.CholeskyPivoted}) + facttypestr = isa(F, LinearAlgebra.LU) ? "LU" : + isa(F, LinearAlgebra.LQ) ? "LQ" : + isa(F, LinearAlgebra.QR) ? "QR" : + isa(F, LinearAlgebra.QRPivoted) ? "QRPivoted" : + isa(F, LinearAlgebra.QRCompactWY) ? "QRCompactWY" : + isa(F, LinearAlgebra.SVD) ? "SVD" : + isa(F, LinearAlgebra.LDLt) ? "LDLt" : + isa(F, LinearAlgebra.Schur) ? "Schur" : + isa(F, LinearAlgebra.Eigen) ? "Eigen" : + isa(F, LinearAlgebra.Hessenberg) ? "Hessenberg" : + isa(F, LinearAlgebra.Cholesky) ? "Cholesky" : + isa(F, LinearAlgebra.CholeskyPivoted) ? "CholeskyPivoted" : + error("should not be reachable!") + depwarn(string( + "`full(F::$(facttypestr))` (and `full` in general) has been deprecated. ", + "To replace `full(F::$(facttypestr))`, consider `Matrix(F)`, `AbstractMatrix(F)` or, ", + "if those options are too narrow, `Array(F)` or `AbstractArray(F)`."), :full) + return AbstractMatrix(F) +end + +# full for implicit orthogonal factors +function full(Q::LinearAlgebra.HessenbergQ) + depwarn(string( + "`full(Q::HessenbergQ)` (and `full` in general) has been deprecated. ", + "To replace `full(Q::HessenbergQ)`, consider `Matrix(Q)` or, ", + "if that option is too narrow, `Array(Q)`."), :full) + return Matrix(Q) +end +function full(Q::LinearAlgebra.LQPackedQ; thin::Bool = true) + depwarn(string( + "`full(Q::LQPackedQ; thin::Bool = true)` (and `full` in general) ", + "has been deprecated. To replace `full(Q::LQPackedQ, true)`, ", + "consider `Matrix(Q)` or `Array(Q)`. To replace `full(Q::LQPackedQ, false)`, ", + "consider `LinearAlgebra.mul!(Q, Matrix{eltype(Q)}(I, size(Q.factors, 2), size(Q.factors, 2)))`."), :full) + return thin ? Array(Q) : LinearAlgebra.mul!(Q, Matrix{eltype(Q)}(I, size(Q.factors, 2), size(Q.factors, 2))) +end +function full(Q::Union{LinearAlgebra.QRPackedQ,LinearAlgebra.QRCompactWYQ}; thin::Bool = true) + qtypestr = isa(Q, LinearAlgebra.QRPackedQ) ? "QRPackedQ" : + isa(Q, LinearAlgebra.QRCompactWYQ) ? 
"QRCompactWYQ" : + error("should not be reachable!") + depwarn(string( + "`full(Q::$(qtypestr); thin::Bool = true)` (and `full` in general) ", + "has been deprecated. To replace `full(Q::$(qtypestr), true)`, ", + "consider `Matrix(Q)` or `Array(Q)`. To replace `full(Q::$(qtypestr), false)`, ", + "consider `LinearAlgebra.mul!(Q, Matrix{eltype(Q)}(I, size(Q.factors, 1), size(Q.factors, 1)))`."), :full) + return thin ? Array(Q) : LinearAlgebra.mul!(Q, Matrix{eltype(Q)}(I, size(Q.factors, 1), size(Q.factors, 1))) +end + +# full for symmetric / hermitian / triangular wrappers +function full(A::Symmetric) + depwarn(string( + "`full(A::Symmetric)` (and `full` in general) has been deprecated. ", + "To replace `full(A::Symmetric)`, as appropriate consider `Matrix(A)`, ", + "`Array(A)`, `SparseMatrixCSC(A)`, `sparse(A)`, `copyto!(similar(parent(A)), A)`, ", + "or `LinearAlgebra.copytri!(copy(parent(A)), A.uplo)`."), :full) + return Matrix(A) +end +function full(A::Hermitian) + depwarn(string( + "`full(A::Hermitian)` (and `full` in general) has been deprecated. ", + "To replace `full(A::Hermitian)`, as appropriate consider `Matrix(A)`, ", + "`Array(A)`, `SparseMatrixCSC(A)`, `sparse(A)`, `copyto!(similar(parent(A)), A)`, ", + "or `LinearAlgebra.copytri!(copy(parent(A)), A.uplo, true)`."), :full) + return Matrix(A) +end +function full(A::Union{UpperTriangular,LowerTriangular}) + (tritypestr, tri!str) = + isa(A, UpperTriangular) ? ("UpperTriangular", "triu!") : + isa(A, LowerTriangular) ? ("LowerTriangular", "tril!") : + error("should not be reachable!") + depwarn(string( + "`full(A::$(tritypestr))` (and `full` in general) has been deprecated. ", + "To replace `full(A::$(tritypestr))`, as appropriate consider `Matrix(A)`, ", + "`Array(A)`, `SparseMatrixCSC(A)`, `sparse(A)`, `copyto!(similar(parent(A)), A)`, ", + "or `$(tri!str)(copy(parent(A)))`."), :full) + return Matrix(A) +end +function full(A::Union{LinearAlgebra.UnitUpperTriangular,LinearAlgebra.UnitLowerTriangular}) + tritypestr = isa(A, LinearAlgebra.UnitUpperTriangular) ? "LinearAlgebra.UnitUpperTriangular" : + isa(A, LinearAlgebra.UnitLowerTriangular) ? "LinearAlgebra.UnitLowerTriangular" : + error("should not be reachable!") + depwarn(string( + "`full(A::$(tritypestr))` (and `full` in general) has been deprecated. ", + "To replace `full(A::$(tritypestr))`, as appropriate consider `Matrix(A)`, ", + "`Array(A)`, `SparseMatrixCSC(A)`, `sparse(A)`, or `copyto!(similar(parent(A)), A)`."), :full) + return Matrix(A) +end + +# TODO: after 0.7, remove thin keyword argument and associated logic from... 
+# (1) stdlib/LinearAlgebra/src/svd.jl +# (2) stdlib/LinearAlgebra/src/qr.jl +# (3) stdlib/LinearAlgebra/src/lq.jl + + +@deprecate chol!(x::Number, uplo) chol(x) false + +@deprecate diff(A::AbstractMatrix) diff(A, 1) + +### deprecations for lazier, less jazzy linalg transition in the next several blocks ### + +# deprecate ConjArray +# TODO: between 0.7 and 1.0 remove +# 1) the type definitions in stdlib/LinearAlgebra/src/conjarray.jl +# 2) the include("conjarray.jl") from stdlib/LinearAlgebra/src/LinearAlgebra.jl +# 3) the file stdlib/LinearAlgebra/src/conjarray.jl itself +export ConjArray, ConjVector, ConjMatrix + +function ConjArray(a::AbstractArray{T,N}) where {T,N} + Base.depwarn(_ConjArray_depstring(), :ConjArray) + return ConjArray{conj_type(T),N,typeof(a)}(a) +end +function ConjVector(v::AbstractVector{T}) where {T} + Base.depwarn(_ConjArray_depstring(), :ConjArray) + return ConjArray{conj_type(T),1,typeof(v)}(v) +end +function ConjMatrix(m::AbstractMatrix{T}) where {T} + Base.depwarn(_ConjArray_depstring(), :ConjArray) + return ConjArray{conj_type(T),2,typeof(m)}(m) +end + +_ConjArray_depstring() = string("`ConjRowVector` and `RowVector` have been deprecated in favor ", + "of `Adjoint` and `Transpose`, and, as part of the implementation of `ConjRowVector`", + "/`RowVector`, `ConjArray`s have been deprecated as well. Please see 0.7's NEWS.md ", + "for a more detailed explanation of the associated changes.") + +# This type can cause the element type to change under conjugation - e.g. an array of complex arrays. +@inline conj_type(x) = conj_type(typeof(x)) +@inline conj_type(::Type{T}) where {T} = promote_op(conj, T) + +@inline parent(c::ConjArray) = c.parent +@inline parent_type(c::ConjArray) = parent_type(typeof(c)) +@inline parent_type(::Type{ConjArray{T,N,A}}) where {T,N,A} = A + +@inline size(a::ConjArray) = size(a.parent) +IndexStyle(::CA) where {CA<:ConjArray} = IndexStyle(parent_type(CA)) +IndexStyle(::Type{CA}) where {CA<:ConjArray} = IndexStyle(parent_type(CA)) + +@propagate_inbounds getindex(a::ConjArray{T,N}, i::Int) where {T,N} = conj(getindex(a.parent, i)) +@propagate_inbounds getindex(a::ConjArray{T,N}, i::Vararg{Int,N}) where {T,N} = conj(getindex(a.parent, i...)) +@propagate_inbounds setindex!(a::ConjArray{T,N}, v, i::Int) where {T,N} = setindex!(a.parent, conj(v), i) +@propagate_inbounds setindex!(a::ConjArray{T,N}, v, i::Vararg{Int,N}) where {T,N} = setindex!(a.parent, conj(v), i...) + +@inline similar(a::ConjArray, ::Type{T}, dims::Dims{N}) where {T,N} = similar(parent(a), T, dims) + +# Currently, this is default behavior for RowVector only +@inline conj(a::ConjArray) = parent(a) + +# Helper functions, currently used by RowVector +@inline _conj(a::AbstractArray) = ConjArray(a) +@inline _conj(a::AbstractArray{T}) where {T<:Real} = a +@inline _conj(a::ConjArray) = parent(a) +@inline _conj(a::ConjArray{T}) where {T<:Real} = parent(a) + +# deprecate ConjRowVector/RowVector +# TODO: between 0.7 and 1.0 remove +# 1) the type definitions in stdlib/LinearAlgebra/src/rowvector.jl +# 2) the include("rowvector.jl") from stdlib/LinearAlgebra/src/LinearAlgebra.jl +# 3) the file stdlib/LinearAlgebra/src/rowvector.jl itself +# 4) the RowVectors in the Unions in stdlib/SparseArrays/src/sparsevector.jl around lines 995, 1010, 1011, and 1012 +export RowVector + +_RowVector_depstring() = string("`ConjRowVector` and `RowVector` have been deprecated in favor ", + "of `Adjoint` and `Transpose`. 
Please see 0.7's NEWS.md for a more detailed explanation ", + "of the associated changes.") + +@inline check_types(::Type{T1}, ::AbstractVector{T2}) where {T1,T2} = check_types(T1, T2) +@pure check_types(::Type{T1}, ::Type{T2}) where {T1,T2} = T1 === transpose_type(T2) ? nothing : + error("Element type mismatch. Tried to create a `RowVector{$T1}` from an `AbstractVector{$T2}`") + +# The element type may be transformed as transpose is recursive +@inline transpose_type(::Type{T}) where {T} = promote_op(transpose, T) + +# Constructors that take a vector +function RowVector(vec::AbstractVector{T}) where {T} + Base.depwarn(_RowVector_depstring(), :RowVector) + return RowVector{transpose_type(T),typeof(vec)}(vec) +end +function RowVector{T}(vec::AbstractVector{T}) where {T} + Base.depwarn(_RowVector_depstring(), :RowVector) + return RowVector{T,typeof(vec)}(vec) +end + +# Constructors that take a size and default to Array +function RowVector{T}(::Uninitialized, n::Int) where {T} + Base.depwarn(_RowVector_depstring(), :RowVector) + return RowVector{T}(Vector{transpose_type(T)}(uninitialized, n)) +end +function RowVector{T}(::Uninitialized, n1::Int, n2::Int) where {T} + Base.depwarn(_RowVector_depstring(), :RowVector) + return n1 == 1 ? RowVector{T}(Vector{transpose_type(T)}(uninitialized, n2)) : + error("RowVector expects 1×N size, got ($n1,$n2)") +end +function RowVector{T}(::Uninitialized, n::Tuple{Int}) where {T} + Base.depwarn(_RowVector_depstring(), :RowVector) + return RowVector{T}(Vector{transpose_type(T)}(uninitialized, n[1])) +end +function RowVector{T}(::Uninitialized, n::Tuple{Int,Int}) where {T} + Base.depwarn(_RowVector_depstring(), :RowVector) + return n[1] == 1 ? RowVector{T}(Vector{transpose_type(T)}(uninitialized, n[2])) : + error("RowVector expects 1×N size, got $n") +end + +# Conversion of underlying storage +convert(::Type{RowVector{T,V}}, rowvec::RowVector) where {T,V<:AbstractVector} = + RowVector{T,V}(convert(V,rowvec.vec)) + +# similar tries to maintain the RowVector wrapper and the parent type +@inline similar(rowvec::RowVector) = RowVector(similar(parent(rowvec))) +@inline similar(rowvec::RowVector, ::Type{T}) where {T} = RowVector(similar(parent(rowvec), transpose_type(T))) + +# Resizing similar currently loses its RowVector property. +@inline similar(rowvec::RowVector, ::Type{T}, dims::Dims{N}) where {T,N} = similar(parent(rowvec), T, dims) + +# Basic methods + +# replaced in the Adjoint/Transpose transition +# """ +# transpose(v::AbstractVector) +# +# The transposition operator (`.'`). 
+# +# # Examples +# ```jldoctest +# julia> v = [1,2,3] +# 3-element Array{Int64,1}: +# 1 +# 2 +# 3 +# +# julia> transpose(v) +# 1×3 RowVector{Int64,Array{Int64,1}}: +# 1 2 3 +# ``` +# """ +# @inline transpose(vec::AbstractVector) = RowVector(vec) +# @inline adjoint(vec::AbstractVector) = RowVector(_conj(vec)) + +# methods necessary to preserve RowVector's behavior through the Adjoint/Transpose transition +rvadjoint(v::AbstractVector) = RowVector(_conj(v)) +rvtranspose(v::AbstractVector) = RowVector(v) +rvadjoint(v::RowVector) = conj(v.vec) +rvadjoint(v::RowVector{<:Real}) = v.vec +rvtranspose(v::RowVector) = v.vec +rvtranspose(v::ConjRowVector) = copy(v.vec) +rvadjoint(x) = adjoint(x) +rvtranspose(x) = transpose(x) + +@inline transpose(rowvec::RowVector) = rowvec.vec +@inline transpose(rowvec::ConjRowVector) = copy(rowvec.vec) # remove the ConjArray wrapper from any raw vector +@inline adjoint(rowvec::RowVector) = conj(rowvec.vec) +@inline adjoint(rowvec::RowVector{<:Real}) = rowvec.vec + +parent(rowvec::RowVector) = rowvec.vec +vec(rowvec::RowVector) = rowvec.vec + +""" + conj(v::RowVector) + +Return a [`ConjArray`](@ref) lazy view of the input, where each element is conjugated. + +# Examples +```jldoctest +julia> v = RowVector([1+im, 1-im]) +1×2 RowVector{Complex{Int64},Array{Complex{Int64},1}}: + 1+1im 1-1im + +julia> conj(v) +1×2 RowVector{Complex{Int64},ConjArray{Complex{Int64},1,Array{Complex{Int64},1}}}: + 1-1im 1+1im +``` +""" +@inline conj(rowvec::RowVector) = RowVector(_conj(rowvec.vec)) +@inline conj(rowvec::RowVector{<:Real}) = rowvec + +# AbstractArray interface +@inline length(rowvec::RowVector) = length(rowvec.vec) +@inline size(rowvec::RowVector) = (1, length(rowvec.vec)) +@inline size(rowvec::RowVector, d) = ifelse(d==2, length(rowvec.vec), 1) +@inline axes(rowvec::RowVector) = (Base.OneTo(1), axes(rowvec.vec)[1]) +@inline axes(rowvec::RowVector, d) = ifelse(d == 2, axes(rowvec.vec)[1], Base.OneTo(1)) +IndexStyle(::RowVector) = IndexLinear() +IndexStyle(::Type{<:RowVector}) = IndexLinear() + +@propagate_inbounds getindex(rowvec::RowVector, i::Int) = rvtranspose(rowvec.vec[i]) +@propagate_inbounds setindex!(rowvec::RowVector, v, i::Int) = (setindex!(rowvec.vec, rvtranspose(v), i); rowvec) + +# Keep a RowVector where appropriate +@propagate_inbounds getindex(rowvec::RowVector, ::Colon, i::Int) = rvtranspose.(rowvec.vec[i:i]) +@propagate_inbounds getindex(rowvec::RowVector, ::Colon, inds::AbstractArray{Int}) = RowVector(rowvec.vec[inds]) +@propagate_inbounds getindex(rowvec::RowVector, ::Colon, ::Colon) = RowVector(rowvec.vec[:]) + +# helper function for below +@inline to_vec(rowvec::RowVector) = map(rvtranspose, rvtranspose(rowvec)) +@inline to_vec(x::Number) = x +@inline to_vecs(rowvecs...) = (map(to_vec, rowvecs)...,) + +# map: Preserve the RowVector by un-wrapping and re-wrapping, but note that `f` +# expects to operate within the transposed domain, so to_vec transposes the elements +@inline map(f, rowvecs::RowVector...) = RowVector(map(rvtranspose∘f, to_vecs(rowvecs...)...)) + +# broadcast (other combinations default to higher-dimensional array) +@inline broadcast(f, rowvecs::Union{Number,RowVector}...) = + RowVector(broadcast(transpose∘f, to_vecs(rowvecs...)...)) + +# Horizontal concatenation # + +@inline hcat(X::RowVector...) = rvtranspose(vcat(map(rvtranspose, X)...)) +@inline hcat(X::Union{RowVector,Number}...) = rvtranspose(vcat(map(rvtranspose, X)...)) + +@inline typed_hcat(::Type{T}, X::RowVector...) 
where {T} = + rvtranspose(typed_vcat(T, map(rvtranspose, X)...)) +@inline typed_hcat(::Type{T}, X::Union{RowVector,Number}...) where {T} = + rvtranspose(typed_vcat(T, map(rvtranspose, X)...)) + +# Multiplication # + +# inner product -> dot product specializations +@inline *(rowvec::RowVector{T}, vec::AbstractVector{T}) where {T<:Real} = dot(parent(rowvec), vec) +@inline *(rowvec::ConjRowVector{T}, vec::AbstractVector{T}) where {T<:Real} = dot(rvadjoint(rowvec), vec) +@inline *(rowvec::ConjRowVector, vec::AbstractVector) = dot(rvadjoint(rowvec), vec) + +# Generic behavior +@inline function *(rowvec::RowVector, vec::AbstractVector) + if length(rowvec) != length(vec) + throw(DimensionMismatch("A has dimensions $(size(rowvec)) but B has dimensions $(size(vec))")) + end + sum(@inbounds(return rowvec[i]*vec[i]) for i = 1:length(vec)) +end +@inline *(rowvec::RowVector, mat::AbstractMatrix) = rvtranspose(transpose(mat) * rvtranspose(rowvec)) +*(::RowVector, ::RowVector) = throw(DimensionMismatch("Cannot multiply two transposed vectors")) +@inline *(vec::AbstractVector, rowvec::RowVector) = vec .* rowvec +*(vec::AbstractVector, rowvec::AbstractVector) = throw(DimensionMismatch("Cannot multiply two vectors")) + +# Transposed forms +*(::RowVector, ::Transpose{<:Any,<:AbstractVector}) = + throw(DimensionMismatch("Cannot multiply two transposed vectors")) +*(rowvec::RowVector, transmat::Transpose{<:Any,<:AbstractMatrix}) = + (mat = transmat.parent; rvtranspose(mat * rvtranspose(rowvec))) +*(rowvec1::RowVector, transrowvec2::Transpose{<:Any,<:RowVector}) = + (rowvec2 = transrowvec2.parent; rowvec1*rvtranspose(rowvec2)) +*(::AbstractVector, ::Transpose{<:Any,<:RowVector}) = + throw(DimensionMismatch("Cannot multiply two vectors")) +*(mat::AbstractMatrix, transrowvec::Transpose{<:Any,<:RowVector}) = + (rowvec = transrowvec.parent; mat * rvtranspose(rowvec)) + +*(transrowvec::Transpose{<:Any,<:RowVector}, transvec::Transpose{<:Any,<:AbstractVector}) = + rvtranspose(transrowvec.parent) * transpose(transvec.parent) +*(transrowvec1::Transpose{<:Any,<:RowVector}, transrowvec2::Transpose{<:Any,<:RowVector}) = + throw(DimensionMismatch("Cannot multiply two vectors")) +*(transvec::Transpose{<:Any,<:AbstractVector}, transrowvec::Transpose{<:Any,<:RowVector}) = + transpose(transvec.parent)*rvtranspose(transrowvec.parent) +*(transmat::Transpose{<:Any,<:AbstractMatrix}, transrowvec::Transpose{<:Any,<:RowVector}) = + transmat * rvtranspose(transrowvec.parent) + +*(::Transpose{<:Any,<:RowVector}, ::AbstractVector) = + throw(DimensionMismatch("Cannot multiply two vectors")) +*(transrowvec1::Transpose{<:Any,<:RowVector}, rowvec2::RowVector) = + rvtranspose(transrowvec1.parent) * rowvec2 +*(transvec::Transpose{<:Any,<:AbstractVector}, rowvec::RowVector) = + throw(DimensionMismatch("Cannot multiply two transposed vectors")) + +# Conjugated forms +*(::RowVector, ::Adjoint{<:Any,<:AbstractVector}) = + throw(DimensionMismatch("Cannot multiply two transposed vectors")) +*(rowvec::RowVector, adjmat::Adjoint{<:Any,<:AbstractMatrix}) = + rvadjoint(adjmat.parent * rvadjoint(rowvec)) +*(rowvec1::RowVector, adjrowvec2::Adjoint{<:Any,<:RowVector}) = + rowvec1 * rvadjoint(adjrowvec2.parent) +*(vec::AbstractVector, adjrowvec::Adjoint{<:Any,<:RowVector}) = + throw(DimensionMismatch("Cannot multiply two vectors")) +*(mat::AbstractMatrix, adjrowvec::Adjoint{<:Any,<:RowVector}) = + mat * rvadjoint(adjrowvec.parent) + +*(adjrowvec::Adjoint{<:Any,<:RowVector}, adjvec::Adjoint{<:Any,<:AbstractVector}) = + rvadjoint(adjrowvec.parent) * 
adjoint(adjvec.parent) +*(adjrowvec1::Adjoint{<:Any,<:RowVector}, adjrowvec2::Adjoint{<:Any,<:RowVector}) = + throw(DimensionMismatch("Cannot multiply two vectors")) +*(adjvec::Adjoint{<:Any,<:AbstractVector}, adjrowvec::Adjoint{<:Any,<:RowVector}) = + adjoint(adjvec.parent)*rvadjoint(adjrowvec.parent) +*(adjmat::Adjoint{<:Any,<:AbstractMatrix}, adjrowvec::Adjoint{<:Any,<:RowVector}) = + adjoint(adjmat.parent) * rvadjoint(adjrowvec.parent) + +*(::Adjoint{<:Any,<:RowVector}, ::AbstractVector) = throw(DimensionMismatch("Cannot multiply two vectors")) +*(adjrowvec1::Adjoint{<:Any,<:RowVector}, rowvec2::RowVector) = rvadjoint(adjrowvec1.parent) * rowvec2 +*(adjvec::Adjoint{<:Any,<:AbstractVector}, rowvec::RowVector) = throw(DimensionMismatch("Cannot multiply two transposed vectors")) + +# Pseudo-inverse +pinv(v::RowVector, tol::Real=0) = rvadjoint(pinv(rvadjoint(v), tol)) + +# Left Division # + +\(rowvec1::RowVector, rowvec2::RowVector) = pinv(rowvec1) * rowvec2 +\(mat::AbstractMatrix, rowvec::RowVector) = throw(DimensionMismatch("Cannot left-divide transposed vector by matrix")) +\(transmat::Transpose{<:Any,<:AbstractMatrix}, rowvec::RowVector) = + throw(DimensionMismatch("Cannot left-divide transposed vector by matrix")) +\(adjmat::Adjoint{<:Any,<:AbstractMatrix}, rowvec::RowVector) = + throw(DimensionMismatch("Cannot left-divide transposed vector by matrix")) + +# Right Division # + +@inline /(rowvec::RowVector, mat::AbstractMatrix) = rvtranspose(transpose(mat) \ rvtranspose(rowvec)) +/(rowvec::RowVector, transmat::Transpose{<:Any,<:AbstractMatrix}) = rvtranspose(transmat.parent \ rvtranspose(rowvec)) +/(rowvec::RowVector, adjmat::Adjoint{<:Any,<:AbstractMatrix}) = rvadjoint(adjmat.parent \ rvadjoint(rowvec)) + + +# definitions necessary for test/linalg/dense.jl to pass +# should be cleaned up / revised as necessary in the future +/(A::Number, B::Adjoint{<:Any,<:RowVector}) = /(A, rvadjoint(B.parent)) +/(A::Matrix, B::RowVector) = rvadjoint(rvadjoint(B) \ adjoint(A)) + + +# disambiguation methods +*(A::Adjoint{<:Any,<:AbstractVector}, B::Transpose{<:Any,<:RowVector}) = adjoint(A.parent) * B +*(A::Adjoint{<:Any,<:AbstractMatrix}, B::Transpose{<:Any,<:RowVector}) = A * rvtranspose(B.parent) +*(A::Transpose{<:Any,<:AbstractVector}, B::Adjoint{<:Any,<:RowVector}) = transpose(A.parent) * B +*(A::Transpose{<:Any,<:AbstractMatrix}, B::Adjoint{<:Any,<:RowVector}) = A * rvadjoint(B.parent) + +# deprecate RowVector{T}(shape...) constructors to RowVector{T}(uninitialized, shape...) equivalents +@deprecate RowVector{T}(n::Int) where {T} RowVector{T}(uninitialized, n) +@deprecate RowVector{T}(n1::Int, n2::Int) where {T} RowVector{T}(uninitialized, n1, n2) +@deprecate RowVector{T}(n::Tuple{Int}) where {T} RowVector{T}(uninitialized, n) +@deprecate RowVector{T}(n::Tuple{Int,Int}) where {T} RowVector{T}(uninitialized, n) + +# operations formerly exported from and imported/extended by LinearAlgebra +import Base: A_mul_Bt, At_ldiv_Bt, A_rdiv_Bc, At_ldiv_B, Ac_mul_Bc, A_mul_Bc, Ac_mul_B, + Ac_ldiv_B, Ac_ldiv_Bc, At_mul_Bt, A_rdiv_Bt, At_mul_B + +# most of these explicit exports are of course obviated by the deprecations below +# but life is easier just leaving them for now... 
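[Editor's note: the export list and @deprecate blocks that follow all apply the same rewrite: each old A[ct]_(mul|ldiv|rdiv)_B[ct][!] name becomes mul!/ldiv!/rdiv! (or *, \, /) combined with a lazy adjoint/transpose wrapper. A minimal sketch of the migration, assuming a 0.7-era session; the arrays A, B, C below are made up for illustration and are not part of the patch:]

    using LinearAlgebra
    A = rand(3, 3); B = rand(3, 3); C = similar(A)
    Ac_mul_B!(C, A, B)       # old spelling (deprecated below): store Aᴴ * B into C
    mul!(C, adjoint(A), B)   # new spelling: three-argument mul! plus a lazy adjoint wrapper
    At_ldiv_B(A, B)          # old spelling: Aᵀ \ B
    transpose(A) \ B         # new spelling: lazy transpose wrapper plus the usual backslash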
+export A_ldiv_B!, + A_ldiv_Bc, + A_ldiv_Bt, + A_mul_B!, + A_mul_Bc, + A_mul_Bc!, + A_mul_Bt, + A_mul_Bt!, + A_rdiv_Bc, + A_rdiv_Bt, + Ac_ldiv_B, + Ac_ldiv_Bc, + Ac_ldiv_B!, + Ac_mul_B, + Ac_mul_B!, + Ac_mul_Bc, + Ac_mul_Bc!, + Ac_rdiv_B, + Ac_rdiv_Bc, + At_ldiv_B, + At_ldiv_Bt, + At_ldiv_B!, + At_mul_B, + At_mul_B!, + At_mul_Bt, + At_mul_Bt!, + At_rdiv_B, + At_rdiv_Bt + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/bidiag.jl, to deprecate +@deprecate A_mul_B!(C::AbstractMatrix, A::SymTridiagonal, B::BiTriSym) mul!(C, A, B) +@deprecate A_mul_B!(C::AbstractMatrix, A::BiTri, B::BiTriSym) mul!(C, A, B) +@deprecate A_mul_B!(C::AbstractMatrix, A::BiTriSym, B::BiTriSym) mul!(C, A, B) +@deprecate A_mul_B!(C::AbstractMatrix, A::AbstractTriangular, B::BiTriSym) mul!(C, A, B) +@deprecate A_mul_B!(C::AbstractMatrix, A::AbstractMatrix, B::BiTriSym) mul!(C, A, B) +@deprecate A_mul_B!(C::AbstractMatrix, A::Diagonal, B::BiTriSym) mul!(C, A, B) +@deprecate A_mul_B!(C::AbstractVector, A::BiTri, B::AbstractVector) mul!(C, A, B) +@deprecate A_mul_B!(C::AbstractMatrix, A::BiTri, B::AbstractVecOrMat) mul!(C, A, B) +@deprecate A_mul_B!(C::AbstractVecOrMat, A::BiTri, B::AbstractVecOrMat) mul!(C, A, B) +@deprecate Ac_ldiv_B(A::Bidiagonal, v::RowVector) (\)(adjoint(A), v) +@deprecate At_ldiv_B(A::Bidiagonal, v::RowVector) (\)(transpose(A), v) +@deprecate Ac_ldiv_B(A::Bidiagonal{<:Number}, v::RowVector{<:Number}) (\)(adjoint(A), v) +@deprecate At_ldiv_B(A::Bidiagonal{<:Number}, v::RowVector{<:Number}) (\)(transpose(A), v) +@deprecate Ac_mul_B(A::Bidiagonal{T}, B::AbstractVector{T}) where {T} (*)(adjoint(A), B) +@deprecate A_mul_Bc(A::Bidiagonal{T}, B::AbstractVector{T}) where {T} (*)(A, adjoint(B)) +@deprecate A_rdiv_Bc(A::Bidiagonal{T}, B::AbstractVector{T}) where {T} (/)(A, adjoint(B)) +@deprecate A_ldiv_B!(A::Union{Bidiagonal, AbstractTriangular}, b::AbstractVector) ldiv!(A, b) +@deprecate At_ldiv_B!(A::Bidiagonal, b::AbstractVector) ldiv!(transpose(A), b) +@deprecate Ac_ldiv_B!(A::Bidiagonal, b::AbstractVector) ldiv!(adjoint(A), b) +@deprecate A_ldiv_B!(A::Union{Bidiagonal,AbstractTriangular}, B::AbstractMatrix) ldiv!(A, B) +@deprecate Ac_ldiv_B!(A::Union{Bidiagonal,AbstractTriangular}, B::AbstractMatrix) ldiv!(adjoint(A), B) +@deprecate At_ldiv_B!(A::Union{Bidiagonal,AbstractTriangular}, B::AbstractMatrix) ldiv!(transpose(A), B) +@deprecate At_ldiv_B(A::Bidiagonal, B::AbstractVecOrMat) (\)(transpose(A), B) +@deprecate Ac_ldiv_B(A::Bidiagonal, B::AbstractVecOrMat) ldiv!(adjoint(A), B) +@deprecate Ac_ldiv_B(A::Bidiagonal{TA}, B::AbstractVecOrMat{TB}) where {TA<:Number,TB<:Number} (\)(adjoint(A), B) +@deprecate At_ldiv_B(A::Bidiagonal{TA}, B::AbstractVecOrMat{TB}) where {TA<:Number,TB<:Number} (\)(transpose(A), B) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/tridiag.jl, to deprecate +@deprecate A_mul_B!(C::StridedVecOrMat, S::SymTridiagonal, B::StridedVecOrMat) mul!(C, S, B) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] 
methods from base/linalg/diagonal.jl, to deprecate +@deprecate A_mul_B!(A::Union{LowerTriangular,UpperTriangular}, D::Diagonal) mul!(A, D) +@deprecate A_mul_B!(A::UnitLowerTriangular, D::Diagonal) mul!(A, D) +@deprecate A_mul_B!(A::UnitUpperTriangular, D::Diagonal) mul!(A, D) +@deprecate A_mul_B!(D::Diagonal, B::UnitLowerTriangular) mul!(D, B) +@deprecate A_mul_B!(D::Diagonal, B::UnitUpperTriangular) mul!(D, B) +@deprecate Ac_mul_B(D::Diagonal, B::Diagonal) (*)(adjoint(D), B) +@deprecate Ac_mul_B(A::AbstractTriangular, D::Diagonal) (*)(adjoint(A), D) +@deprecate Ac_mul_B(A::AbstractMatrix, D::Diagonal) (*)(adjoint(A), D) +@deprecate At_mul_B(D::Diagonal, B::Diagonal) (*)(transpose(D), B) +@deprecate At_mul_B(A::AbstractTriangular, D::Diagonal) (*)(transpose(A), D) +@deprecate At_mul_B(A::AbstractMatrix, D::Diagonal) (*)(transpose(A), D) +@deprecate A_mul_Bc(D::Diagonal, B::Diagonal) (*)(D, adjoint(B)) +@deprecate A_mul_Bc(D::Diagonal, B::AbstractTriangular) (*)(D, adjoint(B)) +@deprecate A_mul_Bc(D::Diagonal, Q::Union{QRCompactWYQ,QRPackedQ}) (*)(D, adjoint(Q)) +@deprecate A_mul_Bc(D::Diagonal, A::AbstractMatrix) (*)(D, adjoint(A)) +@deprecate A_mul_Bt(D::Diagonal, B::Diagonal) (*)(D, transpose(B)) +@deprecate A_mul_Bt(D::Diagonal, B::AbstractTriangular) (*)(D, transpose(B)) +@deprecate A_mul_Bt(D::Diagonal, A::AbstractMatrix) (*)(D, transpose(A)) +@deprecate Ac_mul_Bc(D::Diagonal, B::Diagonal) (*)(adjoint(D), adjoint(B)) +@deprecate At_mul_Bt(D::Diagonal, B::Diagonal) (*)(transpose(D), transpose(B)) +@deprecate A_mul_B!(A::Diagonal,B::Diagonal) mul!(A, B) +@deprecate At_mul_B!(A::Diagonal,B::Diagonal) mul!(transpose(A), B) +@deprecate Ac_mul_B!(A::Diagonal,B::Diagonal) mul!(adjoint(A), B) +@deprecate A_mul_B!(A::QRPackedQ, D::Diagonal) mul!(A, D) +@deprecate A_mul_B!(A::Diagonal,B::AbstractMatrix) mul!(A, B) +@deprecate At_mul_B!(A::Diagonal,B::AbstractMatrix) mul!(transpose(A), B) +@deprecate Ac_mul_B!(A::Diagonal,B::AbstractMatrix) mul!(adjoint(A), B) +@deprecate A_mul_B!(A::AbstractMatrix,B::Diagonal) mul!(A, B) +@deprecate A_mul_Bt!(A::AbstractMatrix,B::Diagonal) mul!(A, transpose(B)) +@deprecate A_mul_Bc!(A::AbstractMatrix,B::Diagonal) mul!(A, adjoint(B)) +@deprecate A_mul_B!(out::AbstractVector, A::Diagonal, in::AbstractVector) mul!(out, A, in) +@deprecate Ac_mul_B!(out::AbstractVector, A::Diagonal, in::AbstractVector) mul!(out, adjoint(A), in) +@deprecate At_mul_B!(out::AbstractVector, A::Diagonal, in::AbstractVector) mul!(out, transpose(A), in) +@deprecate A_mul_B!(out::AbstractMatrix, A::Diagonal, in::AbstractMatrix) mul!(out, A, in) +@deprecate Ac_mul_B!(out::AbstractMatrix, A::Diagonal, in::AbstractMatrix) mul!(out, adjoint(A), in) +@deprecate At_mul_B!(out::AbstractMatrix, A::Diagonal, in::AbstractMatrix) mul!(out, transpose(A), in) +@deprecate A_mul_Bt(A::Diagonal, B::RealHermSymComplexSym) (*)(A, transpose(B)) +@deprecate At_mul_B(A::RealHermSymComplexSym, B::Diagonal) (*)(transpose(A), B) +@deprecate A_mul_Bc(A::Diagonal, B::RealHermSymComplexHerm) (*)(A, adjoint(B)) +@deprecate Ac_mul_B(A::RealHermSymComplexHerm, B::Diagonal) (*)(adjoint(A), B) +@deprecate A_ldiv_B!(D::Diagonal{T}, v::AbstractVector{T}) where {T} ldiv!(D, v) +@deprecate A_ldiv_B!(D::Diagonal{T}, V::AbstractMatrix{T}) where {T} ldiv!(D, V) +@deprecate Ac_ldiv_B!(D::Diagonal{T}, B::AbstractVecOrMat{T}) where {T} ldiv!(adjoint(D), B) +@deprecate At_ldiv_B!(D::Diagonal{T}, B::AbstractVecOrMat{T}) where {T} ldiv!(transpose(D), B) +@deprecate A_rdiv_B!(A::AbstractMatrix{T}, D::Diagonal{T}) where {T} rdiv!(A, D) 
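[Editor's note: as a quick illustration of the Diagonal rewrites in this block, the in-place solve and multiply helpers become ldiv!/mul! with lazy wrappers. This is a sketch only; LinearAlgebra is assumed to be loaded, and D, b, and out are invented sample data, not part of the patch:]

    using LinearAlgebra
    D = Diagonal([1.0, 2.0, 3.0]); b = rand(3); out = similar(b)
    A_ldiv_B!(D, b)            # old spelling (deprecated above): solve D \ b in place
    ldiv!(D, b)                # new spelling
    Ac_mul_B!(out, D, b)       # old spelling: out = Dᴴ * b
    mul!(out, adjoint(D), b)   # new spelling: three-argument mul! with adjoint(D)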
+@deprecate A_rdiv_Bc!(A::AbstractMatrix{T}, D::Diagonal{T}) where {T} rdiv!(A, adjoint(D)) +@deprecate A_rdiv_Bt!(A::AbstractMatrix{T}, D::Diagonal{T}) where {T} rdiv!(A, transpose(D)) +@deprecate Ac_ldiv_B(F::Factorization, D::Diagonal) (\)(adjoint(F), D) +@deprecate A_mul_Bt(D::Diagonal, rowvec::RowVector) (*)(D, transpose(rowvec)) +@deprecate A_mul_Bc(D::Diagonal, rowvec::RowVector) (*)(D, adjoint(rowvec)) +@deprecate A_ldiv_B!(D::Diagonal, B::StridedVecOrMat) ldiv!(D, B) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/special.jl, to deprecate +@deprecate A_mul_Bc!(A::AbstractTriangular, B::Union{QRCompactWYQ,QRPackedQ}) mul!(A, adjoint(B)) +@deprecate A_mul_Bc(A::AbstractTriangular, B::Union{QRCompactWYQ,QRPackedQ}) (*)(A, adjoint(B)) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/bunchkaufman.jl, to deprecate +@deprecate A_ldiv_B!(B::BunchKaufman{T}, R::StridedVecOrMat{T}) where {T<:BlasReal} ldiv!(B, R) +@deprecate A_ldiv_B!(B::BunchKaufman{T}, R::StridedVecOrMat{T}) where {T<:BlasComplex} ldiv!(B, R) +@deprecate A_ldiv_B!(B::BunchKaufman{T}, R::StridedVecOrMat{S}) where {T,S} ldiv!(B, R) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/cholesky.jl, to deprecate +@deprecate A_ldiv_B!(C::Cholesky{T,<:AbstractMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} ldiv!(C, B) +@deprecate A_ldiv_B!(C::Cholesky{<:Any,<:AbstractMatrix}, B::StridedVecOrMat) ldiv!(C, B) +@deprecate A_ldiv_B!(C::CholeskyPivoted{T}, B::StridedVector{T}) where {T<:BlasFloat} ldiv!(C, B) +@deprecate A_ldiv_B!(C::CholeskyPivoted{T}, B::StridedMatrix{T}) where {T<:BlasFloat} ldiv!(C, B) +@deprecate A_ldiv_B!(C::CholeskyPivoted, B::StridedVector) ldiv!(C, B) +@deprecate A_ldiv_B!(C::CholeskyPivoted, B::StridedMatrix) ldiv!(C, B) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/factorization.jl, to deprecate +@deprecate Ac_ldiv_B(F::Factorization, B::AbstractVecOrMat) (\)(adjoint(F), B) +@deprecate A_ldiv_B!(Y::AbstractVecOrMat, A::Factorization, B::AbstractVecOrMat) ldiv!(Y, A, B) +@deprecate Ac_ldiv_B!(Y::AbstractVecOrMat, A::Factorization, B::AbstractVecOrMat) ldiv!(Y, adjoint(A), B) +@deprecate At_ldiv_B!(Y::AbstractVecOrMat, A::Factorization, B::AbstractVecOrMat) ldiv!(Y, transpose(A), B) +@deprecate At_ldiv_B(F::Factorization{<:Real}, B::AbstractVecOrMat) (\)(transpose(F), B) +@deprecate At_ldiv_B(F::Factorization, B) (\)(transpose(F), B) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/hessenberg.jl, to deprecate +@deprecate A_mul_B!(Q::HessenbergQ{T}, X::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(Q, X) +@deprecate A_mul_B!(X::StridedMatrix{T}, Q::HessenbergQ{T}) where {T<:BlasFloat} mul!(X, Q) +@deprecate Ac_mul_B!(Q::HessenbergQ{T}, X::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(adjoint(Q), X) +@deprecate A_mul_Bc!(X::StridedMatrix{T}, Q::HessenbergQ{T}) where {T<:BlasFloat} mul!(X, adjoint(Q)) +@deprecate Ac_mul_B(Q::HessenbergQ{T}, X::StridedVecOrMat{S}) where {T,S} (*)(adjoint(Q), X) +@deprecate A_mul_Bc(X::StridedVecOrMat{S}, Q::HessenbergQ{T}) where {T,S} (*)(X, adjoint(Q)) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/ldlt.jl, to deprecate +@deprecate A_ldiv_B!(S::LDLt{T,M}, B::AbstractVecOrMat{T}) where {T,M<:SymTridiagonal{T}} ldiv!(S, B) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/svd.jl, to deprecate +@deprecate A_ldiv_B!(A::SVD{T}, B::StridedVecOrMat) where {T} ldiv!(A, B) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] 
methods from base/linalg/symmetric.jl, to deprecate +@deprecate A_mul_B!(y::StridedVector{T}, A::Symmetric{T,<:StridedMatrix}, x::StridedVector{T}) where {T<:BlasFloat} mul!(y, A, x) +@deprecate A_mul_B!(y::StridedVector{T}, A::Hermitian{T,<:StridedMatrix}, x::StridedVector{T}) where {T<:BlasReal} mul!(y, A, x) +@deprecate A_mul_B!(y::StridedVector{T}, A::Hermitian{T,<:StridedMatrix}, x::StridedVector{T}) where {T<:BlasComplex} mul!(y, A, x) +@deprecate A_mul_B!(C::StridedMatrix{T}, A::Symmetric{T,<:StridedMatrix}, B::StridedMatrix{T}) where {T<:BlasFloat} mul!(C, A, B) +@deprecate A_mul_B!(C::StridedMatrix{T}, A::StridedMatrix{T}, B::Symmetric{T,<:StridedMatrix}) where {T<:BlasFloat} mul!(C, A, B) +@deprecate A_mul_B!(C::StridedMatrix{T}, A::Hermitian{T,<:StridedMatrix}, B::StridedMatrix{T}) where {T<:BlasReal} mul!(C, A, B) +@deprecate A_mul_B!(C::StridedMatrix{T}, A::StridedMatrix{T}, B::Hermitian{T,<:StridedMatrix}) where {T<:BlasReal} mul!(C, A, B) +@deprecate A_mul_B!(C::StridedMatrix{T}, A::Hermitian{T,<:StridedMatrix}, B::StridedMatrix{T}) where {T<:BlasComplex} mul!(C, A, B) +@deprecate A_mul_B!(C::StridedMatrix{T}, A::StridedMatrix{T}, B::Hermitian{T,<:StridedMatrix}) where {T<:BlasComplex} mul!(C, A, B) +@deprecate At_mul_B(A::RealHermSymComplexSym, B::AbstractVector) (*)(transpose(A), B) +@deprecate At_mul_B(A::RealHermSymComplexSym, B::AbstractMatrix) (*)(transpose(A), B) +@deprecate A_mul_Bt(A::AbstractMatrix, B::RealHermSymComplexSym) (*)(A, transpose(B)) +@deprecate Ac_mul_B(A::RealHermSymComplexHerm, B::AbstractVector) (*)(adjoint(A), B) +@deprecate Ac_mul_B(A::RealHermSymComplexHerm, B::AbstractMatrix) (*)(adjoint(A), B) +@deprecate A_mul_Bc(A::AbstractMatrix, B::RealHermSymComplexHerm) (*)(A, adjoint(B)) +@deprecate A_mul_Bt(A::RowVector, B::RealHermSymComplexSym) (*)(A, transpose(B)) +@deprecate A_mul_Bc(A::RowVector, B::RealHermSymComplexHerm) (*)(A, adjoint(B)) +@deprecate At_mul_B(A::RealHermSymComplexSym, B::AbstractTriangular) (*)(transpose(A), B) +@deprecate A_mul_Bt(A::AbstractTriangular, B::RealHermSymComplexSym) (*)(A, transpose(B)) +@deprecate Ac_mul_B(A::RealHermSymComplexHerm, B::AbstractTriangular) (*)(adjoint(A), B) +@deprecate A_mul_Bc(A::AbstractTriangular, B::RealHermSymComplexHerm) (*)(A, adjoint(B)) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] 
methods from base/linalg/lu.jl, to deprecate +@deprecate A_ldiv_B!(A::LU{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} ldiv!(A, B) +@deprecate A_ldiv_B!(A::LU{<:Any,<:StridedMatrix}, B::StridedVecOrMat) ldiv!(A, B) +@deprecate At_ldiv_B!(A::LU{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} ldiv!(transpose(A), B) +@deprecate At_ldiv_B!(A::LU{<:Any,<:StridedMatrix}, B::StridedVecOrMat) ldiv!(transpose(A), B) +@deprecate Ac_ldiv_B!(F::LU{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:Real} ldiv!(adjoint(F), B) +@deprecate Ac_ldiv_B!(A::LU{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasComplex} ldiv!(adjoint(A), B) +@deprecate Ac_ldiv_B!(A::LU{<:Any,<:StridedMatrix}, B::StridedVecOrMat) ldiv!(adjoint(A), B) +@deprecate At_ldiv_Bt(A::LU{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} (\)(transpose(A), transpose(B)) +@deprecate At_ldiv_Bt(A::LU, B::StridedVecOrMat) (\)(transpose(A), transpose(B)) +@deprecate Ac_ldiv_Bc(A::LU{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasComplex} (\)(adjoint(A), adjoint(B)) +@deprecate Ac_ldiv_Bc(A::LU, B::StridedVecOrMat) (\)(adjoint(A), adjoint(B)) +@deprecate A_ldiv_B!(A::LU{T,Tridiagonal{T,V}}, B::AbstractVecOrMat) where {T,V} ldiv!(A, B) +@deprecate At_ldiv_B!(A::LU{T,Tridiagonal{T,V}}, B::AbstractVecOrMat) where {T,V} (\)(transpose(A), B) +@deprecate Ac_ldiv_B!(A::LU{T,Tridiagonal{T,V}}, B::AbstractVecOrMat) where {T,V} ldiv!(adjoint(A), B) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/lq.jl, to deprecate +@deprecate A_mul_B!(A::LQ{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(A, B) +@deprecate A_mul_B!(A::LQ{T}, B::QR{T}) where {T<:BlasFloat} mul!(A, B) +@deprecate A_mul_B!(A::QR{T}, B::LQ{T}) where {T<:BlasFloat} mul!(A, B) +@deprecate A_mul_B!(A::LQPackedQ{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(A, B) +@deprecate Ac_mul_B!(A::LQPackedQ{T}, B::StridedVecOrMat{T}) where {T<:BlasReal} mul!(adjoint(A), B) +@deprecate Ac_mul_B!(A::LQPackedQ{T}, B::StridedVecOrMat{T}) where {T<:BlasComplex} mul!(adjoint(A), B) +@deprecate Ac_mul_B(A::LQPackedQ, B::StridedVecOrMat) (*)(adjoint(A), B) +@deprecate A_mul_Bc(A::LQPackedQ, B::StridedVecOrMat) (*)(A, adjoint(B)) +@deprecate Ac_mul_Bc(A::LQPackedQ, B::StridedVecOrMat) (*)(adjoint(A), adjoint(B)) +@deprecate A_mul_B!(A::StridedMatrix{T}, B::LQPackedQ{T}) where {T<:BlasFloat} mul!(A, B) +@deprecate A_mul_Bc!(A::StridedMatrix{T}, B::LQPackedQ{T}) where {T<:BlasReal} mul!(A, adjoint(B)) +@deprecate A_mul_Bc!(A::StridedMatrix{T}, B::LQPackedQ{T}) where {T<:BlasComplex} mul!(A, adjoint(B)) +@deprecate A_mul_Bc(A::StridedVecOrMat, Q::LQPackedQ) (*)(A, adjoint(Q)) +@deprecate Ac_mul_Bc(A::StridedMatrix, Q::LQPackedQ) (*)(adjoint(A), adjoint(Q)) +@deprecate Ac_mul_B(A::StridedMatrix, Q::LQPackedQ) (*)(adjoint(A), Q) +@deprecate A_ldiv_B!(A::LQ{T}, B::StridedVecOrMat{T}) where {T} ldiv!(A, B) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] 
methods from base/linalg/qr.jl, to deprecate +@deprecate A_mul_B!(A::QRCompactWYQ{T,S}, B::StridedVecOrMat{T}) where {T<:BlasFloat, S<:StridedMatrix} mul!(A, B) +@deprecate A_mul_B!(A::QRPackedQ{T,S}, B::StridedVecOrMat{T}) where {T<:BlasFloat, S<:StridedMatrix} mul!(A, B) +@deprecate A_mul_B!(A::QRPackedQ, B::AbstractVecOrMat) mul!(A, B) +@deprecate Ac_mul_B!(A::QRCompactWYQ{T,S}, B::StridedVecOrMat{T}) where {T<:BlasReal,S<:StridedMatrix} mul!(adjoint(A), B) +@deprecate Ac_mul_B!(A::QRCompactWYQ{T,S}, B::StridedVecOrMat{T}) where {T<:BlasComplex,S<:StridedMatrix} mul!(adjoint(A), B) +@deprecate Ac_mul_B!(A::QRPackedQ{T,S}, B::StridedVecOrMat{T}) where {T<:BlasReal,S<:StridedMatrix} mul!(adjoint(A), B) +@deprecate Ac_mul_B!(A::QRPackedQ{T,S}, B::StridedVecOrMat{T}) where {T<:BlasComplex,S<:StridedMatrix} mul!(adjoint(A), B) +@deprecate Ac_mul_B!(A::QRPackedQ, B::AbstractVecOrMat) mul!(adjoint(A), B) +@deprecate Ac_mul_B(Q::AbstractQ, B::StridedVecOrMat) (*)(adjoint(Q), B) +@deprecate A_mul_Bc(Q::AbstractQ, B::StridedVecOrMat) (*)(Q, adjoint(B)) +@deprecate Ac_mul_Bc(Q::AbstractQ, B::StridedVecOrMat) (*)(adjoint(Q), adjoint(B)) +@deprecate A_mul_B!(A::StridedVecOrMat{T}, B::QRCompactWYQ{T,S}) where {T<:BlasFloat,S<:StridedMatrix} mul!(A, B) +@deprecate A_mul_B!(A::StridedVecOrMat{T}, B::QRPackedQ{T,S}) where {T<:BlasFloat,S<:StridedMatrix} mul!(A, B) +@deprecate A_mul_B!(A::StridedMatrix,Q::QRPackedQ) mul!(A, Q) +@deprecate A_mul_Bc!(A::StridedVecOrMat{T}, B::QRCompactWYQ{T}) where {T<:BlasReal} mul!(A, adjoint(B)) +@deprecate A_mul_Bc!(A::StridedVecOrMat{T}, B::QRCompactWYQ{T}) where {T<:BlasComplex} mul!(A, adjoint(B)) +@deprecate A_mul_Bc!(A::StridedVecOrMat{T}, B::QRPackedQ{T}) where {T<:BlasReal} mul!(A, adjoint(B)) +@deprecate A_mul_Bc!(A::StridedVecOrMat{T}, B::QRPackedQ{T}) where {T<:BlasComplex} mul!(A, adjoint(B)) +@deprecate A_mul_Bc!(A::StridedMatrix,Q::QRPackedQ) mul!(A, adjoint(Q)) +@deprecate A_mul_Bc(A::StridedMatrix, B::AbstractQ) (*)(A, adjoint(B)) +@deprecate A_mul_Bc(rowvec::RowVector, B::AbstractQ) (*)(rowvec, adjoint(B)) +@deprecate Ac_mul_B(A::StridedVecOrMat, Q::AbstractQ) (*)(adjoint(A), Q) +@deprecate Ac_mul_Bc(A::StridedVecOrMat, Q::AbstractQ) (*)(adjoint(A), adjoint(Q)) +@deprecate A_ldiv_B!(A::QRCompactWY{T}, b::StridedVector{T}) where {T<:BlasFloat} ldiv!(A, b) +@deprecate A_ldiv_B!(A::QRCompactWY{T}, B::StridedMatrix{T}) where {T<:BlasFloat} ldiv!(A, B) +@deprecate A_ldiv_B!(A::QRPivoted{T}, B::StridedMatrix{T}, rcond::Real) where {T<:BlasFloat} ldiv!(A, B, rcond) +@deprecate A_ldiv_B!(A::QRPivoted{T}, B::StridedVector{T}) where {T<:BlasFloat} ldiv!(A, B) +@deprecate A_ldiv_B!(A::QRPivoted{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} ldiv!(A, B) +@deprecate A_ldiv_B!(A::QR{T}, B::StridedMatrix{T}) where {T} ldiv!(A, B) +@deprecate A_ldiv_B!(A::QR, B::StridedVector) ldiv!(A, B) +@deprecate A_ldiv_B!(A::QRPivoted, b::StridedVector) ldiv!(A, b) +@deprecate A_ldiv_B!(A::QRPivoted, B::StridedMatrix) ldiv!(A, B) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] 
methods from base/linalg/matmul.jl, to deprecate +@deprecate Ac_mul_Bc(A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} (*)(adjoint(A), adjoint(B)) +@deprecate Ac_mul_Bc!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(C, adjoint(A), adjoint(B)) +@deprecate Ac_mul_Bc!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) mul!(C, adjoint(A), adjoint(B)) +@deprecate Ac_mul_Bt!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) mul!(C, adjoint(A), transpose(B)) +@deprecate A_mul_Bc!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) where {T<:BlasComplex} mul!(C, A, adjoint(B)) +@deprecate A_mul_Bc!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) mul!(C, A, adjoint(B)) +@deprecate A_mul_Bc(A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} (*)(A, adjoint(B)) +@deprecate A_mul_Bc(A::StridedMatrix{<:BlasFloat}, B::StridedMatrix{<:BlasReal}) (*)(A, adjoint(B)) +@deprecate A_mul_Bc!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{<:BlasReal}) where {T<:BlasFloat} mul!(C, A, adjoint(B)) +@deprecate Ac_mul_B!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) where {T<:BlasComplex} mul!(C, adjoint(A), B) +@deprecate Ac_mul_B!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) mul!(C, adjoint(A), B) +@deprecate Ac_mul_B(A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} (*)(adjoint(A), B) +@deprecate Ac_mul_B(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasReal} (*)(adjoint(A), B) +@deprecate Ac_mul_B!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) where {T<:BlasReal} mul!(C, adjoint(A), B) +@deprecate At_mul_Bt!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(C, transpose(A), transpose(B)) +@deprecate At_mul_Bt!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) mul!(C, transpose(A), transpose(B)) +@deprecate At_mul_Bt(A::AbstractMatrix{T}, B::AbstractVecOrMat{S}) where {T,S} (*)(transpose(A), transpose(B)) +@deprecate A_mul_Bt!(C::AbstractVecOrMat, A::AbstractVecOrMat, B::AbstractVecOrMat) mul!(C, A, transpose(B)) +@deprecate A_mul_Bt!(C::StridedMatrix{Complex{Float32}}, A::StridedVecOrMat{Complex{Float32}}, B::StridedVecOrMat{Float32}) mul!(C, A, transpose(B)) +@deprecate A_mul_Bt!(C::StridedMatrix{Complex{Float64}}, A::StridedVecOrMat{Complex{Float64}}, B::StridedVecOrMat{Float64}) mul!(C, A, transpose(B)) +@deprecate A_mul_Bt!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(C, A, transpose(B)) +@deprecate A_mul_Bt(A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} (*)(A, transpose(B)) +@deprecate At_mul_B!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(C, transpose(A), B) +@deprecate At_mul_B!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) mul!(C, transpose(A), B) +@deprecate At_mul_B(A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} (*)(transpose(A), B) +@deprecate A_mul_B!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) mul!(C, A, B) +@deprecate A_mul_B!(C::StridedMatrix{Complex{Float32}}, A::StridedVecOrMat{Complex{Float32}}, B::StridedVecOrMat{Float32}) mul!(C, A, B) +@deprecate A_mul_B!(C::StridedMatrix{Complex{Float64}}, A::StridedVecOrMat{Complex{Float64}}, B::StridedVecOrMat{Float64}) mul!(C, A, B) +@deprecate A_mul_B!(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} mul!(C, A, B) +@deprecate 
Ac_mul_B!(y::StridedVector{T}, A::StridedVecOrMat{T}, x::StridedVector{T}) where {T<:BlasReal} mul!(y, adjoint(A), x) +@deprecate Ac_mul_B!(y::StridedVector{T}, A::StridedVecOrMat{T}, x::StridedVector{T}) where {T<:BlasComplex} mul!(y, adjoint(A), x) +@deprecate Ac_mul_B!(y::AbstractVector, A::AbstractVecOrMat, x::AbstractVector) mul!(y, adjoint(A), x) +@deprecate Ac_mul_B(A::StridedMatrix{T}, x::StridedVector{S}) where {T<:BlasFloat,S} (*)(adjoint(A), x) +@deprecate Ac_mul_B(A::AbstractMatrix{T}, x::AbstractVector{S}) where {T,S} (*)(adjoint(A), x) +@deprecate At_mul_B(A::StridedMatrix{T}, x::StridedVector{S}) where {T<:BlasFloat,S} (*)(transpose(A), x) +@deprecate At_mul_B(A::AbstractMatrix{T}, x::AbstractVector{S}) where {T,S} (*)(transpose(A), x) +@deprecate At_mul_B!(y::StridedVector{T}, A::StridedVecOrMat{T}, x::StridedVector{T}) where {T<:BlasFloat} mul!(y, transpose(A), x) +@deprecate At_mul_B!(y::AbstractVector, A::AbstractVecOrMat, x::AbstractVector) mul!(y, transpose(A), x) +@deprecate A_mul_B!(y::AbstractVector, A::AbstractVecOrMat, x::AbstractVector) mul!(y, A, x) +@deprecate A_mul_B!(y::StridedVector{Complex{Float32}}, A::StridedVecOrMat{Complex{Float32}}, x::StridedVector{Float32}) mul!(y, A, x) +@deprecate A_mul_B!(y::StridedVector{Complex{Float64}}, A::StridedVecOrMat{Complex{Float64}}, x::StridedVector{Float64}) mul!(y, A, x) +@deprecate A_mul_B!(y::StridedVector{T}, A::StridedVecOrMat{T}, x::StridedVector{T}) where {T<:BlasFloat} mul!(y, A, x) +@deprecate A_mul_Bt(a::AbstractVector, B::AbstractMatrix) (*)(a, transpose(B)) +@deprecate A_mul_Bt(A::AbstractMatrix, b::AbstractVector) (*)(A, transpose(b)) +@deprecate A_mul_Bc(a::AbstractVector, B::AbstractMatrix) (*)(a, adjoint(B)) +@deprecate A_mul_Bc(A::AbstractMatrix, b::AbstractVector) (*)(A, adjoint(b)) +@deprecate At_mul_B(x::StridedVector{T}, y::StridedVector{T}) where {T<:BlasComplex} (*)(transpose(x), y) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] 
methods from base/linalg/triangular.jl, to deprecate +@deprecate A_mul_Bc(A::AbstractTriangular, B::AbstractTriangular) (*)(A, adjoint(B)) +@deprecate A_mul_Bt(A::AbstractTriangular, B::AbstractTriangular) (*)(A, transpose(B)) +@deprecate Ac_mul_B(A::AbstractTriangular, B::AbstractTriangular) (*)(adjoint(A), B) +@deprecate At_mul_B(A::AbstractTriangular, B::AbstractTriangular) (*)(transpose(A), B) +@deprecate Ac_ldiv_B(A::Union{UpperTriangular,LowerTriangular}, B::RowVector) (\)(adjoint(A), B) +@deprecate Ac_ldiv_B(A::Union{UnitUpperTriangular,UnitLowerTriangular}, B::RowVector) (\)(adjoint(A), B) +@deprecate At_ldiv_B(A::Union{UpperTriangular,LowerTriangular}, B::RowVector) (\)(transpose(A), B) +@deprecate At_ldiv_B(A::Union{UnitUpperTriangular,UnitLowerTriangular}, B::RowVector) (\)(transpose(A), B) +@deprecate A_rdiv_Bc(rowvec::RowVector, A::Union{UpperTriangular,LowerTriangular}) (/)(rowvec, adjoint(A)) +@deprecate A_rdiv_Bc(rowvec::RowVector, A::Union{UnitUpperTriangular,UnitLowerTriangular}) (/)(rowvec, adjoint(A)) +@deprecate A_rdiv_Bt(rowvec::RowVector, A::Union{UpperTriangular,LowerTriangular}) (/)(rowvec, transpose(A)) +@deprecate A_rdiv_Bt(rowvec::RowVector, A::Union{UnitUpperTriangular,UnitLowerTriangular}) (/)(rowvec, transpose(A)) +@deprecate A_mul_Bt(rowvec::RowVector, A::AbstractTriangular) (*)(rowvec, transpose(A)) +@deprecate A_mul_Bt(A::AbstractTriangular, rowvec::RowVector) (*)(A, transpose(rowvec)) +@deprecate At_mul_Bt(A::AbstractTriangular, rowvec::RowVector) (*)(transpose(A), transpose(rowvec)) +@deprecate A_mul_Bc(rowvec::RowVector, A::AbstractTriangular) (*)(rowvec, adjoint(A)) +@deprecate A_mul_Bc(A::AbstractTriangular, rowvec::RowVector) (*)(A, adjoint(rowvec)) +@deprecate Ac_mul_Bc(A::AbstractTriangular, rowvec::RowVector) (*)(adjoint(A), adjoint(rowvec)) +@deprecate Ac_mul_B(A::AbstractMatrix, B::AbstractTriangular) (*)(adjoint(A), B) +@deprecate At_mul_B(A::AbstractMatrix, B::AbstractTriangular) (*)(transpose(A), B) +@deprecate A_mul_Bc(A::AbstractTriangular, B::AbstractMatrix) (*)(A, adjoint(B)) +@deprecate A_mul_Bt(A::AbstractTriangular, B::AbstractMatrix) (*)(A, transpose(B)) +@deprecate Ac_mul_Bc(A::AbstractTriangular, B::AbstractTriangular) (*)(adjoint(A), adjoint(B)) +@deprecate Ac_mul_Bc(A::AbstractTriangular, B::AbstractMatrix) (*)(adjoint(A), adjoint(B)) +@deprecate Ac_mul_Bc(A::AbstractMatrix, B::AbstractTriangular) (*)(adjoint(A), adjoint(B)) +@deprecate At_mul_Bt(A::AbstractTriangular, B::AbstractTriangular) (*)(transpose(A), transpose(B)) +@deprecate At_mul_Bt(A::AbstractTriangular, B::AbstractMatrix) (*)(transpose(A), transpose(B)) +@deprecate At_mul_Bt(A::AbstractMatrix, B::AbstractTriangular) (*)(transpose(A), transpose(B)) +@deprecate A_mul_Bc!(A::UpperTriangular, B::Union{LowerTriangular,UnitLowerTriangular}) mul!(A, adjoint(B)) +@deprecate A_mul_Bc!(A::LowerTriangular, B::Union{UpperTriangular,UnitUpperTriangular}) mul!(A, adjoint(B)) +@deprecate A_mul_Bt!(A::UpperTriangular, B::Union{LowerTriangular,UnitLowerTriangular}) mul!(A, transpose(B)) +@deprecate A_mul_Bt!(A::LowerTriangular, B::Union{UpperTriangular,UnitUpperTriangular}) mul!(A, transpose(B)) +@deprecate A_rdiv_Bc!(A::UpperTriangular, B::Union{LowerTriangular,UnitLowerTriangular}) rdiv!(A, adjoint(B)) +@deprecate A_rdiv_Bc!(A::LowerTriangular, B::Union{UpperTriangular,UnitUpperTriangular}) rdiv!(A, adjoint(B)) +@deprecate A_rdiv_Bt!(A::UpperTriangular, B::Union{LowerTriangular,UnitLowerTriangular}) rdiv!(A, transpose(B)) +@deprecate A_rdiv_Bt!(A::LowerTriangular, 
B::Union{UpperTriangular,UnitUpperTriangular}) rdiv!(A, transpose(B)) +@deprecate A_rdiv_B!(A::UpperTriangular, B::Union{UpperTriangular,UnitUpperTriangular}) rdiv!(A, B) +@deprecate A_rdiv_B!(A::LowerTriangular, B::Union{LowerTriangular,UnitLowerTriangular}) rdiv!(A, B) +@deprecate Ac_mul_B!(A::Union{LowerTriangular,UnitLowerTriangular}, B::UpperTriangular) mul!(adjoint(A), B) +@deprecate Ac_mul_B!(A::Union{UpperTriangular,UnitUpperTriangular}, B::LowerTriangular) mul!(adjoint(A), B) +@deprecate At_mul_B!(A::Union{LowerTriangular,UnitLowerTriangular}, B::UpperTriangular) mul!(transpose(A), B) +@deprecate At_mul_B!(A::Union{UpperTriangular,UnitUpperTriangular}, B::LowerTriangular) mul!(transpose(A), B) +@deprecate Ac_ldiv_B!(A::Union{LowerTriangular,UnitLowerTriangular}, B::UpperTriangular) ldiv!(adjoint(A), B) +@deprecate Ac_ldiv_B!(A::Union{UpperTriangular,UnitUpperTriangular}, B::LowerTriangular) ldiv!(adjoint(A), B) +@deprecate At_ldiv_B!(A::Union{LowerTriangular,UnitLowerTriangular}, B::UpperTriangular) ldiv!(transpose(A), B) +@deprecate At_ldiv_B!(A::Union{UpperTriangular,UnitUpperTriangular}, B::LowerTriangular) ldiv!(transpose(A), B) +@deprecate A_rdiv_Bt!(A::StridedMatrix, B::UnitLowerTriangular) rdiv!(A, transpose(B)) +@deprecate A_rdiv_Bt!(A::StridedMatrix, B::LowerTriangular) rdiv!(A, transpose(B)) +@deprecate A_rdiv_Bt!(A::StridedMatrix, B::UnitUpperTriangular) rdiv!(A, transpose(B)) +@deprecate A_rdiv_Bt!(A::StridedMatrix, B::UpperTriangular) rdiv!(A, transpose(B)) +@deprecate A_rdiv_Bc!(A::StridedMatrix, B::UnitLowerTriangular) rdiv!(A, adjoint(B)) +@deprecate A_rdiv_Bc!(A::StridedMatrix, B::LowerTriangular) rdiv!(A, adjoint(B)) +@deprecate A_rdiv_Bc!(A::StridedMatrix, B::UnitUpperTriangular) rdiv!(A, adjoint(B)) +@deprecate A_rdiv_Bc!(A::StridedMatrix, B::UpperTriangular) rdiv!(A, adjoint(B)) +@deprecate A_rdiv_B!(A::StridedMatrix, B::UnitLowerTriangular) rdiv!(A, B) +@deprecate A_rdiv_B!(A::StridedMatrix, B::LowerTriangular) rdiv!(A, B) +@deprecate A_rdiv_B!(A::StridedMatrix, B::UnitUpperTriangular) rdiv!(A, B) +@deprecate A_rdiv_B!(A::StridedMatrix, B::UpperTriangular) rdiv!(A, B) +@deprecate Ac_ldiv_B!(A::UnitUpperTriangular, b::AbstractVector, x::AbstractVector = b) ldiv!(adjoint(A), b, x) +@deprecate Ac_ldiv_B!(A::UpperTriangular, b::AbstractVector, x::AbstractVector = b) ldiv!(adjoint(A), b, x) +@deprecate Ac_ldiv_B!(A::UnitLowerTriangular, b::AbstractVector, x::AbstractVector = b) ldiv!(adjoint(A), b, x) +@deprecate Ac_ldiv_B!(A::LowerTriangular, b::AbstractVector, x::AbstractVector = b) ldiv!(adjoint(A), b, x) +@deprecate At_ldiv_B!(A::UnitUpperTriangular, b::AbstractVector, x::AbstractVector = b) ldiv!(transpose(A), b, x) +@deprecate At_ldiv_B!(A::UpperTriangular, b::AbstractVector, x::AbstractVector = b) ldiv!(transpose(A), b, x) +@deprecate At_ldiv_B!(A::UnitLowerTriangular, b::AbstractVector, x::AbstractVector = b) ldiv!(transpose(A), b, x) +@deprecate At_ldiv_B!(A::LowerTriangular, b::AbstractVector, x::AbstractVector = b) ldiv!(transpose(A), b, x) +@deprecate A_mul_Bt!(A::StridedMatrix, B::UnitLowerTriangular) mul!(A, transpose(B)) +@deprecate A_mul_Bt!(A::StridedMatrix, B::LowerTriangular) mul!(A, transpose(B)) +@deprecate A_mul_Bt!(A::StridedMatrix, B::UnitUpperTriangular) mul!(A, transpose(B)) +@deprecate A_mul_Bt!(A::StridedMatrix, B::UpperTriangular) mul!(A, transpose(B)) +@deprecate A_mul_Bc!(A::StridedMatrix, B::UnitLowerTriangular) mul!(A, adjoint(B)) +@deprecate A_mul_Bc!(A::StridedMatrix, B::LowerTriangular) mul!(A, adjoint(B)) +@deprecate 
A_mul_Bc!(A::StridedMatrix, B::UnitUpperTriangular) mul!(A, adjoint(B)) +@deprecate A_mul_Bc!(A::StridedMatrix, B::UpperTriangular) mul!(A, adjoint(B)) +@deprecate A_mul_B!(A::StridedMatrix, B::UnitLowerTriangular) mul!(A, B) +@deprecate A_mul_B!(A::StridedMatrix, B::LowerTriangular) mul!(A, B) +@deprecate A_mul_B!(A::StridedMatrix, B::UnitUpperTriangular) mul!(A, B) +@deprecate A_mul_B!(A::StridedMatrix, B::UpperTriangular) mul!(A, B) +@deprecate At_mul_B!(A::UnitLowerTriangular, B::StridedVecOrMat) mul!(transpose(A), B) +@deprecate At_mul_B!(A::LowerTriangular, B::StridedVecOrMat) mul!(transpose(A), B) +@deprecate At_mul_B!(A::UnitUpperTriangular, B::StridedVecOrMat) mul!(transpose(A), B) +@deprecate At_mul_B!(A::UpperTriangular, B::StridedVecOrMat) mul!(transpose(A), B) +@deprecate Ac_mul_B!(A::UnitLowerTriangular, B::StridedVecOrMat) mul!(adjoint(A), B) +@deprecate Ac_mul_B!(A::LowerTriangular, B::StridedVecOrMat) mul!(adjoint(A), B) +@deprecate Ac_mul_B!(A::UnitUpperTriangular, B::StridedVecOrMat) mul!(adjoint(A), B) +@deprecate Ac_mul_B!(A::UpperTriangular, B::StridedVecOrMat) mul!(adjoint(A), B) +@deprecate A_mul_B!(A::UnitLowerTriangular, B::StridedVecOrMat) mul!(A, B) +@deprecate A_mul_B!(A::LowerTriangular, B::StridedVecOrMat) mul!(A, B) +@deprecate A_mul_B!(A::UnitUpperTriangular, B::StridedVecOrMat) mul!(A, B) +@deprecate A_mul_B!(A::UpperTriangular, B::StridedVecOrMat) mul!(A, B) +@deprecate A_mul_B!(C::AbstractVector , A::AbstractTriangular, B::AbstractVector) mul!(C, A, B) +@deprecate A_mul_B!(C::AbstractMatrix , A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, A, B) +@deprecate A_mul_B!(C::AbstractVecOrMat, A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, A, B) +@deprecate Ac_mul_B!(C::AbstractVector , A::AbstractTriangular, B::AbstractVector) mul!(C, adjoint(A), B) +@deprecate Ac_mul_B!(C::AbstractMatrix , A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, adjoint(A), B) +@deprecate Ac_mul_B!(C::AbstractVecOrMat, A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, adjoint(A), B) +@deprecate At_mul_B!(C::AbstractVector , A::AbstractTriangular, B::AbstractVector) mul!(C, transpose(A), B) +@deprecate At_mul_B!(C::AbstractMatrix , A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, transpose(A), B) +@deprecate At_mul_B!(C::AbstractVecOrMat, A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, transpose(A), B) +@deprecate A_mul_B!(A::Tridiagonal, B::AbstractTriangular) mul!(A, B) +@deprecate A_mul_B!(C::AbstractMatrix, A::AbstractTriangular, B::Tridiagonal) mul!(C, A, B) +@deprecate A_mul_B!(C::AbstractMatrix, A::Tridiagonal, B::AbstractTriangular) mul!(C, A, B) +@deprecate A_mul_Bt!(C::AbstractVecOrMat, A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, A, transpose(B)) +@deprecate A_mul_Bc!(C::AbstractMatrix, A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, A, adjoint(B)) +@deprecate A_mul_Bc!(C::AbstractVecOrMat, A::AbstractTriangular, B::AbstractVecOrMat) mul!(C, A, adjoint(B)) +for mat in (:AbstractVector, :AbstractMatrix) + @eval begin + @deprecate Ac_mul_B(A::AbstractTriangular, B::$mat) (*)(adjoint(A), B) + @deprecate At_mul_B(A::AbstractTriangular, B::$mat) (*)(transpose(A), B) + @deprecate Ac_ldiv_B(A::Union{UnitUpperTriangular,UnitLowerTriangular}, B::$mat) (\)(adjoint(A), B) + @deprecate At_ldiv_B(A::Union{UnitUpperTriangular,UnitLowerTriangular}, B::$mat) (\)(transpose(A), B) + @deprecate Ac_ldiv_B(A::Union{UpperTriangular,LowerTriangular}, B::$mat) (\)(adjoint(A), B) + @deprecate At_ldiv_B(A::Union{UpperTriangular,LowerTriangular}, B::$mat) 
(\)(transpose(A), B) + @deprecate A_rdiv_Bc(A::$mat, B::Union{UnitUpperTriangular, UnitLowerTriangular}) (/)(A, adjoint(B)) + @deprecate A_rdiv_Bt(A::$mat, B::Union{UnitUpperTriangular, UnitLowerTriangular}) (/)(A, transpose(B)) + @deprecate A_rdiv_Bc(A::$mat, B::Union{UpperTriangular,LowerTriangular}) (/)(A, adjoint(B)) + @deprecate A_rdiv_Bt(A::$mat, B::Union{UpperTriangular,LowerTriangular}) (/)(A, transpose(B)) + end +end +@deprecate A_mul_Bc(A::AbstractMatrix, B::AbstractTriangular) (*)(A, adjoint(B)) +@deprecate A_mul_Bt(A::AbstractMatrix, B::AbstractTriangular) (*)(A, transpose(B)) +for (f, op, transform) in ( + (:A_mul_Bc, :*, :adjoint), + (:A_mul_Bt, :*, :transpose), + (:A_rdiv_Bc, :/, :adjoint), + (:A_rdiv_Bt, :/, :transpose)) + @eval begin + @deprecate $f(A::LowerTriangular, B::UpperTriangular) ($op)(A, ($transform)(B)) + @deprecate $f(A::LowerTriangular, B::UnitUpperTriangular) ($op)(A, ($transform)(B)) + @deprecate $f(A::UpperTriangular, B::LowerTriangular) ($op)(A, ($transform)(B)) + @deprecate $f(A::UpperTriangular, B::UnitLowerTriangular) ($op)(A, ($transform)(B)) + end +end +for (f, op, transform) in ( + (:Ac_mul_B, :*, :adjoint), + (:At_mul_B, :*, :transpose), + (:Ac_ldiv_B, :\, :adjoint), + (:At_ldiv_B, :\, :transpose)) + @eval begin + @deprecate ($f)(A::UpperTriangular, B::LowerTriangular) ($op)(($transform)(A), B) + @deprecate ($f)(A::UnitUpperTriangular, B::LowerTriangular) ($op)(($transform)(A), B) + @deprecate ($f)(A::LowerTriangular, B::UpperTriangular) ($op)(($transform)(A), B) + @deprecate ($f)(A::UnitLowerTriangular, B::UpperTriangular) ($op)(($transform)(A), B) + end +end +for (t, uploc, isunitc) in ((:LowerTriangular, 'L', 'N'), + (:UnitLowerTriangular, 'L', 'U'), + (:UpperTriangular, 'U', 'N'), + (:UnitUpperTriangular, 'U', 'U')) + @eval begin + # Vector multiplication + @deprecate A_mul_B!(A::$t{T,<:StridedMatrix}, b::StridedVector{T}) where {T<:BlasFloat} mul!(A, b) + @deprecate At_mul_B!(A::$t{T,<:StridedMatrix}, b::StridedVector{T}) where {T<:BlasFloat} mul!(transpose(A), b) + @deprecate Ac_mul_B!(A::$t{T,<:StridedMatrix}, b::StridedVector{T}) where {T<:BlasReal} mul!(adjoint(A), b) + @deprecate Ac_mul_B!(A::$t{T,<:StridedMatrix}, b::StridedVector{T}) where {T<:BlasComplex} mul!(adjoint(A), b) + + # Matrix multiplication + @deprecate A_mul_B!(A::$t{T,<:StridedMatrix}, B::StridedMatrix{T}) where {T<:BlasFloat} mul!(A, B) + @deprecate A_mul_B!(A::StridedMatrix{T}, B::$t{T,<:StridedMatrix}) where {T<:BlasFloat} mul!(A, B) + + @deprecate At_mul_B!(A::$t{T,<:StridedMatrix}, B::StridedMatrix{T}) where {T<:BlasFloat} mul!(transpose(A), B) + @deprecate Ac_mul_B!(A::$t{T,<:StridedMatrix}, B::StridedMatrix{T}) where {T<:BlasComplex} mul!(adjoint(A), B) + @deprecate Ac_mul_B!(A::$t{T,<:StridedMatrix}, B::StridedMatrix{T}) where {T<:BlasReal} mul!(adjoint(A), B) + + @deprecate A_mul_Bt!(A::StridedMatrix{T}, B::$t{T,<:StridedMatrix}) where {T<:BlasFloat} mul!(A, transpose(B)) + @deprecate A_mul_Bc!(A::StridedMatrix{T}, B::$t{T,<:StridedMatrix}) where {T<:BlasComplex} mul!(A, adjoint(B)) + @deprecate A_mul_Bc!(A::StridedMatrix{T}, B::$t{T,<:StridedMatrix}) where {T<:BlasReal} mul!(A, adjoint(B)) + + # Left division + @deprecate A_ldiv_B!(A::$t{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} ldiv!(A, B) + @deprecate At_ldiv_B!(A::$t{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} ldiv!(transpose(A), B) + @deprecate Ac_ldiv_B!(A::$t{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasReal} ldiv!(adjoint(A), B) + @deprecate 
Ac_ldiv_B!(A::$t{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasComplex} ldiv!(adjoint(A), B) + + # Right division + @deprecate A_rdiv_B!(A::StridedMatrix{T}, B::$t{T,<:StridedMatrix}) where {T<:BlasFloat} rdiv!(A, B) + @deprecate A_rdiv_Bt!(A::StridedMatrix{T}, B::$t{T,<:StridedMatrix}) where {T<:BlasFloat} rdiv!(A, transpose(B)) + @deprecate A_rdiv_Bc!(A::StridedMatrix{T}, B::$t{T,<:StridedMatrix}) where {T<:BlasReal} rdiv!(A, adjoint(B)) + @deprecate A_rdiv_Bc!(A::StridedMatrix{T}, B::$t{T,<:StridedMatrix}) where {T<:BlasComplex} rdiv!(A, adjoint(B)) + end +end + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/rowvector.jl, to deprecate +@deprecate A_rdiv_Bt(rowvec::RowVector, mat::AbstractMatrix) (/)(rowvec, transpose(mat)) +@deprecate A_rdiv_Bc(rowvec::RowVector, mat::AbstractMatrix) (/)(rowvec, adjoint(mat)) +@deprecate At_ldiv_B(mat::AbstractMatrix, rowvec::RowVector) (\)(transpose(mat), rowvec) +@deprecate Ac_ldiv_B(mat::AbstractMatrix, rowvec::RowVector) (\)(adjoint(mat), rowvec) +@deprecate Ac_mul_B(u::RowVector, v::AbstractVector) (*)(adjoint(u), v) +@deprecate Ac_mul_B(vec::AbstractVector, mat::AbstractMatrix) (*)(adjoint(vec), mat) +@deprecate Ac_mul_B(rowvec1::RowVector, rowvec2::RowVector) (*)(adjoint(rowvec1), rowvec2) +@deprecate Ac_mul_B(vec::AbstractVector, rowvec::RowVector) (*)(adjoint(vec), rowvec) +@deprecate Ac_mul_B(vec1::AbstractVector, vec2::AbstractVector) (*)(adjoint(vec1), vec2) +@deprecate Ac_mul_Bc(rowvec::RowVector, vec::AbstractVector) (*)(adjoint(rowvec), adjoint(vec)) +@deprecate Ac_mul_Bc(vec::AbstractVector, mat::AbstractMatrix) (*)(adjoint(vec), adjoint(mat)) +@deprecate Ac_mul_Bc(rowvec1::RowVector, rowvec2::RowVector) (*)(adjoint(rowvec1), adjoint(rowvec2)) +@deprecate Ac_mul_Bc(vec::AbstractVector, rowvec::RowVector) (*)(adjoint(vec), adjoint(rowvec)) +@deprecate Ac_mul_Bc(vec::AbstractVector, rowvec::AbstractVector) (*)(adjoint(vec), adjoint(rowvec)) +@deprecate Ac_mul_Bc(mat::AbstractMatrix, rowvec::RowVector) (*)(adjoint(mat), adjoint(rowvec)) +@deprecate A_mul_Bc(u::RowVector, v::AbstractVector) (*)(u, adjoint(v)) +@deprecate A_mul_Bc(rowvec::RowVector, mat::AbstractMatrix) (*)(rowvec, adjoint(mat)) +@deprecate A_mul_Bc(rowvec1::RowVector, rowvec2::RowVector) (*)(rowvec1, adjoint(rowvec2)) +@deprecate A_mul_Bc(vec::AbstractVector, rowvec::RowVector) (*)(vec, adjoint(rowvec)) +@deprecate A_mul_Bc(vec1::AbstractVector, vec2::AbstractVector) (*)(vec1, adjoint(vec2)) +@deprecate A_mul_Bc(mat::AbstractMatrix, rowvec::RowVector) (*)(mat, adjoint(rowvec)) +@deprecate At_mul_B(v::RowVector, u::AbstractVector) (*)(transpose(v), u) +@deprecate At_mul_B(vec::AbstractVector, mat::AbstractMatrix) (*)(transpose(vec), mat) +@deprecate At_mul_B(rowvec1::RowVector, rowvec2::RowVector) (*)(transpose(rowvec1), rowvec2) +@deprecate At_mul_B(vec::AbstractVector, rowvec::RowVector) (*)(transpose(vec), rowvec) +@deprecate At_mul_B(vec1::AbstractVector{T}, vec2::AbstractVector{T}) where {T<:Real} (*)(transpose(vec1), vec2) +@deprecate At_mul_B(vec1::AbstractVector, vec2::AbstractVector) (*)(transpose(vec1), vec2) +@deprecate At_mul_Bt(rowvec::RowVector, vec::AbstractVector) (*)(transpose(rowvec), transpose(vec)) +@deprecate At_mul_Bt(vec::AbstractVector, mat::AbstractMatrix) (*)(transpose(vec), transpose(mat)) +@deprecate At_mul_Bt(rowvec1::RowVector, rowvec2::RowVector) (*)(transpose(rowvec1), transpose(rowvec2)) +@deprecate At_mul_Bt(vec::AbstractVector, rowvec::RowVector) (*)(transpose(vec), transpose(rowvec)) +@deprecate 
At_mul_Bt(vec::AbstractVector, rowvec::AbstractVector) (*)(transpose(vec), transpose(rowvec)) +@deprecate At_mul_Bt(mat::AbstractMatrix, rowvec::RowVector) (*)(transpose(mat), transpose(rowvec)) +@deprecate A_mul_Bt(v::RowVector, A::AbstractVector) (*)(v, transpose(A)) +@deprecate A_mul_Bt(rowvec::RowVector, mat::AbstractMatrix) (*)(rowvec, transpose(mat)) +@deprecate A_mul_Bt(rowvec1::RowVector, rowvec2::RowVector) (*)(rowvec1, transpose(rowvec2)) +@deprecate A_mul_Bt(vec::AbstractVector, rowvec::RowVector) (*)(vec, transpose(rowvec)) +@deprecate A_mul_Bt(vec1::AbstractVector, vec2::AbstractVector) (*)(vec1, transpose(vec2)) +@deprecate A_mul_Bt(mat::AbstractMatrix, rowvec::RowVector) (*)(mat, transpose(rowvec)) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/linalg/givens.jl, to deprecate +@deprecate A_mul_Bc!(A::AbstractMatrix, R::Rotation) mul!(A, adjoint(R)) +@deprecate A_mul_B!(R::Rotation, A::AbstractMatrix) mul!(R, A) +@deprecate A_mul_B!(G::Givens, R::Rotation) mul!(G, R) +@deprecate A_mul_Bc!(A::AbstractMatrix, G::Givens) mul!(A, adjoint(G)) +@deprecate A_mul_B!(G::Givens, A::AbstractVecOrMat) mul!(G, A) +@deprecate A_mul_B!(G1::Givens, G2::Givens) mul!(G1, G2) +@deprecate A_mul_Bc(A::AbstractVecOrMat{T}, R::AbstractRotation{S}) where {T,S} (*)(A, adjoint(R)) + + +# methods involving RowVector from base/linalg/bidiag.jl, to deprecate +\(::Diagonal, ::RowVector) = _mat_ldiv_rowvec_error() +\(::Bidiagonal, ::RowVector) = _mat_ldiv_rowvec_error() +\(::Bidiagonal{<:Number}, ::RowVector{<:Number}) = _mat_ldiv_rowvec_error() +\(::Adjoint{<:Any,<:Bidiagonal}, ::RowVector) = _mat_ldiv_rowvec_error() +\(::Transpose{<:Any,<:Bidiagonal}, ::RowVector) = _mat_ldiv_rowvec_error() +\(::Adjoint{<:Number,<:Bidiagonal{<:Number}}, ::RowVector{<:Number}) = _mat_ldiv_rowvec_error() +\(::Transpose{<:Number,<:Bidiagonal{<:Number}}, ::RowVector{<:Number}) = _mat_ldiv_rowvec_error() +_mat_ldiv_rowvec_error() = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) + +# methods involving RowVector from base/linalg/diagonal.jl, to deprecate +*(rowvec::RowVector, D::Diagonal) = rvtranspose(D * rvtranspose(rowvec)) # seems potentially incorrect without also transposing D? +*(D::Diagonal, transrowvec::Transpose{<:Any,<:RowVector}) = (rowvec = transrowvec.parent; D*rvtranspose(rowvec)) +*(D::Diagonal, adjrowvec::Adjoint{<:Any,<:RowVector}) = (rowvec = adjrowvec.parent; D*rvadjoint(rowvec)) + +# methods involving RowVector from base/linalg/qr.jl, to deprecate +*(rowvec::RowVector, adjB::Adjoint{<:Any,<:AbstractQ}) = (B = adjB.parent; rvadjoint(B*rvadjoint(rowvec))) + +# methods involving RowVector from base/linalg/qr.jl, to deprecate +*(A::RowVector, B::Adjoint{<:Any,<:AbstractRotation}) = A * adjoint(B.parent) + +# methods involving RowVector from base/linalg/generic.jl, to deprecate +norm(tv::RowVector, q::Real) = q == Inf ? 
norm(rvtranspose(tv), 1) : norm(rvtranspose(tv), q/(q-1)) +norm(tv::RowVector) = norm(rvtranspose(tv)) + +# methods involving RowVector from base/linalg/factorization.jl, to deprecate +\(A::Adjoint{<:Any,<:Factorization}, B::RowVector) = adjoint(A.parent) \ B +\(A::Transpose{<:Any,<:Factorization}, B::RowVector) = transpose(A.parent) \ B +\(A::Transpose{<:Any,<:Factorization{<:Real}}, B::RowVector) = transpose(A.parent) \ B + +# methods involving RowVector from base/linalg/symmetric.jl, to deprecate +*(A::RowVector, transB::Transpose{<:Any,<:RealHermSymComplexSym}) = A * transB.parent +*(A::RowVector, adjB::Adjoint{<:Any,<:RealHermSymComplexHerm}) = A * adjB.parent +\(A::HermOrSym{<:Any,<:StridedMatrix}, B::RowVector) = invoke(\, Tuple{AbstractMatrix, RowVector}, A, B) +*(A::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::Adjoint{<:Any,<:RowVector}) = A.parent * B +*(A::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::Transpose{<:Any,<:RowVector}) = A.parent * B +*(A::Transpose{<:Any,<:RealHermSymComplexSym}, B::Adjoint{<:Any,<:RowVector}) = A.parent * B +*(A::Transpose{<:Any,<:RealHermSymComplexSym}, B::Transpose{<:Any,<:RowVector}) = A.parent * B + +# methods involving RowVector from base/linalg/triangular.jl, to deprecate +*(rowvec::RowVector, A::AbstractTriangular) = rvtranspose(transpose(A) * rvtranspose(rowvec)) +*(rowvec::RowVector, transA::Transpose{<:Any,<:AbstractTriangular}) = rvtranspose(transA.parent * rvtranspose(rowvec)) +*(A::AbstractTriangular, transrowvec::Transpose{<:Any,<:RowVector}) = A * rvtranspose(transrowvec.parent) +*(transA::Transpose{<:Any,<:AbstractTriangular}, transrowvec::Transpose{<:Any,<:RowVector}) = transA * rvtranspose(transrowvec.parent) +*(rowvec::RowVector, adjA::Adjoint{<:Any,<:AbstractTriangular}) = rvadjoint(adjA.parent * rvadjoint(rowvec)) +*(A::AbstractTriangular, adjrowvec::Adjoint{<:Any,<:RowVector}) = A * rvadjoint(adjrowvec.parent) +*(adjA::Adjoint{<:Any,<:AbstractTriangular}, adjrowvec::Adjoint{<:Any,<:RowVector}) = adjA * rvadjoint(adjrowvec.parent) +\(::Union{UpperTriangular,LowerTriangular}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) +\(::Union{UnitUpperTriangular,UnitLowerTriangular}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) +\(::Adjoint{<:Any,<:Union{UpperTriangular,LowerTriangular}}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) +\(::Adjoint{<:Any,<:Union{UnitUpperTriangular,UnitLowerTriangular}}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) +\(::Transpose{<:Any,<:Union{UpperTriangular,LowerTriangular}}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) +\(::Transpose{<:Any,<:Union{UnitUpperTriangular,UnitLowerTriangular}}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) +/(rowvec::RowVector, A::Union{UpperTriangular,LowerTriangular}) = rvtranspose(transpose(A) \ rvtranspose(rowvec)) +/(rowvec::RowVector, A::Union{UnitUpperTriangular,UnitLowerTriangular}) = rvtranspose(transpose(A) \ rvtranspose(rowvec)) +/(rowvec::RowVector, transA::Transpose{<:Any,<:Union{UpperTriangular,LowerTriangular}}) = rvtranspose(transA.parent \ rvtranspose(rowvec)) +/(rowvec::RowVector, transA::Transpose{<:Any,<:Union{UnitUpperTriangular,UnitLowerTriangular}}) = rvtranspose(transA.parent \ rvtranspose(rowvec)) +/(rowvec::RowVector, adjA::Adjoint{<:Any,<:Union{UpperTriangular,LowerTriangular}}) = /(rowvec, 
adjoint(adjA.parent)) +/(rowvec::RowVector, adjA::Adjoint{<:Any,<:Union{UnitUpperTriangular,UnitLowerTriangular}}) = /(rowvec, adjoint(adjA.parent)) +*(A::Adjoint{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:RowVector}) = A * rvtranspose(B.parent) +*(A::Transpose{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:RowVector}) = A * rvadjoint(B.parent) + + + +# PR #25184. Use getproperty instead of getindex for Factorizations +function getindex(F::Factorization, s::Symbol) + depwarn("`F[:$s]` is deprecated, use `F.$s` instead.", :getindex) + return getproperty(F, s) +end +@deprecate getq(F::Factorization) F.Q diff --git a/base/linalg/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl similarity index 100% rename from base/linalg/diagonal.jl rename to stdlib/LinearAlgebra/src/diagonal.jl diff --git a/base/linalg/eigen.jl b/stdlib/LinearAlgebra/src/eigen.jl similarity index 99% rename from base/linalg/eigen.jl rename to stdlib/LinearAlgebra/src/eigen.jl index cdf54e5041e8d4..2b3d449b9ec2c6 100644 --- a/base/linalg/eigen.jl +++ b/stdlib/LinearAlgebra/src/eigen.jl @@ -75,7 +75,7 @@ make rows and columns more equal in norm. The default is `true` for both options # Examples ```jldoctest julia> F = eigfact([1.0 0.0 0.0; 0.0 3.0 0.0; 0.0 0.0 18.0]) -Base.LinAlg.Eigen{Float64,Float64,Array{Float64,2},Array{Float64,1}}([1.0, 3.0, 18.0], [1.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0]) +LinearAlgebra.Eigen{Float64,Float64,Array{Float64,2},Array{Float64,1}}([1.0, 3.0, 18.0], [1.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0]) julia> F.values 3-element Array{Float64,1}: diff --git a/base/linalg/exceptions.jl b/stdlib/LinearAlgebra/src/exceptions.jl similarity index 100% rename from base/linalg/exceptions.jl rename to stdlib/LinearAlgebra/src/exceptions.jl diff --git a/base/linalg/factorization.jl b/stdlib/LinearAlgebra/src/factorization.jl similarity index 97% rename from base/linalg/factorization.jl rename to stdlib/LinearAlgebra/src/factorization.jl index 13fe9d8201da22..f878f792464f30 100644 --- a/base/linalg/factorization.jl +++ b/stdlib/LinearAlgebra/src/factorization.jl @@ -24,12 +24,12 @@ Test that a factorization of a matrix succeeded. ```jldoctest julia> F = cholfact([1 0; 0 1]); -julia> LinAlg.issuccess(F) +julia> LinearAlgebra.issuccess(F) true julia> F = lufact([1 0; 0 0]); -julia> LinAlg.issuccess(F) +julia> LinearAlgebra.issuccess(F) false ``` """ diff --git a/base/linalg/generic.jl b/stdlib/LinearAlgebra/src/generic.jl similarity index 99% rename from base/linalg/generic.jl rename to stdlib/LinearAlgebra/src/generic.jl index 215d8b21d189af..f1f7f7b63dd5df 100644 --- a/base/linalg/generic.jl +++ b/stdlib/LinearAlgebra/src/generic.jl @@ -1205,7 +1205,7 @@ scale!(b::AbstractVector, A::AbstractMatrix) = scale!(A,b,A) peakflops(n::Integer=2000; parallel::Bool=false) `peakflops` computes the peak flop rate of the computer by using double precision -[`gemm!`](@ref Base.LinAlg.BLAS.gemm!). By default, if no arguments are specified, it +[`gemm!`](@ref LinearAlgebra.BLAS.gemm!). By default, if no arguments are specified, it multiplies a matrix of size `n x n`, where `n = 2000`. If the underlying BLAS is using multiple threads, higher flop rates are realized. The number of BLAS threads can be set with [`BLAS.set_num_threads(n)`](@ref). 
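Before the next file's hunks, here is a short illustrative sketch of how the deprecation mappings above (the `A[ct]_mul_B[ct]`-style renames and the `getindex` shim for factorizations from PR #25184) translate into user code. It is an editorial example, not part of the patch; it assumes a Julia build with this change applied, and it uses `qrfact` only because that constructor and the `F.Q` property are already referenced by the `getq` deprecation above.

```julia
# Illustrative sketch, not part of the patch. Assumes a Julia build with this
# change applied, where LinearAlgebra is a stdlib and the deprecations above
# are active.
using LinearAlgebra

A = [1.0 2.0; 3.0 4.0]
B = [5.0 6.0; 7.0 8.0]

# Old combined-operation names such as Ac_mul_B and At_ldiv_B now warn and
# forward to the lazy Adjoint/Transpose wrappers, so new code spells the
# operation directly:
adjoint(A) * B     # pattern replacing Ac_mul_B(A, B); A' * B is equivalent
transpose(A) \ B   # pattern replacing At_ldiv_B(A, B)

# Factorization components are accessed with getproperty instead of getindex:
F = qrfact(A)
F.Q                # replaces F[:Q], which now emits a depwarn
```

Both spellings keep working during the deprecation period; the old names simply emit depwarns that point at the replacements listed in the mappings above.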
diff --git a/base/linalg/givens.jl b/stdlib/LinearAlgebra/src/givens.jl similarity index 98% rename from base/linalg/givens.jl rename to stdlib/LinearAlgebra/src/givens.jl index f305760e8159f8..4c84aac293974e 100644 --- a/base/linalg/givens.jl +++ b/stdlib/LinearAlgebra/src/givens.jl @@ -17,7 +17,7 @@ function _absvecormat_mul_adjrot(A::AbstractVecOrMat{T}, adjR::Adjoint{<:Any,<:A mul!(TS == T ? copy(A) : convert(AbstractArray{TS}, A), adjoint(convert(AbstractRotation{TS}, R))) end """ - LinAlg.Givens(i1,i2,c,s) -> G + LinearAlgebra.Givens(i1,i2,c,s) -> G A Givens rotation linear operator. The fields `c` and `s` represent the cosine and sine of the rotation angle, respectively. The `Givens` type supports left multiplication `G*A` and @@ -258,7 +258,7 @@ y[i1] = r y[i2] = 0 ``` -See also: [`LinAlg.Givens`](@ref) +See also: [`LinearAlgebra.Givens`](@ref) """ function givens(f::T, g::T, i1::Integer, i2::Integer) where T if i1 == i2 @@ -284,7 +284,7 @@ B[i1,j] = r B[i2,j] = 0 ``` -See also: [`LinAlg.Givens`](@ref) +See also: [`LinearAlgebra.Givens`](@ref) """ givens(A::AbstractMatrix, i1::Integer, i2::Integer, j::Integer) = givens(A[i1,j], A[i2,j],i1,i2) @@ -303,7 +303,7 @@ B[i1] = r B[i2] = 0 ``` -See also: [`LinAlg.Givens`](@ref) +See also: [`LinearAlgebra.Givens`](@ref) """ givens(x::AbstractVector, i1::Integer, i2::Integer) = givens(x[i1], x[i2], i1, i2) diff --git a/base/linalg/hessenberg.jl b/stdlib/LinearAlgebra/src/hessenberg.jl similarity index 100% rename from base/linalg/hessenberg.jl rename to stdlib/LinearAlgebra/src/hessenberg.jl diff --git a/base/linalg/lapack.jl b/stdlib/LinearAlgebra/src/lapack.jl similarity index 99% rename from base/linalg/lapack.jl rename to stdlib/LinearAlgebra/src/lapack.jl index 4204be47f30f5b..3dd4f8d1c3eeac 100644 --- a/base/linalg/lapack.jl +++ b/stdlib/LinearAlgebra/src/lapack.jl @@ -7,11 +7,13 @@ Interfaces to LAPACK subroutines. const liblapack = Base.liblapack_name -import ..LinAlg.BLAS.@blasfunc +import ..LinearAlgebra.BLAS.@blasfunc -import ..LinAlg: BlasFloat, Char, BlasInt, LAPACKException, +import ..LinearAlgebra: BlasFloat, Char, BlasInt, LAPACKException, DimensionMismatch, SingularException, PosDefException, chkstride1, checksquare +using ..LinearAlgebra: triu, dot + using Base: iszero #Generic LAPACK error handlers diff --git a/base/linalg/ldlt.jl b/stdlib/LinearAlgebra/src/ldlt.jl similarity index 100% rename from base/linalg/ldlt.jl rename to stdlib/LinearAlgebra/src/ldlt.jl diff --git a/base/linalg/lq.jl b/stdlib/LinearAlgebra/src/lq.jl similarity index 100% rename from base/linalg/lq.jl rename to stdlib/LinearAlgebra/src/lq.jl diff --git a/base/linalg/lu.jl b/stdlib/LinearAlgebra/src/lu.jl similarity index 99% rename from base/linalg/lu.jl rename to stdlib/LinearAlgebra/src/lu.jl index c38340afe50666..50f070ac883806 100644 --- a/base/linalg/lu.jl +++ b/stdlib/LinearAlgebra/src/lu.jl @@ -43,7 +43,7 @@ julia> A = [4. 3.; 6. 3.] 
4.0 3.0 julia> F = lufact!(A) -Base.LinAlg.LU{Float64,Array{Float64,2}} +LinearAlgebra.LU{Float64,Array{Float64,2}} L factor: 2×2 Array{Float64,2}: 1.0 0.0 @@ -162,7 +162,7 @@ julia> A = [4 3; 6 3] 6 3 julia> F = lufact(A) -Base.LinAlg.LU{Float64,Array{Float64,2}} +LinearAlgebra.LU{Float64,Array{Float64,2}} L factor: 2×2 Array{Float64,2}: 1.0 0.0 @@ -592,7 +592,7 @@ AbstractArray(F::LU) = AbstractMatrix(F) Matrix(F::LU) = Array(AbstractArray(F)) Array(F::LU) = Matrix(F) -function Tridiagonal(F::Base.LinAlg.LU{T,Tridiagonal{T,V}}) where {T,V} +function Tridiagonal(F::LU{T,Tridiagonal{T,V}}) where {T,V} n = size(F, 1) dl = copy(F.factors.dl) diff --git a/base/linalg/matmul.jl b/stdlib/LinearAlgebra/src/matmul.jl similarity index 99% rename from base/linalg/matmul.jl rename to stdlib/LinearAlgebra/src/matmul.jl index 229667b4bf451c..3f3bf8143db3be 100644 --- a/base/linalg/matmul.jl +++ b/stdlib/LinearAlgebra/src/matmul.jl @@ -432,7 +432,7 @@ function copyto!(B::AbstractVecOrMat, ir_dest::UnitRange{Int}, jr_dest::UnitRang if tM == 'N' copyto!(B, ir_dest, jr_dest, M, ir_src, jr_src) else - Base.copy_transpose!(B, ir_dest, jr_dest, M, jr_src, ir_src) + LinearAlgebra.copy_transpose!(B, ir_dest, jr_dest, M, jr_src, ir_src) tM == 'C' && conj!(B) end B @@ -440,7 +440,7 @@ end function copy_transpose!(B::AbstractMatrix, ir_dest::UnitRange{Int}, jr_dest::UnitRange{Int}, tM::Char, M::AbstractVecOrMat, ir_src::UnitRange{Int}, jr_src::UnitRange{Int}) if tM == 'N' - Base.copy_transpose!(B, ir_dest, jr_dest, M, ir_src, jr_src) + LinearAlgebra.copy_transpose!(B, ir_dest, jr_dest, M, ir_src, jr_src) else copyto!(B, ir_dest, jr_dest, M, jr_src, ir_src) tM == 'C' && conj!(B) @@ -567,7 +567,7 @@ function _generic_matmatmul!(C::AbstractVecOrMat{R}, tA, tB, A::AbstractVecOrMat z = convert(promote_type(typeof(z1), R), z1) if mA < tile_size && nA < tile_size && nB < tile_size - Base.copy_transpose!(Atile, 1:nA, 1:mA, tA, A, 1:mA, 1:nA) + copy_transpose!(Atile, 1:nA, 1:mA, tA, A, 1:mA, 1:nA) copyto!(Btile, 1:mB, 1:nB, tB, B, 1:mB, 1:nB) for j = 1:nB boff = (j-1)*tile_size @@ -593,7 +593,7 @@ function _generic_matmatmul!(C::AbstractVecOrMat{R}, tA, tB, A::AbstractVecOrMat for kb = 1:tile_size:nA klim = min(kb+tile_size-1,mB) klen = klim-kb+1 - Base.copy_transpose!(Atile, 1:klen, 1:ilen, tA, A, ib:ilim, kb:klim) + copy_transpose!(Atile, 1:klen, 1:ilen, tA, A, ib:ilim, kb:klim) copyto!(Btile, 1:klen, 1:jlen, tB, B, kb:klim, jb:jlim) for j=1:jlen bcoff = (j-1)*tile_size diff --git a/base/linalg/qr.jl b/stdlib/LinearAlgebra/src/qr.jl similarity index 98% rename from base/linalg/qr.jl rename to stdlib/LinearAlgebra/src/qr.jl index 376bdefba6940b..5a1d33382cebb7 100644 --- a/base/linalg/qr.jl +++ b/stdlib/LinearAlgebra/src/qr.jl @@ -184,13 +184,13 @@ function qrfactPivotedUnblocked!(A::StridedMatrix) # Compute reflector of columns j x = view(A, j:m, j) - τj = LinAlg.reflector!(x) + τj = LinearAlgebra.reflector!(x) τ[j] = τj # Update trailing submatrix with reflector - LinAlg.reflectorApply!(x, τj, view(A, j:m, j+1:n)) + LinearAlgebra.reflectorApply!(x, τj, view(A, j:m, j+1:n)) end - return LinAlg.QRPivoted{eltype(A), typeof(A)}(A, τ, piv) + return LinearAlgebra.QRPivoted{eltype(A), typeof(A)}(A, τ, piv) end # LAPACK version @@ -216,7 +216,7 @@ julia> a = [1. 2.; 3. 4.] 
3.0 4.0 julia> qrfact!(a) -Base.LinAlg.QRCompactWY{Float64,Array{Float64,2}} with factors Q and R: +LinearAlgebra.QRCompactWY{Float64,Array{Float64,2}} with factors Q and R: [-0.316228 -0.948683; -0.948683 0.316228] [-3.16228 -4.42719; 0.0 -0.632456] @@ -280,7 +280,7 @@ julia> A = [3.0 -6.0; 4.0 -8.0; 0.0 1.0] 0.0 1.0 julia> F = qrfact(A) -Base.LinAlg.QRCompactWY{Float64,Array{Float64,2}} with factors Q and R: +LinearAlgebra.QRCompactWY{Float64,Array{Float64,2}} with factors Q and R: [-0.6 0.0 0.8; -0.8 0.0 -0.6; 0.0 -1.0 0.0] [-5.0 10.0; 0.0 -1.0] @@ -346,7 +346,7 @@ Returns `w`, a unit vector in the direction of `v`, and `r`, the norm of `v`. See also [`normalize`](@ref), [`normalize!`](@ref), -and [`LinAlg.qr!`](@ref). +and [`LinearAlgebra.qr!`](@ref). # Examples ```jldoctest @@ -374,7 +374,7 @@ function qr(v::AbstractVector) end """ - LinAlg.qr!(v::AbstractVector) -> w, r + LinearAlgebra.qr!(v::AbstractVector) -> w, r Computes the polar decomposition of a vector. Instead of returning a new vector as `qr(v::AbstractVector)`, this function mutates the input vector `v` in place. @@ -391,7 +391,7 @@ julia> v = [1.; 2.] 1.0 2.0 -julia> w, r = Base.LinAlg.qr!(v) +julia> w, r = LinearAlgebra.qr!(v) ([0.447214, 0.894427], 2.23606797749979) julia> w === v @@ -806,7 +806,7 @@ function ldiv!(A::QR{T}, B::StridedMatrix{T}) where T end end end - Base.LinAlg.ldiv!(UpperTriangular(view(R, :, 1:minmn)), view(B, 1:minmn, :)) + LinearAlgebra.ldiv!(UpperTriangular(view(R, :, 1:minmn)), view(B, 1:minmn, :)) if n > m # Apply elementary transformation to solution B[m + 1:mB,1:nB] = zero(T) for j = 1:nB diff --git a/base/linalg/rowvector.jl b/stdlib/LinearAlgebra/src/rowvector.jl similarity index 100% rename from base/linalg/rowvector.jl rename to stdlib/LinearAlgebra/src/rowvector.jl diff --git a/base/linalg/schur.jl b/stdlib/LinearAlgebra/src/schur.jl similarity index 95% rename from base/linalg/schur.jl rename to stdlib/LinearAlgebra/src/schur.jl index 479c9639d9e196..ca56411cdd0dfa 100644 --- a/base/linalg/schur.jl +++ b/stdlib/LinearAlgebra/src/schur.jl @@ -22,7 +22,7 @@ julia> A = [5. 7.; -2. -4.] -2.0 -4.0 julia> F = schurfact!(A) -Base.LinAlg.Schur{Float64,Array{Float64,2}} with factors T and Z: +LinearAlgebra.Schur{Float64,Array{Float64,2}} with factors T and Z: [3.0 9.0; 0.0 -2.0] [0.961524 0.274721; -0.274721 0.961524] and values: @@ -34,7 +34,7 @@ julia> A 0.0 -2.0 ``` """ -schurfact!(A::StridedMatrix{<:BlasFloat}) = Schur(LinAlg.LAPACK.gees!('V', A)...) +schurfact!(A::StridedMatrix{<:BlasFloat}) = Schur(LinearAlgebra.LAPACK.gees!('V', A)...) """ schurfact(A::StridedMatrix) -> F::Schur @@ -52,7 +52,7 @@ julia> A = [5. 7.; -2. -4.] -2.0 -4.0 julia> F = schurfact(A) -Base.LinAlg.Schur{Float64,Array{Float64,2}} with factors T and Z: +LinearAlgebra.Schur{Float64,Array{Float64,2}} with factors T and Z: [3.0 9.0; 0.0 -2.0] [0.961524 0.274721; -0.274721 0.961524] and values: @@ -158,7 +158,7 @@ ordschur(schur::Schur, select::Union{Vector{Bool},BitVector}) = Same as [`ordschur`](@ref) but overwrites the input arguments. 
""" ordschur!(T::StridedMatrix{Ty}, Z::StridedMatrix{Ty}, select::Union{Vector{Bool},BitVector}) where {Ty<:BlasFloat} = - LinAlg.LAPACK.trsen!(convert(Vector{BlasInt}, select), T, Z)[1:3] + LinearAlgebra.LAPACK.trsen!(convert(Vector{BlasInt}, select), T, Z)[1:3] """ ordschur(T::StridedMatrix, Z::StridedMatrix, select::Union{Vector{Bool},BitVector}) -> T::StridedMatrix, Z::StridedMatrix, λ::Vector @@ -196,7 +196,7 @@ end Same as [`schurfact`](@ref) but uses the input matrices `A` and `B` as workspace. """ schurfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = - GeneralizedSchur(LinAlg.LAPACK.gges!('V', 'V', A, B)...) + GeneralizedSchur(LinearAlgebra.LAPACK.gges!('V', 'V', A, B)...) """ schurfact(A::StridedMatrix, B::StridedMatrix) -> F::GeneralizedSchur @@ -246,7 +246,7 @@ Same as [`ordschur`](@ref) but overwrites the factorization the input arguments. """ ordschur!(S::StridedMatrix{Ty}, T::StridedMatrix{Ty}, Q::StridedMatrix{Ty}, Z::StridedMatrix{Ty}, select::Union{Vector{Bool},BitVector}) where {Ty<:BlasFloat} = - LinAlg.LAPACK.tgsen!(convert(Vector{BlasInt}, select), S, T, Q, Z) + LinearAlgebra.LAPACK.tgsen!(convert(Vector{BlasInt}, select), S, T, Q, Z) """ ordschur(S::StridedMatrix, T::StridedMatrix, Q::StridedMatrix, Z::StridedMatrix, select) -> S::StridedMatrix, T::StridedMatrix, Q::StridedMatrix, Z::StridedMatrix, α::Vector, β::Vector diff --git a/base/linalg/special.jl b/stdlib/LinearAlgebra/src/special.jl similarity index 100% rename from base/linalg/special.jl rename to stdlib/LinearAlgebra/src/special.jl diff --git a/base/linalg/svd.jl b/stdlib/LinearAlgebra/src/svd.jl similarity index 100% rename from base/linalg/svd.jl rename to stdlib/LinearAlgebra/src/svd.jl diff --git a/base/linalg/symmetric.jl b/stdlib/LinearAlgebra/src/symmetric.jl similarity index 100% rename from base/linalg/symmetric.jl rename to stdlib/LinearAlgebra/src/symmetric.jl diff --git a/base/linalg/transpose.jl b/stdlib/LinearAlgebra/src/transpose.jl similarity index 100% rename from base/linalg/transpose.jl rename to stdlib/LinearAlgebra/src/transpose.jl diff --git a/base/linalg/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl similarity index 99% rename from base/linalg/triangular.jl rename to stdlib/LinearAlgebra/src/triangular.jl index bfb25b5f14e9e6..0f69a192d00c00 100644 --- a/base/linalg/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -15,7 +15,7 @@ for t in (:LowerTriangular, :UnitLowerTriangular, :UpperTriangular, $t(A::$t) = A $t{T}(A::$t{T}) where {T} = A function $t(A::AbstractMatrix) - Base.LinAlg.checksquare(A) + checksquare(A) return $t{eltype(A), typeof(A)}(A) end diff --git a/base/linalg/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl similarity index 100% rename from base/linalg/tridiag.jl rename to stdlib/LinearAlgebra/src/tridiag.jl diff --git a/base/linalg/uniformscaling.jl b/stdlib/LinearAlgebra/src/uniformscaling.jl similarity index 99% rename from base/linalg/uniformscaling.jl rename to stdlib/LinearAlgebra/src/uniformscaling.jl index 10f8c0f66d2ed0..fcaa3b4a04e3bf 100644 --- a/base/linalg/uniformscaling.jl +++ b/stdlib/LinearAlgebra/src/uniformscaling.jl @@ -2,7 +2,6 @@ import Base: copy, adjoint, getindex, show, transpose, one, zero, inv, hcat, vcat, hvcat -import Base.LinAlg: SingularException """ UniformScaling{T<:Number} @@ -242,7 +241,7 @@ function isapprox(J::UniformScaling, A::AbstractMatrix; rtol::Real = Base.rtoldefault(promote_leaf_eltypes(A), eltype(J), atol), nans::Bool = false, norm::Function = vecnorm) n = checksquare(A) - normJ 
= norm === Base.norm ? abs(J.λ) : + normJ = norm === LinearAlgebra.norm ? abs(J.λ) : norm === vecnorm ? abs(J.λ) * sqrt(n) : norm(Diagonal(fill(J.λ, n))) return norm(A - J) <= max(atol, rtol * max(norm(A), normJ)) @@ -378,7 +377,7 @@ chol(J::UniformScaling, args...) = ((C, info) = _chol!(J, nothing); @assertposde ## Matrix construction from UniformScaling -Matrix{T}(s::UniformScaling, dims::Dims{2}) where {T} = setindex!(zeros(T, dims), T(s.λ), diagind(dims...)) +Matrix{T}(s::UniformScaling, dims::Dims{2}) where {T} = setindex!(Base.zeros(T, dims), T(s.λ), diagind(dims...)) Matrix{T}(s::UniformScaling, m::Integer, n::Integer) where {T} = Matrix{T}(s, Dims((m, n))) Matrix(s::UniformScaling, m::Integer, n::Integer) = Matrix(s, Dims((m, n))) Matrix(s::UniformScaling, dims::Dims{2}) = Matrix{eltype(s)}(s, dims) diff --git a/test/linalg/adjtrans.jl b/stdlib/LinearAlgebra/test/adjtrans.jl similarity index 99% rename from test/linalg/adjtrans.jl rename to stdlib/LinearAlgebra/test/adjtrans.jl index dedcdce6cfa74b..0d5f3362c4ee55 100644 --- a/test/linalg/adjtrans.jl +++ b/stdlib/LinearAlgebra/test/adjtrans.jl @@ -1,10 +1,8 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -# This file is a part of Julia. License is MIT: https://julialang.org/license +module TestAdjointTranspose -using Test -using Base.LinAlg: Adjoint, Transpose -using SparseArrays +using Test, LinearAlgebra, SparseArrays @testset "Adjoint and Transpose inner constructor basics" begin intvec, intmat = [1, 2], [1 2; 3 4] @@ -448,3 +446,5 @@ end @test adjoint!(a, b) === a @test adjoint!(b, a) === b end + +end # module TestAdjointTranspose diff --git a/test/linalg/bidiag.jl b/stdlib/LinearAlgebra/test/bidiag.jl similarity index 96% rename from test/linalg/bidiag.jl rename to stdlib/LinearAlgebra/test/bidiag.jl index 11fa8dfdf57320..7220665b380725 100644 --- a/test/linalg/bidiag.jl +++ b/stdlib/LinearAlgebra/test/bidiag.jl @@ -1,9 +1,11 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Test, Random -using Base.LinAlg: mul! -import Base.LinAlg: BlasReal, BlasFloat -using SparseArrays +module TestBidiagonal + +using Test, LinearAlgebra, SparseArrays, Random +using LinearAlgebra: BlasReal, BlasFloat + +include("testutils.jl") # test_approx_eq_modphase n = 10 #Size of test matrix srand(1) @@ -194,7 +196,7 @@ srand(1) end x = T \ b tx = Tfull \ b - @test_throws DimensionMismatch Base.LinAlg.naivesub!(T,Vector{elty}(uninitialized,n+1)) + @test_throws DimensionMismatch LinearAlgebra.naivesub!(T,Vector{elty}(uninitialized,n+1)) @test norm(x-tx,Inf) <= 4*condT*max(eps()*norm(tx,Inf), eps(promty)*norm(x,Inf)) @testset "Generic Mat-vec ops" begin @test T*b ≈ Tfull*b @@ -233,7 +235,7 @@ srand(1) d2, v2 = eig(map(elty<:Complex ? ComplexF64 : Float64,Tfull)) @test (uplo == :U ? d1 : reverse(d1)) ≈ d2 if elty <: Real - Test.test_approx_eq_modphase(v1, uplo == :U ? v2 : v2[:,n:-1:1]) + test_approx_eq_modphase(v1, uplo == :U ? 
v2 : v2[:,n:-1:1]) end end end @@ -246,8 +248,8 @@ srand(1) u2, d2, v2 = svd(T) @test d1 ≈ d2 if elty <: Real - Test.test_approx_eq_modphase(u1, u2) - Test.test_approx_eq_modphase(v1, v2) + test_approx_eq_modphase(u1, u2) + test_approx_eq_modphase(v1, v2) end @test 0 ≈ vecnorm(u2*Diagonal(d2)*v2'-Tfull) atol=n*max(n^2*eps(relty),vecnorm(u1*Diagonal(d1)*v1'-Tfull)) @inferred svdvals(T) @@ -303,7 +305,7 @@ end @test promote(C,A) isa Tuple{Tridiagonal, Tridiagonal} end -using Base.LinAlg: fillstored!, UnitLowerTriangular +using LinearAlgebra: fillstored!, UnitLowerTriangular @testset "fill! and fillstored!" begin let # fillstored! A = Tridiagonal(randn(2), randn(3), randn(2)) @@ -362,3 +364,5 @@ end @test promote_type(Tridiagonal{Tuple{T}} where T<:Integer, Bidiagonal{Tuple{S}} where S<:Integer) <: Tridiagonal @test promote_type(Tridiagonal{Tuple{T}} where T<:Integer, Bidiagonal{Int}) <: Tridiagonal end + +end # module TestBidiagonal diff --git a/test/linalg/blas.jl b/stdlib/LinearAlgebra/test/blas.jl similarity index 94% rename from test/linalg/blas.jl rename to stdlib/LinearAlgebra/test/blas.jl index 02379415ce6306..b3ecf1a8bfa965 100644 --- a/test/linalg/blas.jl +++ b/stdlib/LinearAlgebra/test/blas.jl @@ -1,8 +1,9 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Random +module TestBLAS -import Base.LinAlg, Base.LinAlg.BlasReal, Base.LinAlg.BlasComplex +using Test, LinearAlgebra, Random +using LinearAlgebra: BlasReal, BlasComplex srand(100) ## BLAS tests - testing the interface code to BLAS routines @@ -16,10 +17,10 @@ srand(100) end U = convert(Array{elty, 2}, U) V = convert(Array{elty, 2}, V) - @test tril(LinAlg.BLAS.syr2k('L','N',U,V)) ≈ tril(U*transpose(V) + V*transpose(U)) - @test triu(LinAlg.BLAS.syr2k('U','N',U,V)) ≈ triu(U*transpose(V) + V*transpose(U)) - @test tril(LinAlg.BLAS.syr2k('L','T',U,V)) ≈ tril(transpose(U)*V + transpose(V)*U) - @test triu(LinAlg.BLAS.syr2k('U','T',U,V)) ≈ triu(transpose(U)*V + transpose(V)*U) + @test tril(LinearAlgebra.BLAS.syr2k('L','N',U,V)) ≈ tril(U*transpose(V) + V*transpose(U)) + @test triu(LinearAlgebra.BLAS.syr2k('U','N',U,V)) ≈ triu(U*transpose(V) + V*transpose(U)) + @test tril(LinearAlgebra.BLAS.syr2k('L','T',U,V)) ≈ tril(transpose(U)*V + transpose(V)*U) + @test triu(LinearAlgebra.BLAS.syr2k('U','T',U,V)) ≈ triu(transpose(U)*V + transpose(V)*U) end if elty in (ComplexF32, ComplexF64) @@ -30,10 +31,10 @@ srand(100) V = complex.(V, V) U = convert(Array{elty, 2}, U) V = convert(Array{elty, 2}, V) - @test tril(LinAlg.BLAS.her2k('L','N',U,V)) ≈ tril(U*V' + V*U') - @test triu(LinAlg.BLAS.her2k('U','N',U,V)) ≈ triu(U*V' + V*U') - @test tril(LinAlg.BLAS.her2k('L','C',U,V)) ≈ tril(U'*V + V'*U) - @test triu(LinAlg.BLAS.her2k('U','C',U,V)) ≈ triu(U'*V + V'*U) + @test tril(LinearAlgebra.BLAS.her2k('L','N',U,V)) ≈ tril(U*V' + V*U') + @test triu(LinearAlgebra.BLAS.her2k('U','N',U,V)) ≈ triu(U*V' + V*U') + @test tril(LinearAlgebra.BLAS.her2k('L','C',U,V)) ≈ tril(U'*V + V'*U) + @test triu(LinearAlgebra.BLAS.her2k('U','C',U,V)) ≈ triu(U'*V + V'*U) end end @@ -264,7 +265,7 @@ srand(100) @test all(BLAS.gemm('N', 'N', el2, I4, I4) .== el2 * I4) @test all(BLAS.gemm('N', 'T', el2, I4, I4) .== el2 * I4) @test all(BLAS.gemm('T', 'N', el2, I4, I4) .== el2 * I4) - @test all(LinAlg.BLAS.gemm('T', 'T', el2, I4, I4) .== el2 * I4) + @test all(LinearAlgebra.BLAS.gemm('T', 'T', el2, I4, I4) .== el2 * I4) I4cp = copy(I4) @test all(BLAS.gemm!('N', 'N', one(elty), I4, I4, elm1, I4cp) .== Z4) @test all(I4cp .== Z4) @@ -297,7 +298,7 @@ 
srand(100) @test all(tril(BLAS.herk('L', 'C', L4)) .== tril(BLAS.gemm('T', 'N', L4, L4))) ans = similar(L4) @test all(tril(BLAS.herk('L','C', L4)) .== tril(BLAS.herk!('L', 'C', real(one(elty)), L4, real(zero(elty)), ans))) - @test all(Base.LinAlg.copytri!(ans, 'L') .== LinAlg.BLAS.gemm('T', 'N', L4, L4)) + @test all(LinearAlgebra.copytri!(ans, 'L') .== LinearAlgebra.BLAS.gemm('T', 'N', L4, L4)) @test_throws DimensionMismatch BLAS.herk!('L','N',real(one(elty)),Matrix{elty}(I, 5, 5),real(one(elty)), Matrix{elty}(I, 6, 6)) else @test all(triu(BLAS.syrk('U', 'N', U4)) .== triu(BLAS.gemm('N', 'T', U4, U4))) @@ -310,7 +311,7 @@ srand(100) @test all(tril(BLAS.syrk('L', 'T', L4)) .== tril(BLAS.gemm('T', 'N', L4, L4))) ans = similar(L4) @test all(tril(BLAS.syrk('L','T', L4)) .== tril(BLAS.syrk!('L', 'T', one(elty), L4, zero(elty), ans))) - @test all(Base.LinAlg.copytri!(ans, 'L') .== BLAS.gemm('T', 'N', L4, L4)) + @test all(LinearAlgebra.copytri!(ans, 'L') .== BLAS.gemm('T', 'N', L4, L4)) @test_throws DimensionMismatch BLAS.syrk!('L','N',one(elty), Matrix{elty}(I, 5, 5),one(elty), Matrix{elty}(I, 6, 6)) end end @@ -440,3 +441,5 @@ Base.stride(A::WrappedArray, i::Int) = stride(A.A, i) @test C == WrappedArray([63 138+38im; 35+27im 352]) end end + +end # module TestBLAS diff --git a/test/linalg/bunchkaufman.jl b/stdlib/LinearAlgebra/test/bunchkaufman.jl similarity index 90% rename from test/linalg/bunchkaufman.jl rename to stdlib/LinearAlgebra/test/bunchkaufman.jl index 8189023bd2b4a5..9b33dc864d6d58 100644 --- a/test/linalg/bunchkaufman.jl +++ b/stdlib/LinearAlgebra/test/bunchkaufman.jl @@ -1,9 +1,10 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Test, Random +module TestBunchKaufman +using Test, LinearAlgebra, Random +using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted using Base: getproperty -using Base.LinAlg: BlasComplex, BlasFloat, BlasReal, QRPivoted n = 10 @@ -34,11 +35,11 @@ bimg = randn(n,2)/2 ε = εa = eps(abs(float(one(eltya)))) # check that factorize gives a Bunch-Kaufman - @test isa(factorize(asym), LinAlg.BunchKaufman) - @test isa(factorize(aher), LinAlg.BunchKaufman) + @test isa(factorize(asym), LinearAlgebra.BunchKaufman) + @test isa(factorize(aher), LinearAlgebra.BunchKaufman) @testset "$uplo Bunch-Kaufman factor of indefinite matrix" for uplo in (:L, :U) bc1 = bkfact(Hermitian(aher, uplo)) - @test LinAlg.issuccess(bc1) + @test LinearAlgebra.issuccess(bc1) @test logabsdet(bc1)[1] ≈ log(abs(det(bc1))) if eltya <: Real @test logabsdet(bc1)[2] == sign(det(bc1)) @@ -87,7 +88,7 @@ bimg = randn(n,2)/2 @testset "$uplo Bunch-Kaufman factors of a pos-def matrix" for uplo in (:U, :L) @testset "rook pivoting: $rook" for rook in (false, true) bc2 = bkfact(Hermitian(apd, uplo), rook) - @test LinAlg.issuccess(bc2) + @test LinearAlgebra.issuccess(bc2) bks = split(sprint(show, "text/plain", bc2), "\n") @test bks[1] == summary(bc2) @test bks[2] == "D factor:" @@ -114,13 +115,13 @@ bimg = randn(n,2)/2 @testset "$uplo Bunch-Kaufman factors of a singular matrix" for uplo in (:L, :U) @testset for rook in (false, true) F = bkfact(issymmetric(As) ? Symmetric(As, uplo) : Hermitian(As, uplo), rook) - @test !LinAlg.issuccess(F) + @test !LinearAlgebra.issuccess(F) # test printing of this as well! 
bks = sprint(show, "text/plain", F) @test bks == "Failed factorization of type $(typeof(F))" @test det(F) == 0 - @test_throws LinAlg.SingularException inv(F) - @test_throws LinAlg.SingularException F \ fill(1., size(As,1)) + @test_throws LinearAlgebra.SingularException inv(F) + @test_throws LinearAlgebra.SingularException F \ fill(1., size(As,1)) end end end @@ -139,3 +140,5 @@ end @test_throws DomainError logdet(bkfact([-1 -1; -1 1])) @test logabsdet(bkfact([8 4; 4 2]))[1] == -Inf + +end # module TestBunchKaufman diff --git a/test/linalg/cholesky.jl b/stdlib/LinearAlgebra/test/cholesky.jl similarity index 89% rename from test/linalg/cholesky.jl rename to stdlib/LinearAlgebra/test/cholesky.jl index 5db4042b4cc89a..928a691e524fb0 100644 --- a/test/linalg/cholesky.jl +++ b/stdlib/LinearAlgebra/test/cholesky.jl @@ -1,8 +1,9 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Test, Random +module TestCholesky -using Base.LinAlg: BlasComplex, BlasFloat, BlasReal, QRPivoted, PosDefException +using Test, LinearAlgebra, Random +using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted, PosDefException function unary_ops_tests(a, ca, tol; n=size(a, 1)) @test inv(ca)*a ≈ Matrix(I, n, n) @@ -65,7 +66,7 @@ end @testset "throw for non-square input" begin A = rand(eltya, 2, 3) @test_throws DimensionMismatch chol(A) - @test_throws DimensionMismatch Base.LinAlg.chol!(A) + @test_throws DimensionMismatch LinearAlgebra.chol!(A) @test_throws DimensionMismatch cholfact(A) @test_throws DimensionMismatch cholfact!(A) end @@ -83,7 +84,7 @@ end for i=1:n, j=1:n @test E[i,j] <= (n+1)ε/(1-(n+1)ε)*real(sqrt(apd[i,i]*apd[j,j])) end - @test LinAlg.issuccess(capd) + @test LinearAlgebra.issuccess(capd) @inferred(logdet(capd)) apos = apd[1,1] # test chol(x::Number), needs x>0 @@ -129,7 +130,7 @@ end #pivoted upper Cholesky if eltya != BigFloat cz = cholfact(Hermitian(zeros(eltya,n,n)), Val(true)) - @test_throws Base.LinAlg.RankDeficientException Base.LinAlg.chkfullrank(cz) + @test_throws LinearAlgebra.RankDeficientException LinearAlgebra.chkfullrank(cz) cpapd = cholfact(apdh, Val(true)) unary_ops_tests(apdh, cpapd, ε*κ*n) @test rank(cpapd) == n @@ -177,7 +178,7 @@ end A = eltya[1 2; 2 1]; B = eltya[1, 1] C = cholfact(A) @test !isposdef(C) - @test !LinAlg.issuccess(C) + @test !LinearAlgebra.issuccess(C) Cstr = sprint((t, s) -> show(t, "text/plain", s), C) @test Cstr == "Failed factorization of type $(typeof(C))" @test_throws PosDefException C\B @@ -193,8 +194,8 @@ end A = randn(5,5) end A = convert(Matrix{eltya}, A'A) - @test Matrix(cholfact(A).L) ≈ Matrix(invoke(Base.LinAlg._chol!, Tuple{AbstractMatrix, Type{LowerTriangular}}, copy(A), LowerTriangular)[1]) - @test Matrix(cholfact(A).U) ≈ Matrix(invoke(Base.LinAlg._chol!, Tuple{AbstractMatrix, Type{UpperTriangular}}, copy(A), UpperTriangular)[1]) + @test Matrix(cholfact(A).L) ≈ Matrix(invoke(LinearAlgebra._chol!, Tuple{AbstractMatrix, Type{LowerTriangular}}, copy(A), LowerTriangular)[1]) + @test Matrix(cholfact(A).U) ≈ Matrix(invoke(LinearAlgebra._chol!, Tuple{AbstractMatrix, Type{UpperTriangular}}, copy(A), UpperTriangular)[1]) end end end @@ -202,8 +203,8 @@ end @testset "Cholesky factor of Matrix with non-commutative elements, here 2x2-matrices" begin X = Matrix{Float64}[0.1*rand(2,2) for i in 1:3, j = 1:3] - L = Matrix(Base.LinAlg._chol!(X*X', LowerTriangular)[1]) - U = Matrix(Base.LinAlg._chol!(X*X', UpperTriangular)[1]) + L = Matrix(LinearAlgebra._chol!(X*X', LowerTriangular)[1]) + U = Matrix(LinearAlgebra._chol!(X*X', 
UpperTriangular)[1]) XX = Matrix(X*X') @test sum(sum(norm, L*L' - XX)) < eps() @@ -220,10 +221,10 @@ end BcB = (BcB + BcB')/2 F = cholfact(Hermitian(AcA, uplo)) G = cholfact(Hermitian(BcB, uplo)) - @test Base.getproperty(LinAlg.lowrankupdate(F, v), uplo) ≈ Base.getproperty(G, uplo) - @test_throws DimensionMismatch LinAlg.lowrankupdate(F, Vector{eltype(v)}(uninitialized,length(v)+1)) - @test Base.getproperty(LinAlg.lowrankdowndate(G, v), uplo) ≈ Base.getproperty(F, uplo) - @test_throws DimensionMismatch LinAlg.lowrankdowndate(G, Vector{eltype(v)}(uninitialized,length(v)+1)) + @test Base.getproperty(LinearAlgebra.lowrankupdate(F, v), uplo) ≈ Base.getproperty(G, uplo) + @test_throws DimensionMismatch LinearAlgebra.lowrankupdate(F, Vector{eltype(v)}(uninitialized,length(v)+1)) + @test Base.getproperty(LinearAlgebra.lowrankdowndate(G, v), uplo) ≈ Base.getproperty(F, uplo) + @test_throws DimensionMismatch LinearAlgebra.lowrankdowndate(G, Vector{eltype(v)}(uninitialized,length(v)+1)) end end @@ -262,13 +263,15 @@ end R = randn(5, 5) C = complex.(R, R) for A in (R, C) - @test !LinAlg.issuccess(cholfact(A)) - @test !LinAlg.issuccess(cholfact!(copy(A))) + @test !LinearAlgebra.issuccess(cholfact(A)) + @test !LinearAlgebra.issuccess(cholfact!(copy(A))) @test_throws PosDefException chol(A) - @test_throws PosDefException Base.LinAlg.chol!(copy(A)) + @test_throws PosDefException LinearAlgebra.chol!(copy(A)) end end @testset "fail for non-BLAS element types" begin @test_throws ArgumentError cholfact!(Hermitian(rand(Float16, 5,5)), Val(true)) end + +end # module TestCholesky diff --git a/test/linalg/dense.jl b/stdlib/LinearAlgebra/test/dense.jl similarity index 99% rename from test/linalg/dense.jl rename to stdlib/LinearAlgebra/test/dense.jl index 88427bd0d49123..4d9284d8d63cf6 100644 --- a/test/linalg/dense.jl +++ b/stdlib/LinearAlgebra/test/dense.jl @@ -1,13 +1,14 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license -using Test, Random +module TestDense + +using Test, LinearAlgebra, Random +using LinearAlgebra: BlasComplex, BlasFloat, BlasReal @testset "Check that non-floats are correctly promoted" begin @test [1 0 0; 0 1 0]\[1,1] ≈ [1;1;0] end -using Base.LinAlg: BlasComplex, BlasFloat, BlasReal - n = 10 # Split n into 2 parts for tests needing two matrices @@ -164,7 +165,7 @@ end @test norm(x, 2) ≈ sqrt(10) @test norm(x, 3) ≈ cbrt(10) @test norm(x, Inf) ≈ 1 - if elty <: Base.LinAlg.BlasFloat + if elty <: LinearAlgebra.BlasFloat @test norm(x, 1:4) ≈ 2 @test_throws BoundsError norm(x,-1:4) @test_throws BoundsError norm(x,1:11) @@ -690,13 +691,13 @@ end #Ah : Hermitian Matrix Ah = convert(Matrix{elty}, [3 1; 1 3]) - if elty <: Base.LinAlg.BlasComplex + if elty <: LinearAlgebra.BlasComplex Ah += [0 im; -im 0] end #ADi : Diagonal Matrix ADi = convert(Matrix{elty}, [3 0; 0 3]) - if elty <: Base.LinAlg.BlasComplex + if elty <: LinearAlgebra.BlasComplex ADi += [im 0; 0 im] end @@ -823,8 +824,8 @@ end A = rand(10,10) B = view(A, 2:2:10, 2:2:10) - @test Base.LinAlg.stride1(a) == 1 - @test Base.LinAlg.stride1(b) == 2 + @test LinearAlgebra.stride1(a) == 1 + @test LinearAlgebra.stride1(b) == 2 @test strides(a) == (1,) @test strides(b) == (2,) @@ -843,3 +844,5 @@ end end end end + +end # module TestDense diff --git a/test/linalg/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl similarity index 98% rename from test/linalg/diagonal.jl rename to stdlib/LinearAlgebra/test/diagonal.jl index ecfd6e6e533bd5..6c467d293f1f0f 100644 --- a/test/linalg/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -1,9 +1,9 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Test, Random -using Base.LinAlg: mul!, ldiv!, rdiv! -import Base.LinAlg: BlasFloat, BlasComplex, SingularException -using SparseArrays +module TestDiagonal + +using Test, LinearAlgebra, SparseArrays, Random +using LinearAlgebra: mul!, ldiv!, rdiv!, BlasFloat, BlasComplex, SingularException n=12 #Size of matrix problem to test srand(1) @@ -336,7 +336,7 @@ end @test Matrix(1.0I, 5, 5) \ Diagonal(fill(1.,5)) == Matrix(I, 5, 5) @testset "Triangular and Diagonal" begin - for T in (LowerTriangular(randn(5,5)), LinAlg.UnitLowerTriangular(randn(5,5))) + for T in (LowerTriangular(randn(5,5)), LinearAlgebra.UnitLowerTriangular(randn(5,5))) D = Diagonal(randn(5)) @test T*D == Array(T)*Array(D) @test T'D == Array(T)'*Array(D) @@ -427,3 +427,5 @@ end @test Diagonal(adjoint([1, 2, 3])) == Diagonal([1 2 3]) @test Diagonal(transpose([1, 2, 3])) == Diagonal([1 2 3]) end + +end # module TestDiagonal diff --git a/test/linalg/eigen.jl b/stdlib/LinearAlgebra/test/eigen.jl similarity index 96% rename from test/linalg/eigen.jl rename to stdlib/LinearAlgebra/test/eigen.jl index 9f41d92cec4be7..0b8969078bd07e 100644 --- a/test/linalg/eigen.jl +++ b/stdlib/LinearAlgebra/test/eigen.jl @@ -1,8 +1,9 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license -using Test, Random +module TestEigen -using Base.LinAlg: BlasComplex, BlasFloat, BlasReal, QRPivoted +using Test, LinearAlgebra, Random +using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted n = 10 @@ -120,3 +121,5 @@ end @test λ ≈ [0.5, 2/3] end end + +end # module TestEigen diff --git a/test/linalg/generic.jl b/stdlib/LinearAlgebra/test/generic.jl similarity index 81% rename from test/linalg/generic.jl rename to stdlib/LinearAlgebra/test/generic.jl index 91768783cca179..86fbb05f8b5007 100644 --- a/test/linalg/generic.jl +++ b/stdlib/LinearAlgebra/test/generic.jl @@ -1,7 +1,9 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +module TestGeneric + +using Test, LinearAlgebra, Random import Base: -, *, /, \ -using Test, Random # A custom Quaternion type with minimal defined interface and methods. # Used to test scale and scale! methods to show non-commutativity. @@ -102,52 +104,52 @@ end @test_throws ArgumentError diff(X,-1) end -@testset "linrange" begin - # make sure unequal input arrays throw an error - x = [2; 5; 6] - y = [3; 7; 10; 10] - @test_throws DimensionMismatch linreg(x, y) - x = [2 5 6] - y = [3; 7; 10] - @test_throws MethodError linreg(x, y) - - # check (UnitRange, Array) - x = 1:12 - y = [5.5; 6.3; 7.6; 8.8; 10.9; 11.79; 13.48; 15.02; 17.77; 20.81; 22.0; 22.99] - @test [linreg(x,y)...] ≈ [2.5559090909090867, 1.6960139860139862] - @test [linreg(view(x,1:6),view(y,1:6))...] ≈ [3.8366666666666642,1.3271428571428574] - - # check (LinSpace, UnitRange) - x = linspace(1.0, 12.0, 100) - y = -100:-1 - @test [linreg(x, y)...] ≈ [-109.0, 9.0] - - # check (UnitRange, UnitRange) - x = 1:12 - y = 12:-1:1 - @test [linreg(x, y)...] ≈ [13.0, -1.0] - - # check (LinSpace, LinSpace) - x = linspace(-5, 10, 100) - y = linspace(50, 200, 100) - @test [linreg(x, y)...] ≈ [100.0, 10.0] - - # check (Array, Array) - # Anscombe's quartet (https://en.wikipedia.org/wiki/Anscombe%27s_quartet) - x123 = [10.0; 8.0; 13.0; 9.0; 11.0; 14.0; 6.0; 4.0; 12.0; 7.0; 5.0] - y1 = [8.04; 6.95; 7.58; 8.81; 8.33; 9.96; 7.24; 4.26; 10.84; 4.82; 5.68] - @test [linreg(x123,y1)...] ≈ [3.0,0.5] atol=15e-5 - - y2 = [9.14; 8.14; 8.74; 8.77; 9.26; 8.10; 6.12; 3.10; 9.13; 7.26; 4.74] - @test [linreg(x123,y2)...] ≈ [3.0,0.5] atol=10e-3 - - y3 = [7.46; 6.77; 12.74; 7.11; 7.81; 8.84; 6.08; 5.39; 8.15; 6.42; 5.73] - @test [linreg(x123,y3)...] ≈ [3.0,0.5] atol=10e-3 - - x4 = [8.0; 8.0; 8.0; 8.0; 8.0; 8.0; 8.0; 19.0; 8.0; 8.0; 8.0] - y4 = [6.58; 5.76; 7.71; 8.84; 8.47; 7.04; 5.25; 12.50; 5.56; 7.91; 6.89] - @test [linreg(x4,y4)...] ≈ [3.0,0.5] atol=10e-3 -end +# @testset "linrange" begin +# # make sure unequal input arrays throw an error +# x = [2; 5; 6] +# y = [3; 7; 10; 10] +# @test_throws DimensionMismatch linreg(x, y) +# x = [2 5 6] +# y = [3; 7; 10] +# @test_throws MethodError linreg(x, y) + +# # check (UnitRange, Array) +# x = 1:12 +# y = [5.5; 6.3; 7.6; 8.8; 10.9; 11.79; 13.48; 15.02; 17.77; 20.81; 22.0; 22.99] +# @test [linreg(x,y)...] ≈ [2.5559090909090867, 1.6960139860139862] +# @test [linreg(view(x,1:6),view(y,1:6))...] ≈ [3.8366666666666642,1.3271428571428574] + +# # check (LinSpace, UnitRange) +# x = linspace(1.0, 12.0, 100) +# y = -100:-1 +# @test [linreg(x, y)...] ≈ [-109.0, 9.0] + +# # check (UnitRange, UnitRange) +# x = 1:12 +# y = 12:-1:1 +# @test [linreg(x, y)...] ≈ [13.0, -1.0] + +# # check (LinSpace, LinSpace) +# x = linspace(-5, 10, 100) +# y = linspace(50, 200, 100) +# @test [linreg(x, y)...] 
≈ [100.0, 10.0] + +# # check (Array, Array) +# # Anscombe's quartet (https://en.wikipedia.org/wiki/Anscombe%27s_quartet) +# x123 = [10.0; 8.0; 13.0; 9.0; 11.0; 14.0; 6.0; 4.0; 12.0; 7.0; 5.0] +# y1 = [8.04; 6.95; 7.58; 8.81; 8.33; 9.96; 7.24; 4.26; 10.84; 4.82; 5.68] +# @test [linreg(x123,y1)...] ≈ [3.0,0.5] atol=15e-5 + +# y2 = [9.14; 8.14; 8.74; 8.77; 9.26; 8.10; 6.12; 3.10; 9.13; 7.26; 4.74] +# @test [linreg(x123,y2)...] ≈ [3.0,0.5] atol=10e-3 + +# y3 = [7.46; 6.77; 12.74; 7.11; 7.81; 8.84; 6.08; 5.39; 8.15; 6.42; 5.73] +# @test [linreg(x123,y3)...] ≈ [3.0,0.5] atol=10e-3 + +# x4 = [8.0; 8.0; 8.0; 8.0; 8.0; 8.0; 8.0; 19.0; 8.0; 8.0; 8.0] +# y4 = [6.58; 5.76; 7.71; 8.84; 8.47; 7.04; 5.25; 12.50; 5.56; 7.91; 6.89] +# @test [linreg(x4,y4)...] ≈ [3.0,0.5] atol=10e-3 +# end @testset "diag" begin A = Matrix(1.0I, 4, 4) @@ -161,12 +163,12 @@ end x = ['a','b','c','d','e'] y = ['a','b','c','d','e'] α, β = 'f', 'g' - @test_throws DimensionMismatch Base.LinAlg.axpy!(α,x,['g']) - @test_throws DimensionMismatch Base.LinAlg.axpby!(α,x,β,['g']) - @test_throws BoundsError Base.LinAlg.axpy!(α,x,Vector(-1:5),y,Vector(1:7)) - @test_throws BoundsError Base.LinAlg.axpy!(α,x,Vector(1:7),y,Vector(-1:5)) - @test_throws BoundsError Base.LinAlg.axpy!(α,x,Vector(1:7),y,Vector(1:7)) - @test_throws DimensionMismatch Base.LinAlg.axpy!(α,x,Vector(1:3),y,Vector(1:5)) + @test_throws DimensionMismatch LinearAlgebra.axpy!(α,x,['g']) + @test_throws DimensionMismatch LinearAlgebra.axpby!(α,x,β,['g']) + @test_throws BoundsError LinearAlgebra.axpy!(α,x,Vector(-1:5),y,Vector(1:7)) + @test_throws BoundsError LinearAlgebra.axpy!(α,x,Vector(1:7),y,Vector(-1:5)) + @test_throws BoundsError LinearAlgebra.axpy!(α,x,Vector(1:7),y,Vector(1:7)) + @test_throws DimensionMismatch LinearAlgebra.axpy!(α,x,Vector(1:3),y,Vector(1:5)) end @test !issymmetric(fill(1,5,3)) @@ -183,7 +185,7 @@ end @testset "2-argument version of scale!" begin @test scale!(copy(a), 5.) == a*5 @test scale!(5., copy(a)) == a*5 - b = randn(Base.LinAlg.SCAL_CUTOFF) # make sure we try BLAS path + b = randn(LinearAlgebra.SCAL_CUTOFF) # make sure we try BLAS path subB = view(b, :, :) @test scale!(copy(b), 5.) == b*5 @test scale!(copy(subB), 5.) == subB*5 @@ -265,26 +267,26 @@ end @test norm(x, 3) ≈ cbrt(sqrt(125)+125) end -@testset "LinAlg.axp(b)y! for element type without commutative multiplication" begin +@testset "LinearAlgebra.axp(b)y! for element type without commutative multiplication" begin α = [1 2; 3 4] β = [5 6; 7 8] x = fill([ 9 10; 11 12], 3) y = fill([13 14; 15 16], 3) - axpy = LinAlg.axpy!(α, x, deepcopy(y)) - axpby = LinAlg.axpby!(α, x, β, deepcopy(y)) + axpy = LinearAlgebra.axpy!(α, x, deepcopy(y)) + axpby = LinearAlgebra.axpby!(α, x, β, deepcopy(y)) @test axpy == x .* [α] .+ y @test axpy != [α] .* x .+ y @test axpby == x .* [α] .+ y .* [β] @test axpby != [α] .* x .+ [β] .* y end -@testset "LinAlg.axpy! for x and y of different dimensions" begin +@testset "LinearAlgebra.axpy! for x and y of different dimensions" begin α = 5 x = 2:5 y = fill(1, 2, 4) rx = [1 4] ry = [2 8] - @test LinAlg.axpy!(α, x, rx, y, ry) == [1 1 1 1; 11 1 1 26] + @test LinearAlgebra.axpy!(α, x, rx, y, ry) == [1 1 1 1; 11 1 1 26] end @testset "norm and normalize!" 
begin vr = [3.0, 4.0] @@ -316,15 +318,15 @@ end @test det([true false; false true]) == det(Matrix(1I, 2, 2)) end -@test_throws ArgumentError Base.LinAlg.char_uplo(:Z) +@test_throws ArgumentError LinearAlgebra.char_uplo(:Z) @testset "Issue 17650" begin @test [0.01311489462160816, Inf] ≈ [0.013114894621608135, Inf] end @testset "Issue 19035" begin - @test Base.LinAlg.promote_leaf_eltypes([1, 2, [3.0, 4.0]]) == Float64 - @test Base.LinAlg.promote_leaf_eltypes([[1,2, [3,4]], 5.0, [6im, [7.0, 8.0]]]) == ComplexF64 + @test LinearAlgebra.promote_leaf_eltypes([1, 2, [3.0, 4.0]]) == Float64 + @test LinearAlgebra.promote_leaf_eltypes([[1,2, [3,4]], 5.0, [6im, [7.0, 8.0]]]) == ComplexF64 @test [1, 2, 3] ≈ [1, 2, 3] @test [[1, 2], [3, 4]] ≈ [[1, 2], [3, 4]] @test [[1, 2], [3, 4]] ≈ [[1.0-eps(), 2.0+eps()], [3.0+2eps(), 4.0-1e8eps()]] @@ -351,8 +353,8 @@ Base.one(::Type{ModInt{n}}) where {n} = ModInt{n}(1) Base.one(::ModInt{n}) where {n} = ModInt{n}(1) Base.adjoint(a::ModInt{n}) where {n} = ModInt{n}(conj(a)) Base.transpose(a::ModInt{n}) where {n} = a # see Issue 20978 -Base.LinAlg.Adjoint(a::ModInt{n}) where {n} = adjoint(a) -Base.LinAlg.Transpose(a::ModInt{n}) where {n} = transpose(a) +LinearAlgebra.Adjoint(a::ModInt{n}) where {n} = adjoint(a) +LinearAlgebra.Transpose(a::ModInt{n}) where {n} = transpose(a) @testset "Issue 22042" begin A = [ModInt{2}(1) ModInt{2}(0); ModInt{2}(1) ModInt{2}(1)] @@ -373,7 +375,7 @@ end end @testset "generic functions for checking whether matrices have banded structure" begin - using Base.LinAlg: isbanded + using LinearAlgebra: isbanded pentadiag = [1 2 3; 4 5 6; 7 8 9] tridiag = [1 2 0; 4 5 6; 0 8 9] ubidiag = [1 2 0; 0 5 6; 0 0 9] @@ -425,3 +427,5 @@ end @test isdiag(adiag) end end + +end # module TestGeneric diff --git a/test/linalg/givens.jl b/stdlib/LinearAlgebra/test/givens.jl similarity index 94% rename from test/linalg/givens.jl rename to stdlib/LinearAlgebra/test/givens.jl index bb0b04ca9af17e..fd722d8aa48681 100644 --- a/test/linalg/givens.jl +++ b/stdlib/LinearAlgebra/test/givens.jl @@ -1,7 +1,8 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Test, Random -using Base.LinAlg: mul! +module TestGivens + +using Test, LinearAlgebra, Random # Test givens rotations @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) @@ -12,7 +13,7 @@ using Base.LinAlg: mul! end @testset for A in (raw_A, view(raw_A, 1:10, 1:10)) Ac = copy(A) - R = Base.LinAlg.Rotation(Base.LinAlg.Givens{elty}[]) + R = LinearAlgebra.Rotation(LinearAlgebra.Givens{elty}[]) for j = 1:8 for i = j+2:10 G, _ = givens(A, j+1, i, j) @@ -66,3 +67,5 @@ using Base.LinAlg: mul! end end end + +end # module TestGivens diff --git a/test/linalg/hessenberg.jl b/stdlib/LinearAlgebra/test/hessenberg.jl similarity index 91% rename from test/linalg/hessenberg.jl rename to stdlib/LinearAlgebra/test/hessenberg.jl index 6e155d46ef1f44..b649511a8a1dfd 100644 --- a/test/linalg/hessenberg.jl +++ b/stdlib/LinearAlgebra/test/hessenberg.jl @@ -1,8 +1,8 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license -using Test, Random +module TestHessenberg -using Base.LinAlg: BlasComplex, BlasFloat, BlasReal, QRPivoted +using Test, LinearAlgebra, Random let n = 10 srand(1234321) @@ -31,3 +31,5 @@ let n = 10 end end end + +end # module TestHessenberg diff --git a/test/linalg/lapack.jl b/stdlib/LinearAlgebra/test/lapack.jl similarity index 95% rename from test/linalg/lapack.jl rename to stdlib/LinearAlgebra/test/lapack.jl index 07c956d1a5c121..d3fcd87319b4a6 100644 --- a/test/linalg/lapack.jl +++ b/stdlib/LinearAlgebra/test/lapack.jl @@ -1,15 +1,14 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Test, Random +module TestLAPACK -import Base.LinAlg.BlasInt +using Test, LinearAlgebra, Random +using LinearAlgebra: BlasInt - - -@test_throws ArgumentError Base.LinAlg.LAPACK.chkuplo('Z') -@test_throws ArgumentError Base.LinAlg.LAPACK.chkside('Z') -@test_throws ArgumentError Base.LinAlg.LAPACK.chkdiag('Z') -@test_throws ArgumentError Base.LinAlg.LAPACK.chktrans('Z') +@test_throws ArgumentError LinearAlgebra.LAPACK.chkuplo('Z') +@test_throws ArgumentError LinearAlgebra.LAPACK.chkside('Z') +@test_throws ArgumentError LinearAlgebra.LAPACK.chkdiag('Z') +@test_throws ArgumentError LinearAlgebra.LAPACK.chktrans('Z') @testset "syevr" begin srand(123) @@ -107,7 +106,7 @@ end A = diagm(-2 => dl2, -1 => dl, 0 => d, 1 => du) @test A\C ≈ D @test_throws DimensionMismatch LAPACK.gbtrs!('N',2,1,6,AB,ipiv,Matrix{elty}(uninitialized,7,6)) - @test_throws Base.LinAlg.LAPACKException LAPACK.gbtrf!(2,1,6,zeros(elty,6,6)) + @test_throws LinearAlgebra.LAPACKException LAPACK.gbtrf!(2,1,6,zeros(elty,6,6)) end end @@ -115,7 +114,7 @@ end @testset "geqp3, geqrt error handling" begin @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) x10, x11 = Vector{elty}.(uninitialized, (10, 11)) - y10, y11 = Vector{Base.LinAlg.BlasInt}.(uninitialized, (10, 11)) + y10, y11 = Vector{LinearAlgebra.BlasInt}.(uninitialized, (10, 11)) A10x10, A11x10, A10x11, A11x11 = Matrix{elty}.(uninitialized, ((10,10), (11,10), (10,11), (11,11))) @test_throws DimensionMismatch LAPACK.geqlf!(A10x10, x11) @test_throws DimensionMismatch LAPACK.gelqf!(A10x10, x11) @@ -132,7 +131,7 @@ end @testset "gels, gesv, getrs, getri error handling" begin @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) A10x10, B11x11 = Matrix{elty}.(uninitialized, ((10,10), (11,11))) - x10, x11 = Vector{Base.LinAlg.BlasInt}.(uninitialized, (10, 11)) + x10, x11 = Vector{LinearAlgebra.BlasInt}.(uninitialized, (10, 11)) @test_throws DimensionMismatch LAPACK.gels!('N',A10x10,B11x11) @test_throws DimensionMismatch LAPACK.gels!('T',A10x10,B11x11) @test_throws DimensionMismatch LAPACK.gesv!(A10x10,B11x11) @@ -563,14 +562,14 @@ end @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) T = triu(rand(elty,10,10)) S = copy(T) - select = zeros(Base.LinAlg.BlasInt,10) + select = zeros(LinearAlgebra.BlasInt,10) select[1] = 1 select,Vr = LAPACK.trevc!('R','S',select,copy(T)) @test Vr ≈ eigvecs(S)[:,1] - select = zeros(Base.LinAlg.BlasInt,10) + select = zeros(LinearAlgebra.BlasInt,10) select[1] = 1 select,Vl = LAPACK.trevc!('L','S',select,copy(T)) - select = zeros(Base.LinAlg.BlasInt,10) + select = zeros(LinearAlgebra.BlasInt,10) select[1] = 1 select,Vln,Vrn = LAPACK.trevc!('B','S',select,copy(T)) @test Vrn ≈ eigvecs(S)[:,1] @@ -592,7 +591,7 @@ end for c in ('V', 'N') A = convert(Matrix{elty}, [7 2 2 1; 1 5 2 0; 0 3 9 4; 1 1 1 4]) T,Q,d = schur(A) - s, sep = 
Base.LinAlg.LAPACK.trsen!(job,c,Array{LinAlg.BlasInt}([0,1,0,0]),T,Q)[4:5] + s, sep = LinearAlgebra.LAPACK.trsen!(job,c,Array{LinearAlgebra.BlasInt}([0,1,0,0]),T,Q)[4:5] @test d[1] ≈ T[2,2] @test d[2] ≈ T[1,1] if c == 'V' @@ -618,7 +617,7 @@ end for c in ('V', 'N') A = convert(Matrix{elty}, [7 2 2 1; 1 5 2 0; 0 3 9 4; 1 1 1 4]) T,Q,d = schur(A) - Base.LinAlg.LAPACK.trexc!(c,LinAlg.BlasInt(1),LinAlg.BlasInt(2),T,Q) + LinearAlgebra.LAPACK.trexc!(c,LinearAlgebra.BlasInt(1),LinearAlgebra.BlasInt(2),T,Q) @test d[1] ≈ T[2,2] @test d[2] ≈ T[1,1] if c == 'V' @@ -638,16 +637,16 @@ end A = convert(Matrix{elty}, complex.(randn(10,nn),randn(10,nn))) end ## LU (only equal for real because LAPACK uses different absolute value when choosing permutations) if elty <: Real - FJulia = Base.LinAlg.generic_lufact!(copy(A)) - FLAPACK = Base.LinAlg.LAPACK.getrf!(copy(A)) + FJulia = LinearAlgebra.generic_lufact!(copy(A)) + FLAPACK = LinearAlgebra.LAPACK.getrf!(copy(A)) @test FJulia.factors ≈ FLAPACK[1] @test FJulia.ipiv ≈ FLAPACK[2] @test FJulia.info ≈ FLAPACK[3] end ## QR - FJulia = LinAlg.qrfactUnblocked!(copy(A)) - FLAPACK = Base.LinAlg.LAPACK.geqrf!(copy(A)) + FJulia = LinearAlgebra.qrfactUnblocked!(copy(A)) + FLAPACK = LinearAlgebra.LAPACK.geqrf!(copy(A)) @test FJulia.factors ≈ FLAPACK[1] @test FJulia.τ ≈ FLAPACK[2] end @@ -663,3 +662,5 @@ end let A = [NaN NaN; NaN NaN] @test_throws ArgumentError eigfact(A) end + +end # module TestLAPACK diff --git a/test/linalg/lq.jl b/stdlib/LinearAlgebra/test/lq.jl similarity index 96% rename from test/linalg/lq.jl rename to stdlib/LinearAlgebra/test/lq.jl index 63e353e7ea4070..dc63e77a0441c5 100644 --- a/test/linalg/lq.jl +++ b/stdlib/LinearAlgebra/test/lq.jl @@ -1,9 +1,9 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Test, Random - -using Base.LinAlg: BlasComplex, BlasFloat, BlasReal, mul! +module TestLQ +using Test, LinearAlgebra, Random +using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, mul! n = 10 @@ -21,8 +21,8 @@ breal = randn(n,2)/2 bimg = randn(n,2)/2 # helper functions to unambiguously recover explicit forms of an LQPackedQ -squareQ(Q::LinAlg.LQPackedQ) = (n = size(Q.factors, 2); mul!(Q, Matrix{eltype(Q)}(I, n, n))) -rectangularQ(Q::LinAlg.LQPackedQ) = convert(Array, Q) +squareQ(Q::LinearAlgebra.LQPackedQ) = (n = size(Q.factors, 2); mul!(Q, Matrix{eltype(Q)}(I, n, n))) +rectangularQ(Q::LinearAlgebra.LQPackedQ) = convert(Array, Q) @testset for eltya in (Float32, Float64, ComplexF32, ComplexF64) a = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? 
complex.(areal, aimg) : areal) @@ -146,7 +146,7 @@ end @testset "getindex on LQPackedQ (#23733)" begin local m, n - function getqs(F::Base.LinAlg.LQ) + function getqs(F::LinearAlgebra.LQ) implicitQ = F.Q sq = size(implicitQ.factors, 2) explicitQ = mul!(implicitQ, Matrix{eltype(implicitQ)}(I, sq, sq)) @@ -188,7 +188,7 @@ end end @testset "postmultiplication with / right-application of LQPackedQ (#23779)" begin - function getqs(F::Base.LinAlg.LQ) + function getqs(F::LinearAlgebra.LQ) implicitQ = F.Q explicitQ = mul!(implicitQ, Matrix{eltype(implicitQ)}(I, size(implicitQ)...)) return implicitQ, explicitQ @@ -223,3 +223,5 @@ end @test_throws DimensionMismatch C * adjoint(implicitQ) @test_throws DimensionMismatch adjoint(C) * adjoint(implicitQ) end + +end # module TestLQ diff --git a/test/linalg/lu.jl b/stdlib/LinearAlgebra/test/lu.jl similarity index 93% rename from test/linalg/lu.jl rename to stdlib/LinearAlgebra/test/lu.jl index e25fa73d4be640..c856a4cc17a93c 100644 --- a/test/linalg/lu.jl +++ b/stdlib/LinearAlgebra/test/lu.jl @@ -1,8 +1,9 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Test -using Base.LinAlg: ldiv! -import Base.LinAlg.BlasInt, Base.LinAlg.BlasFloat +module TestLU + +using Test, LinearAlgebra, Random +using LinearAlgebra: ldiv!, BlasInt, BlasFloat n = 10 @@ -58,7 +59,7 @@ dimg = randn(n)/2 end @testset "Singular LU" begin lua = lufact(zeros(eltya, 3, 3)) - @test !LinAlg.issuccess(lua) + @test !LinearAlgebra.issuccess(lua) @test sprint((t, s) -> show(t, "text/plain", s), lua) == "Failed factorization of type $(typeof(lua))" end κ = cond(a,1) @@ -74,7 +75,7 @@ dimg = randn(n)/2 @test copy(lua) == lua if eltya <: BlasFloat # test conversion of LU factorization's numerical type - bft = eltya <: Real ? Base.LinAlg.LU{BigFloat} : Base.LinAlg.LU{Complex{BigFloat}} + bft = eltya <: Real ? 
LinearAlgebra.LU{BigFloat} : LinearAlgebra.LU{Complex{BigFloat}} bflua = convert(bft, lua) @test bflua.L*bflua.U ≈ big.(a)[p,:] rtol=ε end @@ -86,7 +87,7 @@ dimg = randn(n)/2 κd = cond(Array(d),1) @testset "Tridiagonal LU" begin lud = lufact(d) - @test LinAlg.issuccess(lud) + @test LinearAlgebra.issuccess(lud) @test lufact(lud) == lud @test_throws ErrorException lud.Z @test lud.L*lud.U ≈ lud.P*Array(d) @@ -146,14 +147,14 @@ dimg = randn(n)/2 @test_throws DimensionMismatch lud\f @test_throws DimensionMismatch transpose(lud)\f @test_throws DimensionMismatch lud'\f - @test_throws DimensionMismatch Base.LinAlg.ldiv!(transpose(lud), f) + @test_throws DimensionMismatch LinearAlgebra.ldiv!(transpose(lud), f) let Bs = copy(b) for bb in (Bs, view(Bs, 1:n, 1)) @test norm(d*(lud\bb) - bb, 1) < ε*κd*n*2 # Two because the right hand side has two columns if eltya <: Real @test norm((transpose(lud)\bb) - Array(transpose(d))\bb, 1) < ε*κd*n*2 # Two because the right hand side has two columns if eltya != Int && eltyb != Int - @test norm(Base.LinAlg.ldiv!(transpose(lud), copy(bb)) - Array(transpose(d))\bb, 1) < ε*κd*n*2 + @test norm(LinearAlgebra.ldiv!(transpose(lud), copy(bb)) - Array(transpose(d))\bb, 1) < ε*κd*n*2 end end if eltya <: Complex @@ -173,7 +174,7 @@ dimg = randn(n)/2 du[1] = zero(eltya) dl[1] = zero(eltya) zT = Tridiagonal(dl,dd,du) - @test !LinAlg.issuccess(lufact(zT)) + @test !LinearAlgebra.issuccess(lufact(zT)) end end @testset "Thin LU" begin @@ -242,7 +243,7 @@ end end @testset "Issue 21453" begin - @test_throws ArgumentError LinAlg._cond1Inf(lufact(randn(5,5)), 2, 2.0) + @test_throws ArgumentError LinearAlgebra._cond1Inf(lufact(randn(5,5)), 2, 2.0) end @testset "REPL printing" begin @@ -250,7 +251,7 @@ end show(bf, "text/plain", lufact(Matrix(I, 4, 4))) seekstart(bf) @test String(take!(bf)) == """ -Base.LinAlg.LU{Float64,Array{Float64,2}} +LinearAlgebra.LU{Float64,Array{Float64,2}} L factor: 4×4 Array{Float64,2}: 1.0 0.0 0.0 0.0 @@ -264,3 +265,5 @@ U factor: 0.0 0.0 1.0 0.0 0.0 0.0 0.0 1.0""" end + +end # module TestLU diff --git a/test/linalg/matmul.jl b/stdlib/LinearAlgebra/test/matmul.jl similarity index 88% rename from test/linalg/matmul.jl rename to stdlib/LinearAlgebra/test/matmul.jl index b51b4368722709..03d65760d2210e 100644 --- a/test/linalg/matmul.jl +++ b/stdlib/LinearAlgebra/test/matmul.jl @@ -1,8 +1,9 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Test, Random +module TestMatmul -using Base.LinAlg: mul! +using Test, LinearAlgebra, Random +using LinearAlgebra: mul! 
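The LQ, LU, and matmul hunks in this part of the patch all apply the same mechanical transformation: each test file becomes a self-contained module, loads the LinearAlgebra stdlib explicitly, and pulls unexported helpers (mul!, ldiv!, BlasFloat, ...) from LinearAlgebra rather than Base.LinAlg. A minimal sketch of the resulting file layout; the module name and testset below are illustrative only and not part of this patch:

# hypothetical stdlib/LinearAlgebra/test/example.jl following the pattern above
module TestExample

using Test, LinearAlgebra, Random
using LinearAlgebra: mul!, BlasFloat   # unexported names now live in the stdlib

@testset "mul! against *" begin
    A, B = randn(3, 3), randn(3, 3)
    C = similar(A)
    @test mul!(C, A, B) ≈ A * B
end

end # module TestExample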
## Test Julia fallbacks to BLAS routines @@ -102,11 +103,11 @@ end @test mul!(C, transpose(A), B) == A'*B @test mul!(C, A, transpose(B)) == A*B' @test mul!(C, transpose(A), transpose(B)) == A'*B' - @test Base.LinAlg.mul!(C, adjoint(A), transpose(B)) == A'*transpose(B) + @test LinearAlgebra.mul!(C, adjoint(A), transpose(B)) == A'*transpose(B) #test DimensionMismatch for generic_matmatmul - @test_throws DimensionMismatch Base.LinAlg.mul!(C, adjoint(A), transpose(fill(1,4,4))) - @test_throws DimensionMismatch Base.LinAlg.mul!(C, adjoint(fill(1,4,4)), transpose(B)) + @test_throws DimensionMismatch LinearAlgebra.mul!(C, adjoint(A), transpose(fill(1,4,4))) + @test_throws DimensionMismatch LinearAlgebra.mul!(C, adjoint(fill(1,4,4)), transpose(B)) end vv = [1,2] CC = Matrix{Int}(uninitialized, 2, 2) @@ -119,8 +120,8 @@ end AA = rand(5,5) BB = rand(5) for A in (copy(AA), view(AA, 1:5, 1:5)), B in (copy(BB), view(BB, 1:5)) - @test_throws DimensionMismatch Base.LinAlg.generic_matvecmul!(zeros(6),'N',A,B) - @test_throws DimensionMismatch Base.LinAlg.generic_matvecmul!(B,'N',A,zeros(6)) + @test_throws DimensionMismatch LinearAlgebra.generic_matvecmul!(zeros(6),'N',A,B) + @test_throws DimensionMismatch LinearAlgebra.generic_matvecmul!(B,'N',A,zeros(6)) end vv = [1,2,3] CC = Matrix{Int}(uninitialized, 3, 3) @@ -139,9 +140,9 @@ end BB = rand(Float64,6,6) CC = zeros(Float64,6,6) for A in (copy(AA), view(AA, 1:6, 1:6)), B in (copy(BB), view(BB, 1:6, 1:6)), C in (copy(CC), view(CC, 1:6, 1:6)) - @test Base.LinAlg.mul!(C, transpose(A), transpose(B)) == transpose(A)*transpose(B) - @test Base.LinAlg.mul!(C, A, adjoint(B)) == A*transpose(B) - @test Base.LinAlg.mul!(C, adjoint(A), B) == transpose(A)*B + @test LinearAlgebra.mul!(C, transpose(A), transpose(B)) == transpose(A)*transpose(B) + @test LinearAlgebra.mul!(C, A, adjoint(B)) == A*transpose(B) + @test LinearAlgebra.mul!(C, adjoint(A), B) == transpose(A)*B end end @@ -194,8 +195,8 @@ end @test *(adjoint(Asub), Asub) == *(adjoint(Aref), Aref) A5x5, A6x5 = Matrix{Float64}.(uninitialized, ((5, 5), (6, 5))) - @test_throws DimensionMismatch Base.LinAlg.syrk_wrapper!(A5x5,'N',A6x5) - @test_throws DimensionMismatch Base.LinAlg.herk_wrapper!(A5x5,'N',A6x5) + @test_throws DimensionMismatch LinearAlgebra.syrk_wrapper!(A5x5,'N',A6x5) + @test_throws DimensionMismatch LinearAlgebra.herk_wrapper!(A5x5,'N',A6x5) end @testset "matmul for types w/o sizeof (issue #1282)" begin @@ -261,24 +262,24 @@ end @test A*b == Vector{Float64}[[2,2,1], [2,2]] end -@test_throws ArgumentError Base.LinAlg.copytri!(Matrix{Float64}(uninitialized,10,10),'Z') +@test_throws ArgumentError LinearAlgebra.copytri!(Matrix{Float64}(uninitialized,10,10),'Z') @testset "gemv! 
and gemm_wrapper for $elty" for elty in [Float32,Float64,ComplexF64,ComplexF32] A10x10, x10, x11 = Array{elty}.(uninitialized, ((10,10), 10, 11)) - @test_throws DimensionMismatch Base.LinAlg.gemv!(x10,'N',A10x10,x11) - @test_throws DimensionMismatch Base.LinAlg.gemv!(x11,'N',A10x10,x10) - @test Base.LinAlg.gemv!(elty[], 'N', Matrix{elty}(uninitialized,0,0), elty[]) == elty[] - @test Base.LinAlg.gemv!(x10, 'N', Matrix{elty}(uninitialized,10,0), elty[]) == zeros(elty,10) + @test_throws DimensionMismatch LinearAlgebra.gemv!(x10,'N',A10x10,x11) + @test_throws DimensionMismatch LinearAlgebra.gemv!(x11,'N',A10x10,x10) + @test LinearAlgebra.gemv!(elty[], 'N', Matrix{elty}(uninitialized,0,0), elty[]) == elty[] + @test LinearAlgebra.gemv!(x10, 'N', Matrix{elty}(uninitialized,10,0), elty[]) == zeros(elty,10) I0x0 = Matrix{elty}(I, 0, 0) I10x10 = Matrix{elty}(I, 10, 10) I10x11 = Matrix{elty}(I, 10, 11) - @test Base.LinAlg.gemm_wrapper('N','N', I10x10, I10x10) == I10x10 - @test_throws DimensionMismatch Base.LinAlg.gemm_wrapper!(I10x10,'N','N', I10x11, I10x10) - @test_throws DimensionMismatch Base.LinAlg.gemm_wrapper!(I10x10,'N','N', I0x0, I0x0) + @test LinearAlgebra.gemm_wrapper('N','N', I10x10, I10x10) == I10x10 + @test_throws DimensionMismatch LinearAlgebra.gemm_wrapper!(I10x10,'N','N', I10x11, I10x10) + @test_throws DimensionMismatch LinearAlgebra.gemm_wrapper!(I10x10,'N','N', I0x0, I0x0) A = rand(elty,3,3) - @test Base.LinAlg.matmul3x3('T','N',A, Matrix{elty}(I, 3, 3)) == transpose(A) + @test LinearAlgebra.matmul3x3('T','N',A, Matrix{elty}(I, 3, 3)) == transpose(A) end @testset "#13593, #13488" begin @@ -295,7 +296,8 @@ end struct RootInt i::Int end -import Base: *, adjoint, transpose, Adjoint, Transpose +import Base: *, adjoint, transpose +import LinearAlgebra: Adjoint, Transpose (*)(x::RootInt, y::RootInt) = x.i*y.i adjoint(x::RootInt) = x transpose(x::RootInt) = x @@ -399,3 +401,5 @@ module TestPR18218 @test typeof(d) == Vector{TypeC} @test d == TypeC[5, 11] end + +end # module TestMatmul diff --git a/test/linalg/pinv.jl b/stdlib/LinearAlgebra/test/pinv.jl similarity index 97% rename from test/linalg/pinv.jl rename to stdlib/LinearAlgebra/test/pinv.jl index 398e0318171e15..c903482f806eb6 100644 --- a/test/linalg/pinv.jl +++ b/stdlib/LinearAlgebra/test/pinv.jl @@ -1,10 +1,8 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -# -# Test the pseudo-inverse -# +module TestPinv -using Test, Random +using Test, LinearAlgebra, Random srand(12345) @@ -160,7 +158,7 @@ end @test a.diag[2] ≈ 0.0 end - if eltya <: Base.LinAlg.BlasReal + if eltya <: LinearAlgebra.BlasReal @testset "sub-normal numbers/vectors/matrices" begin a = pinv(realmin(eltya)/100) @test a ≈ 0.0 @@ -185,3 +183,5 @@ end end end end + +end # module TestPinv diff --git a/test/linalg/qr.jl b/stdlib/LinearAlgebra/test/qr.jl similarity index 94% rename from test/linalg/qr.jl rename to stdlib/LinearAlgebra/test/qr.jl index 53151902c79888..b3ace0f81089c4 100644 --- a/test/linalg/qr.jl +++ b/stdlib/LinearAlgebra/test/qr.jl @@ -1,8 +1,9 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Test, Random +module TestQR -using Base.LinAlg: BlasComplex, BlasFloat, BlasReal, QRPivoted, mul! +using Test, LinearAlgebra, Random +using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted, mul! 
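The RootInt hunk just above splits one import line in two because the lazy wrapper types Adjoint and Transpose moved out of Base: a custom element type still gets *, adjoint and transpose from Base, while the wrapper types come from LinearAlgebra. A hedged, self-contained variation on that test (the Half type and its values are illustrative only, not from the patch):

import Base: *, adjoint, transpose
using LinearAlgebra: Adjoint, Transpose

struct Half            # illustrative scalar type, analogous to RootInt above
    i::Int
end
(*)(x::Half, y::Half) = (x.i * y.i) / 4     # products collapse to plain Float64
adjoint(x::Half) = x
transpose(x::Half) = x

A = fill(Half(2), 2, 2)
@assert A * A == fill(2.0, 2, 2)   # generic matmul: Half*Half gives Float64, which is then summed
@assert transpose(A) == A          # the lazy Transpose wrapper recursively transposes elements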
n = 10 @@ -20,8 +21,8 @@ breal = randn(n,2)/2 bimg = randn(n,2)/2 # helper functions to unambiguously recover explicit forms of an implicit QR Q -squareQ(Q::LinAlg.AbstractQ) = (sq = size(Q.factors, 1); mul!(Q, Matrix{eltype(Q)}(I, sq, sq))) -rectangularQ(Q::LinAlg.AbstractQ) = convert(Array, Q) +squareQ(Q::LinearAlgebra.AbstractQ) = (sq = size(Q.factors, 1); mul!(Q, Matrix{eltype(Q)}(I, sq, sq))) +rectangularQ(Q::LinearAlgebra.AbstractQ) = convert(Array, Q) @testset for eltya in (Float32, Float64, ComplexF32, ComplexF64, BigFloat, Int) raw_a = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? complex.(areal, aimg) : areal) @@ -140,8 +141,8 @@ rectangularQ(Q::LinAlg.AbstractQ) = convert(Array, Q) @test mul!(squareQ(q), adjoint(q)) ≈ Matrix(I, n, n) @test_throws DimensionMismatch mul!(Matrix{eltya}(I, n+1, n+1), adjoint(q)) @test_throws BoundsError size(q,-1) - @test_throws DimensionMismatch Base.LinAlg.mul!(q,zeros(eltya,n1+1)) - @test_throws DimensionMismatch Base.LinAlg.mul!(adjoint(q), zeros(eltya,n1+1)) + @test_throws DimensionMismatch LinearAlgebra.mul!(q,zeros(eltya,n1+1)) + @test_throws DimensionMismatch LinearAlgebra.mul!(adjoint(q), zeros(eltya,n1+1)) qra = qrfact(a[:,1:n1], Val(false)) q, r = qra.Q, qra.R @@ -185,7 +186,7 @@ end @testset "QR on Ints" begin @test qr(Int[]) == (Int[],1) - @test Base.LinAlg.qr!(Int[1]) == (Int[1],1) + @test LinearAlgebra.qr!(Int[1]) == (Int[1],1) B = rand(7,2) @test (1:7)\B ≈ Vector(1:7)\B @@ -211,3 +212,5 @@ end A = rand(1//1:5//5, 4,3) @test first(qr(A)) == first(qr(float(A))) end + +end # module TestQR diff --git a/stdlib/LinearAlgebra/test/runtests.jl b/stdlib/LinearAlgebra/test/runtests.jl new file mode 100644 index 00000000000000..ffb8fb7540efee --- /dev/null +++ b/stdlib/LinearAlgebra/test/runtests.jl @@ -0,0 +1,24 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + +include("triangular.jl") +include("qr.jl") +include("dense.jl") +include("matmul.jl") +include("schur.jl") +include("special.jl") +include("eigen.jl") +include("bunchkaufman.jl") +include("svd.jl") +include("lapack.jl") +include("tridiag.jl") +include("bidiag.jl") +include("diagonal.jl") +include("cholesky.jl") +include("lu.jl") +include("symmetric.jl") +include("generic.jl") +include("uniformscaling.jl") +include("lq.jl") +include("hessenberg.jl") +include("blas.jl") +include("adjtrans.jl") diff --git a/test/linalg/schur.jl b/stdlib/LinearAlgebra/test/schur.jl similarity index 92% rename from test/linalg/schur.jl rename to stdlib/LinearAlgebra/test/schur.jl index d6dc09132aa3b4..78e5fedfe277c0 100644 --- a/test/linalg/schur.jl +++ b/stdlib/LinearAlgebra/test/schur.jl @@ -1,8 +1,9 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license -using Test, Random +module TestSchur -using Base.LinAlg: BlasComplex, BlasFloat, BlasReal, QRPivoted +using Test, LinearAlgebra, Random +using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted n = 10 @@ -59,7 +60,7 @@ aimg = randn(n,n)/2 sum(select) != 0 && @test S.values[findall(select)] ≈ O.values[1:sum(select)] @test O.vectors*O.Schur*O.vectors' ≈ ordschura @test_throws ErrorException f.A - Snew = Base.LinAlg.Schur(S.T, S.Z, S.values) + Snew = LinearAlgebra.Schur(S.T, S.Z, S.values) SchurNew = ordschur!(copy(Snew), select) @test O.vectors ≈ SchurNew.vectors @test O.Schur ≈ SchurNew.Schur @@ -92,7 +93,7 @@ aimg = randn(n,n)/2 # Make sure that we have sorted it correctly @test NS.values[findall(select)] ≈ S.values[1:m] - Snew = Base.LinAlg.GeneralizedSchur(NS.S, NS.T, NS.alpha, NS.beta, NS.Q, NS.Z) + Snew = LinearAlgebra.GeneralizedSchur(NS.S, NS.T, NS.alpha, NS.beta, NS.Q, NS.Z) SchurNew = ordschur!(copy(Snew), select) @test S.Q ≈ SchurNew.Q @test S.S ≈ SchurNew.S @@ -108,9 +109,11 @@ aimg = randn(n,n)/2 end end @testset "0x0 matrix" for A in (zeros(eltya, 0, 0), view(rand(eltya, 2, 2), 1:0, 1:0)) - T, Z, λ = Base.LinAlg.schur(A) + T, Z, λ = LinearAlgebra.schur(A) @test T == A @test Z == A @test λ == zeros(0) end end + +end # module TestSchur diff --git a/test/linalg/special.jl b/stdlib/LinearAlgebra/test/special.jl similarity index 95% rename from test/linalg/special.jl rename to stdlib/LinearAlgebra/test/special.jl index e63384323d0e90..8799c298f5e3e0 100644 --- a/test/linalg/special.jl +++ b/stdlib/LinearAlgebra/test/special.jl @@ -1,9 +1,9 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Test, Random -using SparseArrays +module TestSpecial -using Base.LinAlg: mul! +using Test, LinearAlgebra, SparseArrays, Random +using LinearAlgebra: mul! n= 10 #Size of matrix to test srand(1) @@ -100,7 +100,7 @@ end end C = rand(n,n) - for TriType in [Base.LinAlg.UnitLowerTriangular, Base.LinAlg.UnitUpperTriangular, UpperTriangular, LowerTriangular] + for TriType in [LinearAlgebra.UnitLowerTriangular, LinearAlgebra.UnitUpperTriangular, UpperTriangular, LowerTriangular] D = TriType(C) for Spectype in [Diagonal, Bidiagonal, Tridiagonal, Matrix] @test Matrix(D + convert(Spectype,A)) ≈ Matrix(D + A) @@ -112,7 +112,7 @@ end end @testset "Triangular Types and QR" begin - for typ in [UpperTriangular,LowerTriangular,Base.LinAlg.UnitUpperTriangular,Base.LinAlg.UnitLowerTriangular] + for typ in [UpperTriangular,LowerTriangular,LinearAlgebra.UnitUpperTriangular,LinearAlgebra.UnitLowerTriangular] a = rand(n,n) atri = typ(a) b = rand(n,n) @@ -174,8 +174,8 @@ end N = 4 # The tested annotation types testfull = Bool(parse(Int,(get(ENV, "JULIA_TESTFULL", "0")))) - utriannotations = (UpperTriangular, Base.LinAlg.UnitUpperTriangular) - ltriannotations = (LowerTriangular, Base.LinAlg.UnitLowerTriangular) + utriannotations = (UpperTriangular, LinearAlgebra.UnitUpperTriangular) + ltriannotations = (LowerTriangular, LinearAlgebra.UnitLowerTriangular) triannotations = (utriannotations..., ltriannotations...) symannotations = (Symmetric, Hermitian) annotations = testfull ? (triannotations..., symannotations...) 
: (LowerTriangular, Symmetric) @@ -251,3 +251,5 @@ end @testset "vcat of Vectors with SparseVectors should yield SparseVector (#22225)" begin @test isa((@inferred vcat(Float64[], spzeros(1))), SparseVector) end + +end # module TestSpecial diff --git a/test/linalg/svd.jl b/stdlib/LinearAlgebra/test/svd.jl similarity index 96% rename from test/linalg/svd.jl rename to stdlib/LinearAlgebra/test/svd.jl index be88caf62203cd..8071e604f758a8 100644 --- a/test/linalg/svd.jl +++ b/stdlib/LinearAlgebra/test/svd.jl @@ -1,8 +1,9 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Test, Random +module TestSVD -using Base.LinAlg: BlasComplex, BlasFloat, BlasReal, QRPivoted +using Test, LinearAlgebra, Random +using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted @testset "Simple svdvals / svdfact tests" begin ≊(x,y) = isapprox(x,y,rtol=1e-15) @@ -97,7 +98,7 @@ a2img = randn(n,n)/2 @test gsvd.V*gsvd.D2*gsvd.R*gsvd.Q' ≈ c end end - if eltya <: Base.LinAlg.BlasReal + if eltya <: LinearAlgebra.BlasReal @testset "Number input" begin x, y = randn(eltya, 2) @test svdfact(x) == svdfact(fill(x, 1, 1)) @@ -122,3 +123,5 @@ a2img = randn(n,n)/2 end end end + +end # module TestSVD diff --git a/test/linalg/symmetric.jl b/stdlib/LinearAlgebra/test/symmetric.jl similarity index 98% rename from test/linalg/symmetric.jl rename to stdlib/LinearAlgebra/test/symmetric.jl index 0943d92a0822fd..0046e9704201ac 100644 --- a/test/linalg/symmetric.jl +++ b/stdlib/LinearAlgebra/test/symmetric.jl @@ -1,7 +1,8 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Test, Random -using SparseArrays +module TestSymmetric + +using Test, LinearAlgebra, SparseArrays, Random srand(101) @@ -193,7 +194,7 @@ end @test inv(Hermitian(a, uplo))::Hermitian ≈ inv(Matrix(Hermitian(a, uplo))) end end - if eltya <: Base.LinAlg.BlasComplex + if eltya <: LinearAlgebra.BlasComplex @testset "inverse edge case with complex Hermitian" begin # Hermitian matrix, where inv(lufact(A)) generates non-real diagonal elements for T in (ComplexF32, ComplexF64) @@ -308,14 +309,14 @@ end @test a * Hermitian(aherm) ≈ a * aherm @test Hermitian(aherm) * Hermitian(aherm) ≈ aherm*aherm @test_throws DimensionMismatch Hermitian(aherm) * Vector{eltya}(uninitialized, n+1) - Base.LinAlg.mul!(C,a,Hermitian(aherm)) + LinearAlgebra.mul!(C,a,Hermitian(aherm)) @test C ≈ a*aherm @test Symmetric(asym) * Symmetric(asym) ≈ asym*asym @test Symmetric(asym) * a ≈ asym * a @test a * Symmetric(asym) ≈ a * asym @test_throws DimensionMismatch Symmetric(asym) * Vector{eltya}(uninitialized, n+1) - Base.LinAlg.mul!(C,a,Symmetric(asym)) + LinearAlgebra.mul!(C,a,Symmetric(asym)) @test C ≈ a*asym tri_b = UpperTriangular(triu(b)) @@ -485,3 +486,5 @@ end @test A.data == (uplo == :U ? [2 2; 1.0+im 2] : [2 1.0+im; 2 2]) end end + +end # module TestSymmetric diff --git a/stdlib/LinearAlgebra/test/testutils.jl b/stdlib/LinearAlgebra/test/testutils.jl new file mode 100644 index 00000000000000..33eff29765c708 --- /dev/null +++ b/stdlib/LinearAlgebra/test/testutils.jl @@ -0,0 +1,27 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + +# Test approximate equality of vectors or columns of matrices modulo floating +# point roundoff and phase (sign) differences. +# +# This function is designed to test for equality between vectors of floating point +# numbers when the vectors are defined only up to a global phase or sign, such as +# normalized eigenvectors or singular vectors. 
The global phase is usually +# defined consistently, but may occasionally change due to small differences in +# floating point rounding noise or rounding modes, or through the use of +# different conventions in different algorithms. As a result, most tests checking +# such vectors have to detect and discard such overall phase differences. +# +# Inputs: +# a, b:: StridedVecOrMat to be compared +# err :: Default: m^3*(eps(S)+eps(T)), where m is the number of rows +# +# Raises an error if any columnwise vector norm exceeds err. Otherwise, returns +# nothing. +function test_approx_eq_modphase(a::StridedVecOrMat{S}, b::StridedVecOrMat{T}, + err = length(axes(a,1))^3*(eps(S)+eps(T))) where {S<:Real,T<:Real} + @test axes(a,1) == axes(b,1) && axes(a,2) == axes(b,2) + for i in axes(a,2) + v1, v2 = a[:, i], b[:, i] + @test min(abs(norm(v1-v2)),abs(norm(v1+v2))) ≈ 0.0 atol=err + end +end diff --git a/test/linalg/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl similarity index 97% rename from test/linalg/triangular.jl rename to stdlib/LinearAlgebra/test/triangular.jl index 66bd4b37b6907f..08469d27fd5893 100644 --- a/test/linalg/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -1,11 +1,12 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +module TestTriangular + debug = false -using Test, Random -using Base.LinAlg: BlasFloat, errorbounds, full!, naivesub!, transpose!, - UnitUpperTriangular, UnitLowerTriangular, - mul!, rdiv! -using SparseArrays +using Test, LinearAlgebra, SparseArrays, Random +using LinearAlgebra: BlasFloat, errorbounds, full!, naivesub!, transpose!, + UnitUpperTriangular, UnitLowerTriangular, + mul!, rdiv! debug && println("Triangular matrices") @@ -229,7 +230,7 @@ for elty1 in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFlo @test isa(inv(A1), t1) # make sure the call to LAPACK works right if elty1 <: BlasFloat - @test Base.LinAlg.inv!(copy(A1)) ≈ inv(lufact(Matrix(A1))) + @test LinearAlgebra.inv!(copy(A1)) ≈ inv(lufact(Matrix(A1))) end # Determinant @@ -382,7 +383,7 @@ for elty1 in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFlo @test_throws DimensionMismatch Ann'\bm @test_throws DimensionMismatch transpose(Ann)\bm if t1 == UpperTriangular || t1 == LowerTriangular - @test_throws Base.LinAlg.SingularException naivesub!(t1(zeros(elty1,n,n)),fill(eltyB(1),n)) + @test_throws LinearAlgebra.SingularException naivesub!(t1(zeros(elty1,n,n)),fill(eltyB(1),n)) end @test B/A1 ≈ B/Matrix(A1) @test B/transpose(A1) ≈ B/transpose(Matrix(A1)) @@ -518,7 +519,8 @@ end @test UpperTriangular(Matrix(1.0I, 3, 3)) \ view(fill(1., 3), [1,2,3]) == fill(1., 3) # dimensional correctness: -isdefined(Main, :TestHelpers) || @eval Main include("../TestHelpers.jl") +const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") +isdefined(Main, :TestHelpers) || @eval Main include(joinpath($(BASE_TEST_PATH), "TestHelpers.jl")) using Main.TestHelpers: Furlong let A = UpperTriangular([Furlong(1) Furlong(4); Furlong(0) Furlong(1)]) @test sqrt(A) == Furlong{1//2}.(UpperTriangular([1 2; 0 1])) @@ -535,3 +537,5 @@ end @test isa(similar(trisparsemat, Float32, (n, n)), SparseMatrixCSC{Float32}) end end + +end # module TestTriangular diff --git a/test/linalg/tridiag.jl b/stdlib/LinearAlgebra/test/tridiag.jl similarity index 92% rename from test/linalg/tridiag.jl rename to stdlib/LinearAlgebra/test/tridiag.jl index 52dc80b961774d..367c0f45b7340b 100644 --- a/test/linalg/tridiag.jl +++ 
b/stdlib/LinearAlgebra/test/tridiag.jl @@ -1,6 +1,10 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Random, SparseArrays +module TestTridiagonal + +using Test, LinearAlgebra, SparseArrays, Random + +include("testutils.jl") # test_approx_eq_modphase #Test equivalence of eigenvectors/singular vectors taking into account possible phase (sign) differences function test_approx_eq_vecs(a::StridedVecOrMat{S}, b::StridedVecOrMat{T}, error=nothing) where {S<:Real,T<:Real} @@ -211,18 +215,18 @@ end @test Array(A/α) ≈ Array(A)/α @testset "Matmul with Triangular types" begin - @test A*Base.LinAlg.UnitUpperTriangular(Matrix(1.0I, n, n)) ≈ fA - @test A*Base.LinAlg.UnitLowerTriangular(Matrix(1.0I, n, n)) ≈ fA + @test A*LinearAlgebra.UnitUpperTriangular(Matrix(1.0I, n, n)) ≈ fA + @test A*LinearAlgebra.UnitLowerTriangular(Matrix(1.0I, n, n)) ≈ fA @test A*UpperTriangular(Matrix(1.0I, n, n)) ≈ fA @test A*LowerTriangular(Matrix(1.0I, n, n)) ≈ fA end @testset "mul! errors" begin Cnn, Cnm, Cmn = Matrix{elty}.(uninitialized, ((n,n), (n,n+1), (n+1,n))) - @test_throws DimensionMismatch Base.LinAlg.mul!(Cnn,A,Cnm) - @test_throws DimensionMismatch Base.LinAlg.mul!(Cnn,A,Cmn) - @test_throws DimensionMismatch Base.LinAlg.mul!(Cnn,B,Cmn) - @test_throws DimensionMismatch Base.LinAlg.mul!(Cmn,B,Cnn) - @test_throws DimensionMismatch Base.LinAlg.mul!(Cnm,B,Cnn) + @test_throws DimensionMismatch LinearAlgebra.mul!(Cnn,A,Cnm) + @test_throws DimensionMismatch LinearAlgebra.mul!(Cnn,A,Cmn) + @test_throws DimensionMismatch LinearAlgebra.mul!(Cnn,B,Cmn) + @test_throws DimensionMismatch LinearAlgebra.mul!(Cmn,B,Cnn) + @test_throws DimensionMismatch LinearAlgebra.mul!(Cnm,B,Cnn) end end if mat_type == SymTridiagonal @@ -232,7 +236,7 @@ end @test B + A == A + B @test B - A == A - B end - if elty <: Base.LinAlg.BlasReal + if elty <: LinearAlgebra.BlasReal @testset "Eigensystems" begin zero, infinity = convert(elty, 0), convert(elty, Inf) @testset "stebz! and stein!" begin @@ -251,13 +255,13 @@ end @testset "stegr! call with index range" begin F = eigfact(SymTridiagonal(b, a),1:2) fF = eigfact(Symmetric(Array(SymTridiagonal(b, a))),1:2) - Test.test_approx_eq_modphase(F.vectors, fF.vectors) + test_approx_eq_modphase(F.vectors, fF.vectors) @test F.values ≈ fF.values end @testset "stegr! call with value range" begin F = eigfact(SymTridiagonal(b, a),0.0,1.0) fF = eigfact(Symmetric(Array(SymTridiagonal(b, a))),0.0,1.0) - Test.test_approx_eq_modphase(F.vectors, fF.vectors) + test_approx_eq_modphase(F.vectors, fF.vectors) @test F.values ≈ fF.values end @testset "eigenvalues/eigenvectors of symmetric tridiagonal" begin @@ -268,9 +272,9 @@ end D, Vecs = eig(fA) @test DT ≈ D @test abs.(VT'Vecs) ≈ Matrix(elty(1)I, n, n) - Test.test_approx_eq_modphase(eigvecs(A), eigvecs(fA)) + test_approx_eq_modphase(eigvecs(A), eigvecs(fA)) #call to LAPACK.stein here - Test.test_approx_eq_modphase(eigvecs(A,eigvals(A)),eigvecs(A)) + test_approx_eq_modphase(eigvecs(A,eigvals(A)),eigvecs(A)) elseif elty != Int # check that undef is determined accurately even if type inference # bails out due to the number of try/catch blocks in this code. 
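The stegr!/stein! testsets above compare eigenvectors with test_approx_eq_modphase from the new testutils.jl rather than with ≈, because normalized eigenvectors are only determined up to an overall sign (or complex phase): two correct routines may return the same column negated. A small hedged illustration of the identity the helper relies on; the matrix values are arbitrary, and eigfact is the name current at this point of the patch:

using LinearAlgebra

S  = SymTridiagonal([2.0, 3.0, 4.0], [1.0, 1.0])
V1 = eigfact(S).vectors
V2 = -V1                         # equally valid eigenvectors, opposite sign convention
v1, v2 = V1[:, 1], V2[:, 1]

@assert !(v1 ≈ v2)                                                      # naive columnwise check fails
@assert isapprox(min(norm(v1 - v2), norm(v1 + v2)), 0, atol = 1e-12)    # phase-insensitive check passes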
@@ -287,8 +291,8 @@ end @test_throws DimensionMismatch Tldlt\rand(elty,n+1) @test size(Tldlt) == size(Ts) if elty <: AbstractFloat - @test typeof(convert(Base.LinAlg.LDLt{Float32},Tldlt)) == - Base.LinAlg.LDLt{Float32,SymTridiagonal{elty,Vector{elty}}} + @test typeof(convert(LinearAlgebra.LDLt{Float32},Tldlt)) == + LinearAlgebra.LDLt{Float32,SymTridiagonal{elty,Vector{elty}}} end for vv in (copy(v), view(v, 1:n)) invFsv = Fs\vv @@ -348,3 +352,5 @@ end @test SymTridiagonal(1:3, 1:2) == [1 1 0; 1 2 2; 0 2 3] @test Tridiagonal(4:5, 1:3, 1:2) == [1 1 0; 4 2 2; 0 5 3] end + +end # module TestTridiagonal diff --git a/test/linalg/uniformscaling.jl b/stdlib/LinearAlgebra/test/uniformscaling.jl similarity index 93% rename from test/linalg/uniformscaling.jl rename to stdlib/LinearAlgebra/test/uniformscaling.jl index 498602fbe307a6..8b51683a856ba2 100644 --- a/test/linalg/uniformscaling.jl +++ b/stdlib/LinearAlgebra/test/uniformscaling.jl @@ -1,7 +1,8 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Test, Random -using SparseArrays +module TestUniformscaling + +using Test, LinearAlgebra, Random, SparseArrays srand(123) @@ -68,8 +69,8 @@ end end @test copy(UniformScaling(one(Float64))) == UniformScaling(one(Float64)) -@test sprint(show,UniformScaling(one(ComplexF64))) == "UniformScaling{Complex{Float64}}\n(1.0 + 0.0im)*I" -@test sprint(show,UniformScaling(one(Float32))) == "UniformScaling{Float32}\n1.0*I" +@test sprint(show,UniformScaling(one(ComplexF64))) == "LinearAlgebra.UniformScaling{Complex{Float64}}\n(1.0 + 0.0im)*I" +@test sprint(show,UniformScaling(one(Float32))) == "LinearAlgebra.UniformScaling{Float32}\n1.0*I" let λ = complex(randn(),randn()) @@ -133,9 +134,9 @@ let @test @inferred(T\I) == inv(T) if isa(A, Array) - T = LinAlg.UnitLowerTriangular(randn(3,3)) + T = LinearAlgebra.UnitLowerTriangular(randn(3,3)) else - T = LinAlg.UnitLowerTriangular(view(randn(3,3), 1:3, 1:3)) + T = LinearAlgebra.UnitLowerTriangular(view(randn(3,3), 1:3, 1:3)) end @test @inferred(T + J) == Array(T) + J @test @inferred(J + T) == J + Array(T) @@ -155,9 +156,9 @@ let @test @inferred(T\I) == inv(T) if isa(A, Array) - T = LinAlg.UnitUpperTriangular(randn(3,3)) + T = LinearAlgebra.UnitUpperTriangular(randn(3,3)) else - T = LinAlg.UnitUpperTriangular(view(randn(3,3), 1:3, 1:3)) + T = LinearAlgebra.UnitUpperTriangular(view(randn(3,3), 1:3, 1:3)) end @test @inferred(T + J) == Array(T) + J @test @inferred(J + T) == J + Array(T) @@ -195,7 +196,7 @@ end for T in (Float64, ComplexF32, BigFloat, Int) λ = T(4) @test chol(λ*I) ≈ √λ*I - @test_throws LinAlg.PosDefException chol(-λ*I) + @test_throws LinearAlgebra.PosDefException chol(-λ*I) end end @@ -257,3 +258,5 @@ end @test eltype(fill(Int8(1), 2, 2) + I) == Int8 @test eltype(fill(Float16(1), 2, 2) + I) == Float16 end + +end # module TestUniformscaling diff --git a/stdlib/SparseArrays/src/SparseArrays.jl b/stdlib/SparseArrays/src/SparseArrays.jl index 82774480af694e..7529043cc43d50 100644 --- a/stdlib/SparseArrays/src/SparseArrays.jl +++ b/stdlib/SparseArrays/src/SparseArrays.jl @@ -9,24 +9,26 @@ module SparseArrays using Base: ReshapedArray, promote_op, setindex_shape_check, to_shape, tail using Base.Sort: Forward -using Base.LinAlg: AbstractTriangular, PosDefException, fillstored! +using LinearAlgebra import Base: +, -, *, \, /, &, |, xor, == -import Base.LinAlg: mul!, ldiv!, rdiv! 
+import LinearAlgebra: mul!, ldiv!, rdiv!, chol, adjoint!, diag, diff, dot, eig, + issymmetric, istril, istriu, lu, trace, transpose!, tril!, triu!, + vecnorm, cond, diagm, factorize, ishermitian, norm, scale!, tril, triu import Base: @get!, acos, acosd, acot, acotd, acsch, asech, asin, asind, asinh, - atan, atand, atanh, broadcast!, chol, conj!, cos, cosc, cosd, cosh, cospi, cot, - cotd, coth, count, csc, cscd, csch, adjoint!, diag, diff, done, dot, eig, + atan, atand, atanh, broadcast!, conj!, cos, cosc, cosd, cosh, cospi, cot, + cotd, coth, count, csc, cscd, csch, done, exp10, exp2, findprev, findnext, floor, hash, indmin, inv, - issymmetric, istril, istriu, log10, log2, lu, next, sec, secd, sech, show, + log10, log2, next, sec, secd, sech, show, sin, sinc, sind, sinh, sinpi, squeeze, start, sum, summary, tan, - tand, tanh, trace, transpose!, tril!, triu!, trunc, vecnorm, abs, abs2, - broadcast, ceil, complex, cond, conj, convert, copy, copyto!, adjoint, diagm, - exp, expm1, factorize, findall, findmax, findmin, findnz, float, getindex, - vcat, hcat, hvcat, cat, imag, indmax, ishermitian, kron, length, log, log1p, max, min, - maximum, minimum, norm, one, promote_eltype, real, reshape, rot180, - rotl90, rotr90, round, scale!, setindex!, similar, size, transpose, tril, - triu, vec, permute!, map, map!, Array + tand, tanh, trunc, abs, abs2, + broadcast, ceil, complex, conj, convert, copy, copyto!, adjoint, + exp, expm1, findall, findmax, findmin, findnz, float, getindex, + vcat, hcat, hvcat, cat, imag, indmax, kron, length, log, log1p, max, min, + maximum, minimum, one, promote_eltype, real, reshape, rot180, + rotl90, rotr90, round, setindex!, similar, size, transpose, + vec, permute!, map, map!, Array using Random: defaultRNG, AbstractRNG, randsubseq, randsubseq! 
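The reshuffled import lists above follow one rule: a module can only add methods to a function by importing it from the module that owns it, and after this patch mul!, ldiv!, rdiv!, chol, diag, norm, factorize and friends are owned by LinearAlgebra rather than Base. A hedged sketch of that rule with a toy array type; the module and type names are illustrative, not from the patch:

module MyArrayTypes

using LinearAlgebra
import LinearAlgebra: mul!          # extendable because it is imported from its home module
import Base: size, getindex         # these are still owned by Base

struct AllOnes <: AbstractMatrix{Float64}   # toy n×n matrix of ones
    n::Int
end
size(A::AllOnes) = (A.n, A.n)
getindex(A::AllOnes, i::Int, j::Int) = 1.0

# y = A*x for the all-ones matrix is sum(x) in every entry
mul!(y::AbstractVector, A::AllOnes, x::AbstractVector) = fill!(y, sum(x))

end # module MyArrayTypes

y = zeros(3)
LinearAlgebra.mul!(y, MyArrayTypes.AllOnes(3), [1.0, 2.0, 3.0])
@assert y == [6.0, 6.0, 6.0]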
diff --git a/stdlib/SparseArrays/src/abstractsparse.jl b/stdlib/SparseArrays/src/abstractsparse.jl index af145bb216b601..e4511484d69e9f 100644 --- a/stdlib/SparseArrays/src/abstractsparse.jl +++ b/stdlib/SparseArrays/src/abstractsparse.jl @@ -27,12 +27,12 @@ false issparse(A::AbstractArray) = false issparse(S::AbstractSparseArray) = true -issparse(S::Symmetric{<:Any,<:AbstractSparseMatrix}) = true -issparse(S::Hermitian{<:Any,<:AbstractSparseMatrix}) = true -issparse(S::LowerTriangular{<:Any,<:AbstractSparseMatrix}) = true -issparse(S::LinAlg.UnitLowerTriangular{<:Any,<:AbstractSparseMatrix}) = true -issparse(S::UpperTriangular{<:Any,<:AbstractSparseMatrix}) = true -issparse(S::LinAlg.UnitUpperTriangular{<:Any,<:AbstractSparseMatrix}) = true +issparse(S::LinearAlgebra.Symmetric{<:Any,<:AbstractSparseMatrix}) = true +issparse(S::LinearAlgebra.Hermitian{<:Any,<:AbstractSparseMatrix}) = true +issparse(S::LinearAlgebra.LowerTriangular{<:Any,<:AbstractSparseMatrix}) = true +issparse(S::LinearAlgebra.UnitLowerTriangular{<:Any,<:AbstractSparseMatrix}) = true +issparse(S::LinearAlgebra.UpperTriangular{<:Any,<:AbstractSparseMatrix}) = true +issparse(S::LinearAlgebra.UnitUpperTriangular{<:Any,<:AbstractSparseMatrix}) = true indtype(S::AbstractSparseArray{<:Any,Ti}) where {Ti} = Ti diff --git a/stdlib/SparseArrays/src/deprecated.jl b/stdlib/SparseArrays/src/deprecated.jl index 8cae8ed0d544f0..829d6c9401a3d3 100644 --- a/stdlib/SparseArrays/src/deprecated.jl +++ b/stdlib/SparseArrays/src/deprecated.jl @@ -29,7 +29,7 @@ for op in (:exp, :exp2, :exp10, :log, :log2, :log10, end # PR 23341 -import Base.LinAlg: diagm +import LinearAlgebra: diagm @deprecate diagm(A::SparseMatrixCSC) sparse(Diagonal(sparsevec(A))) # PR #23757 @@ -61,8 +61,8 @@ end @deprecate sparse(s::UniformScaling, m::Integer) sparse(s, m, m) # PR #25037 -@deprecate spones(A::SparseMatrixCSC) LinAlg.fillstored!(copy(A), 1) -@deprecate spones(A::SparseVector) LinAlg.fillstored!(copy(A), 1) +@deprecate spones(A::SparseMatrixCSC) LinearAlgebra.fillstored!(copy(A), 1) +@deprecate spones(A::SparseVector) LinearAlgebra.fillstored!(copy(A), 1) export spones # full for sparse arrays @@ -133,13 +133,14 @@ function speye(S::SparseMatrixCSC{T}) where T end # former imports into SparseArrays -import Base: A_mul_B!, Ac_mul_B, Ac_mul_B!, At_mul_B, At_mul_B! +import Base: Ac_mul_B, At_mul_B import Base: A_mul_Bc, A_mul_Bt, Ac_mul_Bc, At_mul_Bt -import Base: At_ldiv_B, Ac_ldiv_B, A_ldiv_B! -import Base.LinAlg: At_ldiv_B!, Ac_ldiv_B!, A_rdiv_B!, A_rdiv_Bc!, mul!, ldiv!, rdiv! +import Base: At_ldiv_B, Ac_ldiv_B +import LinearAlgebra: A_mul_B!, Ac_mul_B!, At_mul_B!, A_ldiv_B! +import LinearAlgebra: At_ldiv_B!, Ac_ldiv_B!, A_rdiv_B!, A_rdiv_Bc!, mul!, ldiv!, rdiv! # A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/sparse/linalg.jl, to deprecate -using Base.LinAlg: Adjoint, Transpose +using LinearAlgebra: Adjoint, Transpose @deprecate Ac_ldiv_B(A::SparseMatrixCSC, B::RowVector) (\)(adjoint(A), B) @deprecate At_ldiv_B(A::SparseMatrixCSC, B::RowVector) (\)(transpose(A), B) @deprecate Ac_ldiv_B(A::SparseMatrixCSC, B::AbstractVecOrMat) (\)(adjoint(A), B) @@ -178,9 +179,9 @@ using Base.LinAlg: Adjoint, Transpose for isunittri in (true, false), islowertri in (true, false) unitstr = isunittri ? "Unit" : "" halfstr = islowertri ? 
"Lower" : "Upper" - tritype = :(Base.LinAlg.$(Symbol(unitstr, halfstr, "Triangular"))) + tritype = :(LinearAlgebra.$(Symbol(unitstr, halfstr, "Triangular"))) @eval #=Base.SparseArrays=# begin - using Base.LinAlg: Adjoint, Transpose + using LinearAlgebra: Adjoint, Transpose @deprecate At_ldiv_B(A::$tritype{TA,<:AbstractMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(transpose(A), b) @deprecate At_ldiv_B(A::$tritype{TA,<:StridedMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(transpose(A), b) @deprecate At_ldiv_B(A::$tritype, b::SparseVector) (\)(transpose(A), b) @@ -193,7 +194,7 @@ for isunittri in (true, false), islowertri in (true, false) end end -using Base.LinAlg: Adjoint, Transpose +using LinearAlgebra: Adjoint, Transpose @deprecate Ac_mul_B(A::SparseMatrixCSC, x::AbstractSparseVector) (*)(adjoint(A), x) @deprecate At_mul_B(A::SparseMatrixCSC, x::AbstractSparseVector) (*)(transpose(A), x) @deprecate Ac_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, adjoint(A), x, β, y) @@ -215,7 +216,7 @@ using Base.LinAlg: Adjoint, Transpose # methods involving RowVector from base/sparse/higherorderfns.jl, to deprecate @eval SparseArrays.HigherOrderFns begin - BroadcastStyle(::Type{<:Base.RowVector{T,<:Vector}}) where T = Broadcast.MatrixStyle() + BroadcastStyle(::Type{<:RowVector{T,<:Vector}}) where T = Broadcast.MatrixStyle() end import Base: asyncmap diff --git a/stdlib/SparseArrays/src/higherorderfns.jl b/stdlib/SparseArrays/src/higherorderfns.jl index 25633f9433d421..4e7cc6a88656a3 100644 --- a/stdlib/SparseArrays/src/higherorderfns.jl +++ b/stdlib/SparseArrays/src/higherorderfns.jl @@ -10,6 +10,7 @@ using Base: front, tail, to_shape using ..SparseArrays: SparseVector, SparseMatrixCSC, AbstractSparseVector, AbstractSparseMatrix, AbstractSparseArray, indtype, nnz, nzrange using Base.Broadcast: BroadcastStyle +using LinearAlgebra # This module is organized as follows: # (1) Define a common interface to SparseVectors and SparseMatrixCSCs sufficient for @@ -987,8 +988,8 @@ Broadcast.BroadcastStyle(::PromoteToSparse, ::Broadcast.Style{Tuple}) = Broadcas # Broadcast.BroadcastStyle(::SPVM, ::Broadcast.DefaultArrayStyle{0}) = PromoteToSparse() # Broadcast.BroadcastStyle(::SPVM, ::Broadcast.DefaultArrayStyle{1}) = PromoteToSparse() # Broadcast.BroadcastStyle(::SPVM, ::Broadcast.DefaultArrayStyle{2}) = PromoteToSparse() -BroadcastStyle(::Type{<:Base.Adjoint{T,<:Vector}}) where T = Broadcast.MatrixStyle() # Adjoint not yet defined when broadcast.jl loaded -BroadcastStyle(::Type{<:Base.Transpose{T,<:Vector}}) where T = Broadcast.MatrixStyle() # Transpose not yet defined when broadcast.jl loaded +BroadcastStyle(::Type{<:Adjoint{T,<:Vector}}) where T = Broadcast.MatrixStyle() # Adjoint not yet defined when broadcast.jl loaded +BroadcastStyle(::Type{<:Transpose{T,<:Vector}}) where T = Broadcast.MatrixStyle() # Transpose not yet defined when broadcast.jl loaded Broadcast.BroadcastStyle(::SPVM, ::Broadcast.VectorStyle) = PromoteToSparse() Broadcast.BroadcastStyle(::SPVM, ::Broadcast.MatrixStyle) = PromoteToSparse() Broadcast.BroadcastStyle(::SparseVecStyle, ::Broadcast.DefaultArrayStyle{N}) where N = diff --git a/stdlib/SparseArrays/src/linalg.jl b/stdlib/SparseArrays/src/linalg.jl index f3b65b5dd5bdb5..becba99b8c8897 100644 --- a/stdlib/SparseArrays/src/linalg.jl +++ b/stdlib/SparseArrays/src/linalg.jl @@ -1,6 +1,6 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license -import Base.LinAlg: checksquare +import LinearAlgebra: checksquare ## sparse matrix multiplication @@ -128,11 +128,11 @@ end function (*)(D::Diagonal, A::SparseMatrixCSC) T = Base.promote_op(*, eltype(D), eltype(A)) - scale!(LinAlg.copy_oftype(A, T), D.diag, A) + scale!(LinearAlgebra.copy_oftype(A, T), D.diag, A) end function (*)(A::SparseMatrixCSC, D::Diagonal) T = Base.promote_op(*, eltype(D), eltype(A)) - scale!(LinAlg.copy_oftype(A, T), A, D.diag) + scale!(LinearAlgebra.copy_oftype(A, T), A, D.diag) end # Sparse matrix multiplication as described in [Gustavson, 1978]: @@ -207,7 +207,7 @@ end function fwdTriSolve!(A::SparseMatrixCSCUnion, B::AbstractVecOrMat) # forward substitution for CSC matrices nrowB, ncolB = size(B, 1), size(B, 2) - ncol = LinAlg.checksquare(A) + ncol = LinearAlgebra.checksquare(A) if nrowB != ncol throw(DimensionMismatch("A is $(ncol) columns and B has $(nrowB) rows")) end @@ -236,7 +236,7 @@ function fwdTriSolve!(A::SparseMatrixCSCUnion, B::AbstractVecOrMat) B[joff + jai] = bj ii += 1 else - throw(LinAlg.SingularException(j)) + throw(LinearAlgebra.SingularException(j)) end # update remaining part @@ -252,7 +252,7 @@ end function bwdTriSolve!(A::SparseMatrixCSCUnion, B::AbstractVecOrMat) # backward substitution for CSC matrices nrowB, ncolB = size(B, 1), size(B, 2) - ncol = LinAlg.checksquare(A) + ncol = LinearAlgebra.checksquare(A) if nrowB != ncol throw(DimensionMismatch("A is $(ncol) columns and B has $(nrowB) rows")) end @@ -281,7 +281,7 @@ function bwdTriSolve!(A::SparseMatrixCSCUnion, B::AbstractVecOrMat) B[joff + jai] = bj ii -= 1 else - throw(LinAlg.SingularException(j)) + throw(LinearAlgebra.SingularException(j)) end # update remaining part @@ -312,7 +312,7 @@ function rdiv!(A::SparseMatrixCSC{T}, D::Diagonal{T}) where T @inbounds for j in 1:k ddj = dd[j] if iszero(ddj) - throw(LinAlg.SingularException(j)) + throw(LinearAlgebra.SingularException(j)) end for i in nzrange(A, j) nonz[i] /= ddj @@ -983,16 +983,16 @@ end # function factorize(A::Symmetric{Float64,SparseMatrixCSC{Float64,Ti}}) where Ti # F = cholfact(A) -# if LinAlg.issuccess(F) +# if LinearAlgebra.issuccess(F) # return F # else # ldltfact!(F, A) # return F # end # end -function factorize(A::LinAlg.RealHermSymComplexHerm{Float64,<:SparseMatrixCSC}) +function factorize(A::LinearAlgebra.RealHermSymComplexHerm{Float64,<:SparseMatrixCSC}) F = cholfact(A) - if LinAlg.issuccess(F) + if LinearAlgebra.issuccess(F) return F else ldltfact!(F, A) diff --git a/stdlib/SparseArrays/src/sparsematrix.jl b/stdlib/SparseArrays/src/sparsematrix.jl index e9fb76a4e562c8..3a5335400c247b 100644 --- a/stdlib/SparseArrays/src/sparsematrix.jl +++ b/stdlib/SparseArrays/src/sparsematrix.jl @@ -1452,7 +1452,7 @@ julia> sprandn(rng, 2, 2, 0.75) sprandn(r::AbstractRNG, m::Integer, n::Integer, density::AbstractFloat) = sprand(r,m,n,density,randn,Float64) sprandn(m::Integer, n::Integer, density::AbstractFloat) = sprandn(defaultRNG(),m,n,density) -LinAlg.fillstored!(S::SparseMatrixCSC, x) = (fill!(view(S.nzval, 1:(S.colptr[S.n + 1] - 1)), x); S) +LinearAlgebra.fillstored!(S::SparseMatrixCSC, x) = (fill!(view(S.nzval, 1:(S.colptr[S.n + 1] - 1)), x); S) """ spzeros([type,]m[,n]) diff --git a/stdlib/SparseArrays/src/sparsevector.jl b/stdlib/SparseArrays/src/sparsevector.jl index 936c09b809d9ef..fe9b0a7ec3442f 100644 --- a/stdlib/SparseArrays/src/sparsevector.jl +++ b/stdlib/SparseArrays/src/sparsevector.jl @@ -3,7 +3,7 @@ ### Common definitions import Base: sort, findall, findnz 
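The factorize method above encodes a common pattern for sparse real-symmetric/Hermitian systems: attempt a sparse Cholesky, query it with LinearAlgebra.issuccess, and fall back to LDLt when the matrix turns out not to be positive definite. A hedged usage sketch, assuming the SuiteSparse-backed sparse cholfact/ldltfact! are available as in a default build; the matrix values are illustrative:

using LinearAlgebra, SparseArrays

A = sparse([4.0 1.0; 1.0 -3.0])          # symmetric but indefinite, so Cholesky cannot succeed
b = [1.0, 2.0]
F = factorize(Symmetric(A))              # cholfact attempt, ldltfact! fallback as defined above
x = F \ b
@assert A * x ≈ b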
-import Base.LinAlg: promote_to_array_type, promote_to_arrays_ +import LinearAlgebra: promote_to_array_type, promote_to_arrays_ ### The SparseVector @@ -99,7 +99,7 @@ spzeros(len::Integer) = spzeros(Float64, len) spzeros(::Type{T}, len::Integer) where {T} = SparseVector(len, Int[], T[]) spzeros(::Type{Tv}, ::Type{Ti}, len::Integer) where {Tv,Ti<:Integer} = SparseVector(len, Ti[], Tv[]) -LinAlg.fillstored!(x::SparseVector, y) = (fill!(x.nzval, y); x) +LinearAlgebra.fillstored!(x::SparseVector, y) = (fill!(x.nzval, y); x) ### Construction from lists of indices and values @@ -999,24 +999,24 @@ vcat(X::Union{Vector,SparseVector}...) = vcat(map(sparse, X)...) # TODO: A definition similar to the third exists in base/linalg/bidiag.jl. These definitions # should be consolidated in a more appropriate location, e.g. base/linalg/special.jl. -const _SparseArrays = Union{SparseVector, SparseMatrixCSC, Base.LinAlg.RowVector{<:Any,<:SparseVector}, Adjoint{<:Any,<:SparseVector}, Transpose{<:Any,<:SparseVector}} +const _SparseArrays = Union{SparseVector, SparseMatrixCSC, LinearAlgebra.RowVector{<:Any,<:SparseVector}, Adjoint{<:Any,<:SparseVector}, Transpose{<:Any,<:SparseVector}} const _SpecialArrays = Union{Diagonal, Bidiagonal, Tridiagonal, SymTridiagonal} const _SparseConcatArrays = Union{_SpecialArrays, _SparseArrays} const _Symmetric_SparseConcatArrays{T,A<:_SparseConcatArrays} = Symmetric{T,A} const _Hermitian_SparseConcatArrays{T,A<:_SparseConcatArrays} = Hermitian{T,A} -const _Triangular_SparseConcatArrays{T,A<:_SparseConcatArrays} = Base.LinAlg.AbstractTriangular{T,A} +const _Triangular_SparseConcatArrays{T,A<:_SparseConcatArrays} = LinearAlgebra.AbstractTriangular{T,A} const _Annotated_SparseConcatArrays = Union{_Triangular_SparseConcatArrays, _Symmetric_SparseConcatArrays, _Hermitian_SparseConcatArrays} const _Symmetric_DenseArrays{T,A<:Matrix} = Symmetric{T,A} const _Hermitian_DenseArrays{T,A<:Matrix} = Hermitian{T,A} -const _Triangular_DenseArrays{T,A<:Matrix} = Base.LinAlg.AbstractTriangular{T,A} +const _Triangular_DenseArrays{T,A<:Matrix} = LinearAlgebra.AbstractTriangular{T,A} const _Annotated_DenseArrays = Union{_Triangular_DenseArrays, _Symmetric_DenseArrays, _Hermitian_DenseArrays} const _Annotated_Typed_DenseArrays{T} = Union{_Triangular_DenseArrays{T}, _Symmetric_DenseArrays{T}, _Hermitian_DenseArrays{T}} -const _SparseConcatGroup = Union{Vector, Adjoint{<:Any,<:Vector}, Transpose{<:Any,<:Vector}, Base.LinAlg.RowVector{<:Any,<:Vector}, Matrix, _SparseConcatArrays, _Annotated_SparseConcatArrays, _Annotated_DenseArrays} -const _DenseConcatGroup = Union{Vector, Adjoint{<:Any,<:Vector}, Transpose{<:Any,<:Vector}, Base.LinAlg.RowVector{<:Any, <:Vector}, Matrix, _Annotated_DenseArrays} -const _TypedDenseConcatGroup{T} = Union{Vector{T}, Adjoint{T,Vector{T}}, Transpose{T,Vector{T}}, Base.LinAlg.RowVector{T,Vector{T}}, Matrix{T}, _Annotated_Typed_DenseArrays{T}} +const _SparseConcatGroup = Union{Vector, Adjoint{<:Any,<:Vector}, Transpose{<:Any,<:Vector}, LinearAlgebra.RowVector{<:Any,<:Vector}, Matrix, _SparseConcatArrays, _Annotated_SparseConcatArrays, _Annotated_DenseArrays} +const _DenseConcatGroup = Union{Vector, Adjoint{<:Any,<:Vector}, Transpose{<:Any,<:Vector}, LinearAlgebra.RowVector{<:Any, <:Vector}, Matrix, _Annotated_DenseArrays} +const _TypedDenseConcatGroup{T} = Union{Vector{T}, Adjoint{T,Vector{T}}, Transpose{T,Vector{T}}, LinearAlgebra.RowVector{T,Vector{T}}, Matrix{T}, _Annotated_Typed_DenseArrays{T}} # Concatenations involving un/annotated sparse/special matrices/vectors 
should yield sparse arrays function cat(catdims, Xin::_SparseConcatGroup...) @@ -1451,7 +1451,7 @@ adjoint(sv::SparseVector) = Adjoint(sv) # axpy -function LinAlg.axpy!(a::Number, x::SparseVectorUnion, y::AbstractVector) +function LinearAlgebra.axpy!(a::Number, x::SparseVectorUnion, y::AbstractVector) length(x) == length(y) || throw(DimensionMismatch()) nzind = nonzeroinds(x) nzval = nonzeros(x) @@ -1556,7 +1556,7 @@ end ### BLAS-2 / dense A * sparse x -> dense y # lowrankupdate (BLAS.ger! like) -function LinAlg.lowrankupdate!(A::StridedMatrix, x::AbstractVector, y::SparseVectorUnion, α::Number = 1) +function LinearAlgebra.lowrankupdate!(A::StridedMatrix, x::AbstractVector, y::SparseVectorUnion, α::Number = 1) nzi = nonzeroinds(y) nzv = nonzeros(y) @inbounds for (j,v) in zip(nzi,nzv) @@ -1803,7 +1803,7 @@ end for isunittri in (true, false), islowertri in (true, false) unitstr = isunittri ? "Unit" : "" halfstr = islowertri ? "Lower" : "Upper" - tritype = :(Base.LinAlg.$(Symbol(unitstr, halfstr, "Triangular"))) + tritype = :(LinearAlgebra.$(Symbol(unitstr, halfstr, "Triangular"))) # build out-of-place left-division operations for (istrans, applyxform, xformtype, xformop) in ( @@ -1819,7 +1819,7 @@ for isunittri in (true, false), islowertri in (true, false) TAb = $(isunittri ? :(typeof(zero(TA)*zero(Tb) + zero(TA)*zero(Tb))) : :(typeof((zero(TA)*zero(Tb) + zero(TA)*zero(Tb))/one(TA))) ) - Base.LinAlg.ldiv!($xformop(convert(AbstractArray{TAb}, A)), convert(Array{TAb}, b)) + LinearAlgebra.ldiv!($xformop(convert(AbstractArray{TAb}, A)), convert(Array{TAb}, b)) end # faster method requiring good view support of the @@ -1841,7 +1841,7 @@ for isunittri in (true, false), islowertri in (true, false) :(1:b.nzind[end]) ) nzrangeviewr = view(r, nzrange) nzrangeviewA = $tritype(view(A.data, nzrange, nzrange)) - Base.LinAlg.ldiv!($xformop(convert(AbstractArray{TAb}, nzrangeviewA)), nzrangeviewr) + LinearAlgebra.ldiv!($xformop(convert(AbstractArray{TAb}, nzrangeviewA)), nzrangeviewr) end r end @@ -1850,7 +1850,7 @@ for isunittri in (true, false), islowertri in (true, false) xformtritype = applyxform ? :($xformtype{<:Any,<:$tritype}) : :($tritype) @eval function \(xformA::$xformtritype, b::SparseVector) A = $(applyxform ? :(xformA.parent) : :(xformA) ) - Base.LinAlg.ldiv!($xformop(A), copy(b)) + LinearAlgebra.ldiv!($xformop(A), copy(b)) end end @@ -1884,7 +1884,7 @@ for isunittri in (true, false), islowertri in (true, false) :(1:b.nzind[end]) ) nzrangeviewbnz = view(b.nzval, nzrange .- (b.nzind[1] - 1)) nzrangeviewA = $tritype(view(A.data, nzrange, nzrange)) - Base.LinAlg.ldiv!($xformop(nzrangeviewA), nzrangeviewbnz) + LinearAlgebra.ldiv!($xformop(nzrangeviewA), nzrangeviewbnz) end b end diff --git a/stdlib/SparseArrays/test/runtests.jl b/stdlib/SparseArrays/test/runtests.jl index e761a2fd34567c..4e69db6decb07e 100644 --- a/stdlib/SparseArrays/test/runtests.jl +++ b/stdlib/SparseArrays/test/runtests.jl @@ -1,6 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license using Test, SparseArrays +using LinearAlgebra include("higherorderfns.jl") include("sparse.jl") diff --git a/stdlib/SparseArrays/test/sparse.jl b/stdlib/SparseArrays/test/sparse.jl index 299b7dfe85e94d..3b56b65e2f3644 100644 --- a/stdlib/SparseArrays/test/sparse.jl +++ b/stdlib/SparseArrays/test/sparse.jl @@ -1,6 +1,6 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Base.LinAlg: mul!, ldiv!, rdiv! +using LinearAlgebra: mul!, ldiv!, rdiv! 
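Several hunks above move fillstored! under LinearAlgebra; it fills only the stored (structurally nonzero) entries of a sparse matrix or vector and leaves the sparsity pattern untouched. A short illustration, with values chosen to mirror the tests that follow:

using LinearAlgebra, SparseArrays

S = sparse(2.0I, 3, 3)                   # stored entries only on the diagonal
LinearAlgebra.fillstored!(S, 1)
@assert S == Matrix(I, 3, 3)             # same pattern, new values
@assert nnz(S) == 3                      # no new entries were created

x = SparseVector(8, [2, 3, 6], [12.0, 18.0, 25.0])
LinearAlgebra.fillstored!(x, 1)
@assert x == SparseVector(8, [2, 3, 6], [1.0, 1.0, 1.0])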
using Base.Printf: @printf using Random @@ -348,7 +348,7 @@ dA = Array(sA) @test scale!(copy(dAt), bi) ≈ rdiv!(copy(sAt), transpose(Diagonal(b))) @test scale!(copy(dAt), conj(bi)) ≈ rdiv!(copy(sAt), adjoint(Diagonal(b))) @test_throws DimensionMismatch rdiv!(copy(sAt), Diagonal(fill(1., length(b)+1))) - @test_throws LinAlg.SingularException rdiv!(copy(sAt), Diagonal(zeros(length(b)))) + @test_throws LinearAlgebra.SingularException rdiv!(copy(sAt), Diagonal(zeros(length(b)))) end end @@ -1736,7 +1736,7 @@ end end @testset "fillstored!" begin - @test LinAlg.fillstored!(sparse(2.0I, 5, 5), 1) == Matrix(I, 5, 5) + @test LinearAlgebra.fillstored!(sparse(2.0I, 5, 5), 1) == Matrix(I, 5, 5) end @testset "factorization" begin @@ -1774,8 +1774,8 @@ end @test UpperTriangular(A)\(UpperTriangular(A)*b) ≈ b A[2,2] = 0 dropzeros!(A) - @test_throws LinAlg.SingularException LowerTriangular(A)\b - @test_throws LinAlg.SingularException UpperTriangular(A)\b + @test_throws LinearAlgebra.SingularException LowerTriangular(A)\b + @test_throws LinearAlgebra.SingularException UpperTriangular(A)\b end @testset "issue described in https://groups.google.com/forum/#!topic/julia-dev/QT7qpIpgOaA" begin @@ -1788,15 +1788,15 @@ end @test issparse(Symmetric(m)) @test issparse(Hermitian(m)) @test issparse(LowerTriangular(m)) - @test issparse(LinAlg.UnitLowerTriangular(m)) + @test issparse(LinearAlgebra.UnitLowerTriangular(m)) @test issparse(UpperTriangular(m)) - @test issparse(LinAlg.UnitUpperTriangular(m)) + @test issparse(LinearAlgebra.UnitUpperTriangular(m)) @test issparse(Symmetric(Array(m))) == false @test issparse(Hermitian(Array(m))) == false @test issparse(LowerTriangular(Array(m))) == false - @test issparse(LinAlg.UnitLowerTriangular(Array(m))) == false + @test issparse(LinearAlgebra.UnitLowerTriangular(Array(m))) == false @test issparse(UpperTriangular(Array(m))) == false - @test issparse(LinAlg.UnitUpperTriangular(Array(m))) == false + @test issparse(LinearAlgebra.UnitUpperTriangular(Array(m))) == false end @testset "test created type of sprand{T}(::Type{T}, m::Integer, n::Integer, density::AbstractFloat)" begin @@ -2010,7 +2010,7 @@ end sprand(5, 5, 1/5) end A = max.(A, copy(A')) - LinAlg.fillstored!(A, 1) + LinearAlgebra.fillstored!(A, 1) B = A[5:-1:1, 5:-1:1] @test issymmetric(B) end diff --git a/stdlib/SparseArrays/test/sparsevector.jl b/stdlib/SparseArrays/test/sparsevector.jl index 7554b47045496b..05f124bff6a0bf 100644 --- a/stdlib/SparseArrays/test/sparsevector.jl +++ b/stdlib/SparseArrays/test/sparsevector.jl @@ -1,6 +1,6 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Base.LinAlg: mul!, ldiv! +using LinearAlgebra using Random ### Data @@ -124,7 +124,7 @@ end end @testset "fillstored!" begin x = SparseVector(8, [2, 3, 6], [12.0, 18.0, 25.0]) - y = LinAlg.fillstored!(copy(x), 1) + y = LinearAlgebra.fillstored!(copy(x), 1) @test (x .!= 0) == (y .!= 0) @test y == SparseVector(8, [2, 3, 6], [1.0, 1.0, 1.0]) end @@ -760,7 +760,7 @@ end @testset "axpy!" 
begin for c in [1.0, -1.0, 2.0, -2.0] y = Array(x) - @test Base.axpy!(c, x2, y) === y + @test LinearAlgebra.axpy!(c, x2, y) === y @test y == Array(x2 * c + x) end end @@ -945,8 +945,8 @@ end sparsemat = convert(SparseMatrixCSC{eltypemat}, sparsemat) trimats = (LowerTriangular(densemat), UpperTriangular(densemat), LowerTriangular(sparsemat), UpperTriangular(sparsemat) ) - unittrimats = (Base.LinAlg.UnitLowerTriangular(densemat), Base.LinAlg.UnitUpperTriangular(densemat), - Base.LinAlg.UnitLowerTriangular(sparsemat), Base.LinAlg.UnitUpperTriangular(sparsemat) ) + unittrimats = (LinearAlgebra.UnitLowerTriangular(densemat), LinearAlgebra.UnitUpperTriangular(densemat), + LinearAlgebra.UnitLowerTriangular(sparsemat), LinearAlgebra.UnitUpperTriangular(sparsemat) ) for eltypevec in eltypes spvecs = eltypevec in inttypes ? sparseintvecs : @@ -988,8 +988,8 @@ end transmat = copy(origmat') utmat = UpperTriangular(origmat) ltmat = LowerTriangular(transmat) - uutmat = Base.LinAlg.UnitUpperTriangular(origmat) - ultmat = Base.LinAlg.UnitLowerTriangular(transmat) + uutmat = LinearAlgebra.UnitUpperTriangular(origmat) + ultmat = LinearAlgebra.UnitLowerTriangular(transmat) zerospvec = spzeros(Float64, 2) zerodvec = zeros(Float64, 2) @@ -1258,9 +1258,9 @@ end @test Aj*0.1 == Ajview*0.1 @test 0.1*Aj == 0.1*Ajview @test Aj/0.1 == Ajview/0.1 - @test LinAlg.axpy!(1.0, Aj, sparse(fill(1., n))) == - LinAlg.axpy!(1.0, Ajview, sparse(fill(1., n))) - @test LinAlg.lowrankupdate!(Matrix(1.0*I, n, n), fill(1.0, n), Aj) == - LinAlg.lowrankupdate!(Matrix(1.0*I, n, n), fill(1.0, n), Ajview) + @test LinearAlgebra.axpy!(1.0, Aj, sparse(fill(1., n))) == + LinearAlgebra.axpy!(1.0, Ajview, sparse(fill(1., n))) + @test LinearAlgebra.lowrankupdate!(Matrix(1.0*I, n, n), fill(1.0, n), Aj) == + LinearAlgebra.lowrankupdate!(Matrix(1.0*I, n, n), fill(1.0, n), Ajview) end end diff --git a/stdlib/SuiteSparse/src/SuiteSparse.jl b/stdlib/SuiteSparse/src/SuiteSparse.jl index 6b7558310a1d0b..99a0128d73f476 100644 --- a/stdlib/SuiteSparse/src/SuiteSparse.jl +++ b/stdlib/SuiteSparse/src/SuiteSparse.jl @@ -5,7 +5,7 @@ __precompile__(true) module SuiteSparse import Base: \ -import Base.LinAlg: ldiv!, rdiv! +import LinearAlgebra: ldiv!, rdiv! ## Functions to switch to 0-based indexing to call external sparse solvers diff --git a/stdlib/SuiteSparse/src/cholmod.jl b/stdlib/SuiteSparse/src/cholmod.jl index a110c1cb7955cc..b376d606585f07 100644 --- a/stdlib/SuiteSparse/src/cholmod.jl +++ b/stdlib/SuiteSparse/src/cholmod.jl @@ -5,7 +5,8 @@ module CHOLMOD import Base: (*), convert, copy, eltype, getindex, getproperty, show, size, IndexStyle, IndexLinear, IndexCartesian, adjoint -import Base.LinAlg: (\), +using LinearAlgebra +import LinearAlgebra: (\), cholfact, cholfact!, det, diag, ishermitian, isposdef, issuccess, issymmetric, ldltfact, ldltfact!, logdet @@ -348,8 +349,8 @@ end Factor(ptr::Ptr{C_Factor{Tv}}) where {Tv<:VTypes} = Factor{Tv}(ptr) Factor(x::Factor) = x -Base.LinAlg.adjoint(F::Factor) = Adjoint(F) -Base.LinAlg.transpose(F::Factor) = Transpose(F) +Base.adjoint(F::Factor) = Adjoint(F) +Base.transpose(F::Factor) = Transpose(F) # All pointer loads should be checked to make sure that SuiteSparse is not called with # a C_NULL pointer which could cause a segfault. 
Pointers are set to null @@ -773,7 +774,7 @@ function solve(sys::Integer, F::Factor{Tv}, B::Dense{Tv}) where Tv<:VTypes if !issuccess(F) s = unsafe_load(pointer(F)) if s.is_ll == 1 - throw(LinAlg.PosDefException(s.minor)) + throw(LinearAlgebra.PosDefException(s.minor)) else throw(ArgumentError("factorized matrix has one or more zero pivots. Try using lufact instead.")) end diff --git a/stdlib/SuiteSparse/src/deprecated.jl b/stdlib/SuiteSparse/src/deprecated.jl index 2bf2d022684a20..2c36db610d0286 100644 --- a/stdlib/SuiteSparse/src/deprecated.jl +++ b/stdlib/SuiteSparse/src/deprecated.jl @@ -17,32 +17,32 @@ end # A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from src/umfpack.jl, to deprecate @eval SuiteSparse.UMFPACK begin - using Base.LinAlg: Adjoint, Transpose - Base.A_ldiv_B!(X::StridedVecOrMat{T}, lu::UmfpackLU{T}, B::StridedVecOrMat{T}) where {T<:UMFVTypes} = - Base.LinAlg.ldiv!(X, lu, B) - Base.At_ldiv_B!(X::StridedVecOrMat{T}, lu::UmfpackLU{T}, B::StridedVecOrMat{T}) where {T<:UMFVTypes} = - Base.LinAlg.ldiv!(X, transpose(lu), B) - Base.Ac_ldiv_B!(X::StridedVecOrMat{T}, lu::UmfpackLU{T}, B::StridedVecOrMat{T}) where {T<:UMFVTypes} = - Base.LinAlg.ldiv!(X, adjoint(lu), B) - Base.A_ldiv_B!(X::StridedVecOrMat{Tb}, lu::UmfpackLU{Float64}, B::StridedVecOrMat{Tb}) where {Tb<:Complex} = - Base.LinAlg.ldiv!(X, lu, B) - Base.At_ldiv_B!(X::StridedVecOrMat{Tb}, lu::UmfpackLU{Float64}, B::StridedVecOrMat{Tb}) where {Tb<:Complex} = - Base.LinAlg.ldiv!(X, transpose(lu), B) - Base.Ac_ldiv_B!(X::StridedVecOrMat{Tb}, lu::UmfpackLU{Float64}, B::StridedVecOrMat{Tb}) where {Tb<:Complex} = - Base.LinAlg.ldiv!(X, adjoint(lu), B) - Base.A_ldiv_B!(lu::UmfpackLU{T}, B::StridedVecOrMat{T}) where {T<:UMFVTypes} = Base.LinAlg.ldiv!(lu, B) - Base.At_ldiv_B!(lu::UmfpackLU{T}, B::StridedVecOrMat{T}) where {T<:UMFVTypes} = Base.LinAlg.ldiv!(transpose(lu), B) - Base.Ac_ldiv_B!(lu::UmfpackLU{T}, B::StridedVecOrMat{T}) where {T<:UMFVTypes} = Base.LinAlg.ldiv!(adjoint(lu), B) - Base.A_ldiv_B!(lu::UmfpackLU{Float64}, B::StridedVecOrMat{<:Complex}) = Base.LinAlg.ldiv!(lu, B) - Base.At_ldiv_B!(lu::UmfpackLU{Float64}, B::StridedVecOrMat{<:Complex}) = Base.LinAlg.ldiv!(transpose(lu), B) - Base.Ac_ldiv_B!(lu::UmfpackLU{Float64}, B::StridedVecOrMat{<:Complex}) = Base.LinAlg.ldiv!(adjoint(lu), B) + using LinearAlgebra: Adjoint, Transpose + LinearAlgebra.A_ldiv_B!(X::StridedVecOrMat{T}, lu::UmfpackLU{T}, B::StridedVecOrMat{T}) where {T<:UMFVTypes} = + LinearAlgebra.ldiv!(X, lu, B) + LinearAlgebra.At_ldiv_B!(X::StridedVecOrMat{T}, lu::UmfpackLU{T}, B::StridedVecOrMat{T}) where {T<:UMFVTypes} = + LinearAlgebra.ldiv!(X, transpose(lu), B) + LinearAlgebra.Ac_ldiv_B!(X::StridedVecOrMat{T}, lu::UmfpackLU{T}, B::StridedVecOrMat{T}) where {T<:UMFVTypes} = + LinearAlgebra.ldiv!(X, adjoint(lu), B) + LinearAlgebra.A_ldiv_B!(X::StridedVecOrMat{Tb}, lu::UmfpackLU{Float64}, B::StridedVecOrMat{Tb}) where {Tb<:Complex} = + LinearAlgebra.ldiv!(X, lu, B) + LinearAlgebra.At_ldiv_B!(X::StridedVecOrMat{Tb}, lu::UmfpackLU{Float64}, B::StridedVecOrMat{Tb}) where {Tb<:Complex} = + LinearAlgebra.ldiv!(X, transpose(lu), B) + LinearAlgebra.Ac_ldiv_B!(X::StridedVecOrMat{Tb}, lu::UmfpackLU{Float64}, B::StridedVecOrMat{Tb}) where {Tb<:Complex} = + LinearAlgebra.ldiv!(X, adjoint(lu), B) + LinearAlgebra.A_ldiv_B!(lu::UmfpackLU{T}, B::StridedVecOrMat{T}) where {T<:UMFVTypes} = LinearAlgebra.ldiv!(lu, B) + LinearAlgebra.At_ldiv_B!(lu::UmfpackLU{T}, B::StridedVecOrMat{T}) where {T<:UMFVTypes} = LinearAlgebra.ldiv!(transpose(lu), B) + 
LinearAlgebra.Ac_ldiv_B!(lu::UmfpackLU{T}, B::StridedVecOrMat{T}) where {T<:UMFVTypes} = LinearAlgebra.ldiv!(adjoint(lu), B) + LinearAlgebra.A_ldiv_B!(lu::UmfpackLU{Float64}, B::StridedVecOrMat{<:Complex}) = LinearAlgebra.ldiv!(lu, B) + LinearAlgebra.At_ldiv_B!(lu::UmfpackLU{Float64}, B::StridedVecOrMat{<:Complex}) = LinearAlgebra.ldiv!(transpose(lu), B) + LinearAlgebra.Ac_ldiv_B!(lu::UmfpackLU{Float64}, B::StridedVecOrMat{<:Complex}) = LinearAlgebra.ldiv!(adjoint(lu), B) end # A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from src/spqr.jl, to deprecate @eval SuiteSparse.SPQR begin - using Base.LinAlg: Adjoint, Transpose - Base.A_mul_Bc!(A::StridedMatrix, Q::QRSparseQ) = Base.LinAlg.mul!(A, adjoint(Q)) - Base.Ac_mul_B!(Q::QRSparseQ, A::StridedVecOrMat) = Base.LinAlg.mul!(adjoint(Q), A) - Base.A_mul_B!(A::StridedMatrix, Q::QRSparseQ) = Base.LinAlg.mul!(A, Q) - Base.A_mul_B!(Q::QRSparseQ, A::StridedVecOrMat) = Base.LinAlg.mul!(Q, A) + using LinearAlgebra: Adjoint, Transpose + LinearAlgebra.A_mul_Bc!(A::StridedMatrix, Q::QRSparseQ) = LinearAlgebra.mul!(A, adjoint(Q)) + LinearAlgebra.Ac_mul_B!(Q::QRSparseQ, A::StridedVecOrMat) = LinearAlgebra.mul!(adjoint(Q), A) + LinearAlgebra.A_mul_B!(A::StridedMatrix, Q::QRSparseQ) = LinearAlgebra.mul!(A, Q) + LinearAlgebra.A_mul_B!(Q::QRSparseQ, A::StridedVecOrMat) = LinearAlgebra.mul!(Q, A) end diff --git a/stdlib/SuiteSparse/src/spqr.jl b/stdlib/SuiteSparse/src/spqr.jl index acd3d37bbd2c7d..742342d7efa348 100644 --- a/stdlib/SuiteSparse/src/spqr.jl +++ b/stdlib/SuiteSparse/src/spqr.jl @@ -3,6 +3,7 @@ module SPQR import Base: \ +using LinearAlgebra # ordering options */ const ORDERING_FIXED = Int32(0) @@ -103,7 +104,7 @@ end # Struct for storing sparse QR from SPQR such that # A[invperm(rpivinv), cpiv] = (I - factors[:,1]*τ[1]*factors[:,1]')*...*(I - factors[:,k]*τ[k]*factors[:,k]')*R # with k = size(factors, 2). 
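# NOTE (illustrative aside, not part of the original patch): the comment above describes the
# sparse QR as a product of Householder reflectors (I - factors[:,l]*τ[l]*factors[:,l]') times R,
# and the LinearAlgebra.mul! methods further down apply those reflectors column by column via
# axpy!. A dense toy version of that application loop, with a hypothetical helper name and
# assuming only that LinearAlgebra is loaded:
using LinearAlgebra
function apply_reflectors!(factors::AbstractMatrix, τ::AbstractVector, A::AbstractVecOrMat)
    for l in size(factors, 2):-1:1
        h = view(factors, :, l)
        for j in 1:size(A, 2)
            a = view(A, :, j)
            # a .-= τ[l] * (h' * a) * h, i.e. apply (I - τ[l]*h*h') to this column in place
            LinearAlgebra.axpy!(-τ[l] * dot(h, a), h, a)
        end
    end
    return A
end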
-struct QRSparse{Tv,Ti} <: LinAlg.Factorization{Tv} +struct QRSparse{Tv,Ti} <: LinearAlgebra.Factorization{Tv} factors::SparseMatrixCSC{Tv,Ti} τ::Vector{Tv} R::SparseMatrixCSC{Tv,Ti} @@ -124,7 +125,7 @@ function Base.size(F::QRSparse, i::Integer) end end -struct QRSparseQ{Tv<:CHOLMOD.VTypes,Ti<:Integer} <: LinAlg.AbstractQ{Tv} +struct QRSparseQ{Tv<:CHOLMOD.VTypes,Ti<:Integer} <: LinearAlgebra.AbstractQ{Tv} factors::SparseMatrixCSC{Tv,Ti} τ::Vector{Tv} end @@ -135,7 +136,7 @@ Base.size(Q::QRSparseQ) = (size(Q.factors, 1), size(Q.factors, 1)) _default_tol(A::SparseMatrixCSC) = 20*sum(size(A))*eps(real(eltype(A)))*maximum(norm(view(A, :, i))^2 for i in 1:size(A, 2)) -function Base.LinAlg.qrfact(A::SparseMatrixCSC{Tv}; tol = _default_tol(A)) where {Tv <: CHOLMOD.VTypes} +function LinearAlgebra.qrfact(A::SparseMatrixCSC{Tv}; tol = _default_tol(A)) where {Tv <: CHOLMOD.VTypes} R = Ref{Ptr{CHOLMOD.C_Sparse{Tv}}}() E = Ref{Ptr{CHOLMOD.SuiteSparse_long}}() H = Ref{Ptr{CHOLMOD.C_Sparse{Tv}}}() @@ -193,11 +194,11 @@ Column permutation: 2 ``` """ -Base.LinAlg.qrfact(A::SparseMatrixCSC; tol = _default_tol(A)) = qrfact(A, Val{true}, tol = tol) +LinearAlgebra.qrfact(A::SparseMatrixCSC; tol = _default_tol(A)) = qrfact(A, Val{true}, tol = tol) -Base.LinAlg.qr(A::SparseMatrixCSC; tol = _default_tol(A)) = qr(A, Val{true}, tol = tol) +LinearAlgebra.qr(A::SparseMatrixCSC; tol = _default_tol(A)) = qr(A, Val{true}, tol = tol) -function Base.LinAlg.mul!(Q::QRSparseQ, A::StridedVecOrMat) +function LinearAlgebra.mul!(Q::QRSparseQ, A::StridedVecOrMat) if size(A, 1) != size(Q, 1) throw(DimensionMismatch("size(Q) = $(size(Q)) but size(A) = $(size(A))")) end @@ -206,13 +207,13 @@ function Base.LinAlg.mul!(Q::QRSparseQ, A::StridedVecOrMat) h = view(Q.factors, :, l) for j in 1:size(A, 2) a = view(A, :, j) - LinAlg.axpy!(τl*dot(h, a), h, a) + LinearAlgebra.axpy!(τl*dot(h, a), h, a) end end return A end -function Base.LinAlg.mul!(A::StridedMatrix, Q::QRSparseQ) +function LinearAlgebra.mul!(A::StridedMatrix, Q::QRSparseQ) if size(A, 2) != size(Q, 1) throw(DimensionMismatch("size(Q) = $(size(Q)) but size(A) = $(size(A))")) end @@ -220,13 +221,13 @@ function Base.LinAlg.mul!(A::StridedMatrix, Q::QRSparseQ) for l in 1:size(Q.factors, 2) τl = -Q.τ[l] h = view(Q.factors, :, l) - Base.LinAlg.mul!(tmp, A, h) - LinAlg.lowrankupdate!(A, tmp, h, τl) + LinearAlgebra.mul!(tmp, A, h) + LinearAlgebra.lowrankupdate!(A, tmp, h, τl) end return A end -function Base.LinAlg.mul!(adjQ::Adjoint{<:Any,<:QRSparseQ}, A::StridedVecOrMat) +function LinearAlgebra.mul!(adjQ::Adjoint{<:Any,<:QRSparseQ}, A::StridedVecOrMat) Q = adjQ.parent if size(A, 1) != size(Q, 1) throw(DimensionMismatch("size(Q) = $(size(Q)) but size(A) = $(size(A))")) @@ -236,13 +237,13 @@ function Base.LinAlg.mul!(adjQ::Adjoint{<:Any,<:QRSparseQ}, A::StridedVecOrMat) h = view(Q.factors, :, l) for j in 1:size(A, 2) a = view(A, :, j) - LinAlg.axpy!(τl'*dot(h, a), h, a) + LinearAlgebra.axpy!(τl'*dot(h, a), h, a) end end return A end -function Base.LinAlg.mul!(A::StridedMatrix, adjQ::Adjoint{<:Any,<:QRSparseQ}) +function LinearAlgebra.mul!(A::StridedMatrix, adjQ::Adjoint{<:Any,<:QRSparseQ}) Q = adjQ.parent if size(A, 2) != size(Q, 1) throw(DimensionMismatch("size(Q) = $(size(Q)) but size(A) = $(size(A))")) @@ -251,8 +252,8 @@ function Base.LinAlg.mul!(A::StridedMatrix, adjQ::Adjoint{<:Any,<:QRSparseQ}) for l in size(Q.factors, 2):-1:1 τl = -Q.τ[l] h = view(Q.factors, :, l) - Base.LinAlg.mul!(tmp, A, h) - LinAlg.lowrankupdate!(A, tmp, h, τl') + LinearAlgebra.mul!(tmp, A, h) + 
LinearAlgebra.lowrankupdate!(A, tmp, h, τl') end return A end @@ -374,13 +375,13 @@ function _ldiv_basic(F::QRSparse, B::StridedVecOrMat) X0 = view(X, 1:size(B, 1), :) # Apply Q' to B - Base.LinAlg.mul!(adjoint(F.Q), X0) + LinearAlgebra.mul!(adjoint(F.Q), X0) # Zero out to get basic solution X[rnk + 1:end, :] = 0 # Solve R*X = B - Base.LinAlg.ldiv!(UpperTriangular(view(F.R, :, Base.OneTo(rnk))), view(X0, Base.OneTo(rnk), :)) + LinearAlgebra.ldiv!(UpperTriangular(view(F.R, :, Base.OneTo(rnk))), view(X0, Base.OneTo(rnk), :)) # Apply right permutation and extract solution from X return getindex(X, ntuple(i -> i == 1 ? invperm(F.cpiv) : :, Val(ndims(B)))...) diff --git a/stdlib/SuiteSparse/src/umfpack.jl b/stdlib/SuiteSparse/src/umfpack.jl index 371f1dc5d91d7b..9cc32f2261e233 100644 --- a/stdlib/SuiteSparse/src/umfpack.jl +++ b/stdlib/SuiteSparse/src/umfpack.jl @@ -5,7 +5,8 @@ module UMFPACK export UmfpackLU import Base: (\), findnz, getproperty, show, size -import Base.LinAlg: Factorization, det, lufact, ldiv! +using LinearAlgebra +import LinearAlgebra: Factorization, det, lufact, ldiv! using SparseArrays import SparseArrays: nnz @@ -21,7 +22,7 @@ function umferror(status::Integer) if status==UMFPACK_OK return elseif status==UMFPACK_WARNING_singular_matrix - throw(LinAlg.SingularException(0)) + throw(LinearAlgebra.SingularException(0)) elseif status==UMFPACK_WARNING_determinant_underflow throw(MatrixIllConditionedException("the determinant is nonzero but underflowed")) elseif status==UMFPACK_WARNING_determinant_overflow @@ -103,8 +104,8 @@ mutable struct UmfpackLU{Tv<:UMFVTypes,Ti<:UMFITypes} <: Factorization{Tv} nzval::Vector{Tv} end -Base.LinAlg.adjoint(F::UmfpackLU) = Adjoint(F) -Base.LinAlg.transpose(F::UmfpackLU) = Transpose(F) +Base.adjoint(F::UmfpackLU) = Adjoint(F) +Base.transpose(F::UmfpackLU) = Transpose(F) """ lufact(A::SparseMatrixCSC) -> F::UmfpackLU diff --git a/stdlib/SuiteSparse/test/cholmod.jl b/stdlib/SuiteSparse/test/cholmod.jl index 7d8ada341e5a34..1efa24ecfba51a 100644 --- a/stdlib/SuiteSparse/test/cholmod.jl +++ b/stdlib/SuiteSparse/test/cholmod.jl @@ -377,14 +377,14 @@ end C = A1 + copy(adjoint(A1)) λmaxC = eigmax(Array(C)) b = fill(1., size(A1, 1)) - @test_throws LinAlg.PosDefException cholfact(C - 2λmaxC*I)\b - @test_throws LinAlg.PosDefException cholfact(C, shift=-2λmaxC)\b + @test_throws LinearAlgebra.PosDefException cholfact(C - 2λmaxC*I)\b + @test_throws LinearAlgebra.PosDefException cholfact(C, shift=-2λmaxC)\b @test_throws ArgumentError ldltfact(C - C[1,1]*I)\b @test_throws ArgumentError ldltfact(C, shift=-real(C[1,1]))\b @test !isposdef(cholfact(C - 2λmaxC*I)) @test !isposdef(cholfact(C, shift=-2λmaxC)) - @test !LinAlg.issuccess(ldltfact(C - C[1,1]*I)) - @test !LinAlg.issuccess(ldltfact(C, shift=-real(C[1,1]))) + @test !LinearAlgebra.issuccess(ldltfact(C - C[1,1]*I)) + @test !LinearAlgebra.issuccess(ldltfact(C, shift=-real(C[1,1]))) F = cholfact(A1pd) tmp = IOBuffer() show(tmp, F) @@ -699,9 +699,9 @@ end @test s.is_super == 0 @test F\b ≈ fill(1., m+n) F2 = cholfact(M) - @test !LinAlg.issuccess(F2) + @test !LinearAlgebra.issuccess(F2) ldltfact!(F2, M) - @test LinAlg.issuccess(F2) + @test LinearAlgebra.issuccess(F2) @test F2\b ≈ fill(1., m+n) end @@ -804,11 +804,11 @@ end @testset "Issue #22335" begin local A, F A = sparse(1.0I, 3, 3) - @test LinAlg.issuccess(cholfact(A)) + @test LinearAlgebra.issuccess(cholfact(A)) A[3, 3] = -1 F = cholfact(A) - @test !LinAlg.issuccess(F) - @test LinAlg.issuccess(ldltfact!(F, A)) + @test !LinearAlgebra.issuccess(F) + @test 
LinearAlgebra.issuccess(ldltfact!(F, A)) A[3, 3] = 1 @test A[:, 3:-1:1]\fill(1., 3) == [1, 1, 1] end diff --git a/stdlib/SuiteSparse/test/runtests.jl b/stdlib/SuiteSparse/test/runtests.jl index 861dbb4a002468..d317023c6c84d9 100644 --- a/stdlib/SuiteSparse/test/runtests.jl +++ b/stdlib/SuiteSparse/test/runtests.jl @@ -1,7 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license using Test, Random -using SuiteSparse, SparseArrays +using SuiteSparse, LinearAlgebra, SparseArrays if Base.USE_GPL_LIBS include("umfpack.jl") diff --git a/stdlib/SuiteSparse/test/spqr.jl b/stdlib/SuiteSparse/test/spqr.jl index ff9a93eedf7894..e59e1ff2410262 100644 --- a/stdlib/SuiteSparse/test/spqr.jl +++ b/stdlib/SuiteSparse/test/spqr.jl @@ -2,7 +2,7 @@ using SuiteSparse.SPQR using SuiteSparse.CHOLMOD -using Base.LinAlg: mul!, Adjoint, Transpose +using LinearAlgebra: mul!, Adjoint, Transpose @testset "Sparse QR" begin m, n = 100, 10 diff --git a/stdlib/SuiteSparse/test/umfpack.jl b/stdlib/SuiteSparse/test/umfpack.jl index ec747d4a42d1dc..1e48075937895f 100644 --- a/stdlib/SuiteSparse/test/umfpack.jl +++ b/stdlib/SuiteSparse/test/umfpack.jl @@ -8,7 +8,7 @@ # based on deps/Suitesparse-4.0.2/UMFPACK/Demo/umfpack_di_demo.c using SuiteSparse: increment! - using Base.LinAlg: Adjoint, Transpose + using LinearAlgebra: Adjoint, Transpose A0 = sparse(increment!([0,4,1,1,2,2,0,1,2,3,4,4]), increment!([0,4,0,2,1,2,1,4,3,2,1,2]), @@ -36,7 +36,7 @@ @test x ≈ float([1:5;]) @test z === x y = similar(z) - Base.LinAlg.ldiv!(y, lua, complex.(b)) + LinearAlgebra.ldiv!(y, lua, complex.(b)) @test y ≈ x @test A*x ≈ b @@ -162,9 +162,9 @@ X = zeros(Complex{Float64}, N, N) B = complex.(rand(N, N), rand(N, N)) luA, lufA = lufact(A), lufact(Array(A)) - @test Base.LinAlg.ldiv!(copy(X), luA, B) ≈ Base.LinAlg.ldiv!(copy(X), lufA, B) - @test Base.LinAlg.ldiv!(copy(X), adjoint(luA), B) ≈ Base.LinAlg.ldiv!(copy(X), adjoint(lufA), B) - @test Base.LinAlg.ldiv!(copy(X), transpose(luA), B) ≈ Base.LinAlg.ldiv!(copy(X), transpose(lufA), B) + @test LinearAlgebra.ldiv!(copy(X), luA, B) ≈ LinearAlgebra.ldiv!(copy(X), lufA, B) + @test LinearAlgebra.ldiv!(copy(X), adjoint(luA), B) ≈ LinearAlgebra.ldiv!(copy(X), adjoint(lufA), B) + @test LinearAlgebra.ldiv!(copy(X), transpose(luA), B) ≈ LinearAlgebra.ldiv!(copy(X), transpose(lufA), B) end end diff --git a/stdlib/Test/src/Test.jl b/stdlib/Test/src/Test.jl index 6cbdb195d037a3..fa9b99c65ab177 100644 --- a/stdlib/Test/src/Test.jl +++ b/stdlib/Test/src/Test.jl @@ -1255,32 +1255,6 @@ macro inferred(ex) end) end -# Test approximate equality of vectors or columns of matrices modulo floating -# point roundoff and phase (sign) differences. -# -# This function is designed to test for equality between vectors of floating point -# numbers when the vectors are defined only up to a global phase or sign, such as -# normalized eigenvectors or singular vectors. The global phase is usually -# defined consistently, but may occasionally change due to small differences in -# floating point rounding noise or rounding modes, or through the use of -# different conventions in different algorithms. As a result, most tests checking -# such vectors have to detect and discard such overall phase differences. -# -# Inputs: -# a, b:: StridedVecOrMat to be compared -# err :: Default: m^3*(eps(S)+eps(T)), where m is the number of rows -# -# Raises an error if any columnwise vector norm exceeds err. Otherwise, returns -# nothing. 
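# NOTE (illustrative aside, not part of the original patch): the helper whose comment block is
# removed above (and whose definition is removed just below) compares vectors only up to an
# overall sign/phase, since routines returning normalized eigen- or singular vectors may hand
# back either v or -v. The essence of that check as a standalone toy example:
using LinearAlgebra, Test
v1 = [0.6, 0.8]
v2 = -v1                                   # same direction, opposite sign
@test min(norm(v1 - v2), norm(v1 + v2)) ≈ 0 atol = 1e-12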
-function test_approx_eq_modphase(a::StridedVecOrMat{S}, b::StridedVecOrMat{T}, - err = length(axes(a,1))^3*(eps(S)+eps(T))) where {S<:Real,T<:Real} - @test axes(a,1) == axes(b,1) && axes(a,2) == axes(b,2) - for i in axes(a,2) - v1, v2 = a[:, i], b[:, i] - @test min(abs(norm(v1-v2)),abs(norm(v1+v2))) ≈ 0.0 atol=err - end -end - """ detect_ambiguities(mod1, mod2...; imported=false, recursive=false, ambiguous_bottom=false) diff --git a/test/abstractarray.jl b/test/abstractarray.jl index 820c2ae81ad5d4..2d706aef04b6a8 100644 --- a/test/abstractarray.jl +++ b/test/abstractarray.jl @@ -1,6 +1,6 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Random, SparseArrays +using Random, LinearAlgebra, SparseArrays A = rand(5,4,3) @testset "Bounds checking" begin @@ -682,9 +682,9 @@ end # checksquare function test_checksquare() - @test LinAlg.checksquare(zeros(2,2)) == 2 - @test LinAlg.checksquare(zeros(2,2),zeros(3,3)) == [2,3] - @test_throws DimensionMismatch LinAlg.checksquare(zeros(2,3)) + @test LinearAlgebra.checksquare(zeros(2,2)) == 2 + @test LinearAlgebra.checksquare(zeros(2,2),zeros(3,3)) == [2,3] + @test_throws DimensionMismatch LinearAlgebra.checksquare(zeros(2,3)) end #----- run tests -------------------------------------------------------------# diff --git a/test/ambiguous.jl b/test/ambiguous.jl index c3ba4b34e58c0b..aee8d446a9ada9 100644 --- a/test/ambiguous.jl +++ b/test/ambiguous.jl @@ -9,7 +9,7 @@ ambig(x::Int, y::Int) = 4 ambig(x::Number, y) = 5 # END OF LINE NUMBER SENSITIVITY -using SparseArrays +using LinearAlgebra, SparseArrays # For curmod_* include("testenv.jl") @@ -283,9 +283,9 @@ end @test_broken need_to_handle_undef_sparam == Set() pop!(need_to_handle_undef_sparam, which(Base.cat, Tuple{Any, AbstractArray})) pop!(need_to_handle_undef_sparam, which(Base.byteenv, (Union{AbstractArray{Pair{T}, 1}, Tuple{Vararg{Pair{T}}}} where T<:AbstractString,))) - pop!(need_to_handle_undef_sparam, which(Base.LinAlg.promote_leaf_eltypes, (Union{AbstractArray{T}, Tuple{Vararg{T}}} where T<:Number,))) - pop!(need_to_handle_undef_sparam, which(Base.LinAlg.promote_leaf_eltypes, - (Union{AbstractArray{T}, Tuple{Vararg{T}}} where T<:(AbstractArray{<:Number}),))) + # pop!(need_to_handle_undef_sparam, which(LinearAlgebra.promote_leaf_eltypes, (Union{AbstractArray{T}, Tuple{Vararg{T}}} where T<:Number,))) + # pop!(need_to_handle_undef_sparam, which(LinearAlgebra.promote_leaf_eltypes, + # (Union{AbstractArray{T}, Tuple{Vararg{T}}} where T<:(AbstractArray{<:Number}),))) pop!(need_to_handle_undef_sparam, which(Base.cat, (Any, SparseArrays._TypedDenseConcatGroup{T} where T))) pop!(need_to_handle_undef_sparam, which(Base.float, Tuple{AbstractArray{Union{Missing, T},N} where {T, N}})) pop!(need_to_handle_undef_sparam, which(Base.convert, Tuple{Type{Union{Missing, T}} where T, Any})) diff --git a/test/arrayops.jl b/test/arrayops.jl index 7157ca70e33603..747a6af9e495ae 100644 --- a/test/arrayops.jl +++ b/test/arrayops.jl @@ -5,7 +5,7 @@ isdefined(Main, :TestHelpers) || @eval Main include("TestHelpers.jl") using Main.TestHelpers.OAs using SparseArrays -using Random +using Random, LinearAlgebra @testset "basics" begin @test length([1, 2, 3]) == 3 @@ -1723,7 +1723,7 @@ end b = rand(6,7) @test_throws BoundsError copyto!(a,b) @test_throws ArgumentError copyto!(a,2:3,1:3,b,1:5,2:7) - @test_throws ArgumentError Base.copy_transpose!(a,2:3,1:3,b,1:5,2:7) + @test_throws ArgumentError LinearAlgebra.copy_transpose!(a,2:3,1:3,b,1:5,2:7) end module RetTypeDecl diff --git 
a/test/bitarray.jl b/test/bitarray.jl index 9d768195f0a9e2..bb358d713eb426 100644 --- a/test/bitarray.jl +++ b/test/bitarray.jl @@ -1,7 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license using Base: findprevnot, findnextnot -using Random +using Random, LinearAlgebra tc(r1::NTuple{N,Any}, r2::NTuple{N,Any}) where {N} = all(x->tc(x...), [zip(r1,r2)...]) tc(r1::BitArray{N}, r2::Union{BitArray{N},Array{Bool,N}}) where {N} = true diff --git a/test/broadcast.jl b/test/broadcast.jl index df176eacde2c16..f094027160e27b 100644 --- a/test/broadcast.jl +++ b/test/broadcast.jl @@ -134,7 +134,7 @@ for arr in (identity, as_sub) @test A == fill(7, 2, 2) A = arr(zeros(3,3)) broadcast_setindex!(A, 10:12, 1:3, 1:3) - @test A == diagm(0 => 10:12) + @test A == [10 0 0; 0 11 0; 0 0 12] @test_throws BoundsError broadcast_setindex!(A, 7, [1,-1], [1 2]) for f in ((==), (<) , (!=), (<=)) diff --git a/test/choosetests.jl b/test/choosetests.jl index 92d7d30c2c1308..4984937dfc2418 100644 --- a/test/choosetests.jl +++ b/test/choosetests.jl @@ -33,7 +33,7 @@ in the `choices` argument: """ -> function choosetests(choices = []) testnames = [ - "linalg", "subarray", "core", "compiler", "worlds", + "subarray", "core", "compiler", "worlds", "keywordargs", "numbers", "subtype", "char", "strings", "triplequote", "unicode", "intrinsics", "dict", "hashing", "iobuffer", "staged", "offsetarray", @@ -111,22 +111,22 @@ function choosetests(choices = []) prepend!(tests, ["subarray"]) end - linalgtests = ["linalg/triangular", "linalg/qr", "linalg/dense", - "linalg/matmul", "linalg/schur", "linalg/special", - "linalg/eigen", "linalg/bunchkaufman", "linalg/svd", - "linalg/lapack", "linalg/tridiag", "linalg/bidiag", - "linalg/diagonal", "linalg/pinv", "linalg/givens", - "linalg/cholesky", "linalg/lu", "linalg/symmetric", - "linalg/generic", "linalg/uniformscaling", "linalg/lq", - "linalg/hessenberg", "linalg/blas", "linalg/adjtrans"] - - if "linalg" in skip_tests - filter!(x -> (x != "linalg" && !(x in linalgtests)), tests) - elseif "linalg" in tests - # specifically selected case - filter!(x -> x != "linalg", tests) - prepend!(tests, linalgtests) - end + # linalgtests = ["linalg/triangular", "linalg/qr", "linalg/dense", + # "linalg/matmul", "linalg/schur", "linalg/special", + # "linalg/eigen", "linalg/bunchkaufman", "linalg/svd", + # "linalg/lapack", "linalg/tridiag", "linalg/bidiag", + # "linalg/diagonal", "linalg/pinv", "linalg/givens", + # "linalg/cholesky", "linalg/lu", "linalg/symmetric", + # "linalg/generic", "linalg/uniformscaling", "linalg/lq", + # "linalg/hessenberg", "linalg/blas", "linalg/adjtrans"] + + # if "linalg" in skip_tests + # filter!(x -> (x != "linalg" && !(x in linalgtests)), tests) + # elseif "linalg" in tests + # # specifically selected case + # filter!(x -> x != "linalg", tests) + # prepend!(tests, linalgtests) + # end compilertests = ["compiler/compiler", "compiler/validation"] diff --git a/test/compile.jl b/test/compile.jl index ff8f7157135e72..b2787eab7f8a05 100644 --- a/test/compile.jl +++ b/test/compile.jl @@ -219,7 +219,7 @@ try # plus modules included in the system image Dict(s => Base.module_uuid(Base.root_module(s)) for s in [:Base64, :CRC32c, :Dates, :DelimitedFiles, :Distributed, :FileWatching, - :Future, :IterativeEigensolvers, :Libdl, :Logging, :Mmap, :Printf, + :Future, :IterativeEigensolvers, :Libdl, :LinearAlgebra, :Logging, :Mmap, :Printf, :Profile, :Random, :SharedArrays, :SparseArrays, :SuiteSparse, :Test, :Unicode])) @test discard_module.(deps) == deps1 
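# NOTE (illustrative aside, not part of the original patch): the test edits in this patch all
# follow the same mechanical migration: names that previously lived under Base.LinAlg are now
# reached by loading the LinearAlgebra stdlib, either unqualified (for exported names such as
# norm) or fully qualified. A minimal sketch of migrated user code:
using LinearAlgebra
x = [3.0, 4.0]
norm(x)                                                  # exported: no qualification needed
C = zeros(2, 2)
LinearAlgebra.mul!(C, ones(2, 2), Matrix(1.0I, 2, 2))    # qualified form also works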
diff --git a/test/complex.jl b/test/complex.jl index f6c99b9232ff69..00128852b13ef7 100644 --- a/test/complex.jl +++ b/test/complex.jl @@ -1,5 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +using LinearAlgebra + @test reim(2 + 3im) == (2, 3) for T in (Int64, Float64) diff --git a/test/core.jl b/test/core.jl index 61218c91c3f295..6ec2878a78a5fd 100644 --- a/test/core.jl +++ b/test/core.jl @@ -1955,6 +1955,7 @@ test5536(a::Union{Real, AbstractArray}) = "Non-splatting" # issue #6142 import Base: + +import LinearAlgebra: UniformScaling, I mutable struct A6142 <: AbstractMatrix{Float64}; end +(x::A6142, y::UniformScaling) = "UniformScaling method called" +(x::A6142, y::AbstractArray) = "AbstractArray method called" diff --git a/test/dimensionful.jl b/test/dimensionful.jl index 5c526c17e93ff7..21c9747806d545 100644 --- a/test/dimensionful.jl +++ b/test/dimensionful.jl @@ -32,7 +32,8 @@ canonical_p(p) = isinteger(p) ? Int(p) : Rational{Int}(p) Base.abs(x::Furlong{p}) where {p} = Furlong{p}(abs(x.val)) @generated Base.abs2(x::Furlong{p}) where {p} = :(Furlong{$(canonical_p(2p))}(abs2(x.val))) @generated Base.inv(x::Furlong{p}) where {p} = :(Furlong{$(canonical_p(-p))}(inv(x.val))) -Base.sylvester(a::Furlong,b::Furlong,c::Furlong) = -c / (a + b) +import LinearAlgebra: sylvester +sylvester(a::Furlong,b::Furlong,c::Furlong) = -c / (a + b) for f in (:isfinite, :isnan, :isreal) @eval Base.$f(x::Furlong) = $f(x.val) diff --git a/test/docs.jl b/test/docs.jl index ed143bd6f2b35c..093397ebdeea0a 100644 --- a/test/docs.jl +++ b/test/docs.jl @@ -553,7 +553,12 @@ let d = @doc(I15424.LazyHelp) end # Issue #13385. -@test @doc(I) !== nothing +struct I13385 + λ +end +"issue #13385" +const i13385 = I13385(true) +@test @doc(i13385) !== nothing # Issue #12700. @test docstrings_equal(@doc(DocsTest.@m), doc"Inner.@m") @@ -634,7 +639,7 @@ f12593_2() = 1 @test (@doc f12593_2) !== nothing # @test Docs.doc(svdvals, Tuple{Vector{Float64}}) === nothing -@test Docs.doc(svdvals, Tuple{Float64}) !== nothing +# @test Docs.doc(svdvals, Tuple{Float64}) !== nothing # crude test to make sure we sort docstring output by method specificity @test !docstrings_equal(Docs.doc(getindex, Tuple{Dict{Int,Int},Int}), @@ -885,13 +890,13 @@ let x = Binding(Base, Symbol("@time")) @test @var(Base.Pkg.@time) == x end -let x = Binding(Base.LinAlg, :norm) - @test defined(x) == true - @test @var(norm) == x - @test @var(Base.norm) == x - @test @var(Base.LinAlg.norm) == x - @test @var(Base.Pkg.Dir.norm) == x -end +# let x = Binding(LinearAlgebra, :norm) +# @test defined(x) == true +# @test @var(norm) == x +# @test @var(Base.norm) == x +# @test @var(LinearAlgebra.norm) == x +# @test @var(Base.Pkg.Dir.norm) == x +# end let x = Binding(Core, :Int) @test defined(x) == true diff --git a/test/hashing.jl b/test/hashing.jl index 4de1b7d9886316..4b210c1e3537bf 100644 --- a/test/hashing.jl +++ b/test/hashing.jl @@ -1,6 +1,6 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Random, SparseArrays +using Random, LinearAlgebra, SparseArrays types = Any[ Bool, diff --git a/test/math.jl b/test/math.jl index 8571b6299917e9..1ff7cb17b7ac45 100644 --- a/test/math.jl +++ b/test/math.jl @@ -1,6 +1,7 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license using Random +using LinearAlgebra function isnan_type(::Type{T}, x) where T isa(x, T) && isnan(x) diff --git a/test/numbers.jl b/test/numbers.jl index 570ffa5813d590..de416f8253c26d 100644 --- a/test/numbers.jl +++ b/test/numbers.jl @@ -2,6 +2,7 @@ using Base.MathConstants using Random +using LinearAlgebra const ≣ = isequal # convenient for comparing NaNs diff --git a/test/offsetarray.jl b/test/offsetarray.jl index 60ec245c711f74..f757a334433869 100644 --- a/test/offsetarray.jl +++ b/test/offsetarray.jl @@ -4,6 +4,7 @@ isdefined(Main, :TestHelpers) || @eval Main include(joinpath(dirname(@__FILE__), using Main.TestHelpers.OAs using DelimitedFiles using Random +using LinearAlgebra const OAs_name = join(fullname(OAs), ".") diff --git a/test/perf/blas/level3.jl b/test/perf/blas/level3.jl index 0a64a83519337c..198d7e2b356152 100644 --- a/test/perf/blas/level3.jl +++ b/test/perf/blas/level3.jl @@ -6,7 +6,7 @@ function matmultest(n, iter) a = rand(n,n) b = similar(a) for i=1:iter - Base.LinAlg.mul!(b, a, a) + LinearAlgebra.mul!(b, a, a) end b end diff --git a/test/perf/threads/stockcorr/pstockcorr.jl b/test/perf/threads/stockcorr/pstockcorr.jl index a14a42b6447bc4..69a9a52e0f916a 100644 --- a/test/perf/threads/stockcorr/pstockcorr.jl +++ b/test/perf/threads/stockcorr/pstockcorr.jl @@ -48,7 +48,7 @@ function runpath!(n, Wiener, CorrWiener, SA, SB, T, UpperTriangle, k11, k12, k21 #for i = 1:n randn!(rngs[threadid()], Wiener) #randn!(rngs[1], Wiener) - Base.LinAlg.mul!(CorrWiener, Wiener, UpperTriangle) + LinearAlgebra.mul!(CorrWiener, Wiener, UpperTriangle) @simd for j = 2:T @inbounds SA[j, i] = SA[j-1, i] * exp(k11 + k12*CorrWiener[j-1, 1]) @inbounds SB[j, i] = SB[j-1, i] * exp(k21 + k22*CorrWiener[j-1, 2]) diff --git a/test/replutil.jl b/test/replutil.jl index 596d8aa9e4ec21..8602ac55ad3565 100644 --- a/test/replutil.jl +++ b/test/replutil.jl @@ -1,6 +1,6 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Random +using Random, LinearAlgebra # For curmod_* include("testenv.jl") diff --git a/test/runtests.jl b/test/runtests.jl index 93195a9e388506..71507ec947b902 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -47,12 +47,13 @@ move_to_node1("SharedArrays") # since it starts a lot of workers and can easily exceed the maximum memory max_worker_rss != typemax(Csize_t) && move_to_node1("Distributed") +import LinearAlgebra cd(dirname(@__FILE__)) do n = 1 if net_on n = min(Sys.CPU_CORES, length(tests)) n > 1 && addprocs_with_testenv(n) - BLAS.set_num_threads(1) + LinearAlgebra.BLAS.set_num_threads(1) end skipped = 0 diff --git a/test/show.jl b/test/show.jl index 48ec3c9f57245c..a604a29a23eb52 100644 --- a/test/show.jl +++ b/test/show.jl @@ -1,6 +1,6 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license -using SparseArrays +using LinearAlgebra, SparseArrays # For curmod_* include("testenv.jl") @@ -554,7 +554,7 @@ end #test methodshow.jl functions @test Base.inbase(Base) -@test Base.inbase(LinAlg) +@test !Base.inbase(LinearAlgebra) @test !Base.inbase(Core) let repr = sprint(show, "text/plain", methods(Base.inbase)) @@ -589,7 +589,7 @@ end @test replstr(Matrix(1.0I, 10, 10)) == "10×10 Array{Float64,2}:\n 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0\n 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0\n 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0\n 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0\n 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0\n 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0\n 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0" # an array too long vertically to fit on screen, and too long horizontally: @test replstr(Vector(1.:100.)) == "100-element Array{Float64,1}:\n 1.0\n 2.0\n 3.0\n 4.0\n 5.0\n 6.0\n 7.0\n 8.0\n 9.0\n 10.0\n ⋮ \n 92.0\n 93.0\n 94.0\n 95.0\n 96.0\n 97.0\n 98.0\n 99.0\n 100.0" -@test replstr(Vector(1.:100.)') == "1×100 Adjoint{Float64,Array{Float64,1}}:\n 1.0 2.0 3.0 4.0 5.0 6.0 7.0 … 95.0 96.0 97.0 98.0 99.0 100.0" +@test replstr(Vector(1.:100.)') == "1×100 LinearAlgebra.Adjoint{Float64,Array{Float64,1}}:\n 1.0 2.0 3.0 4.0 5.0 6.0 7.0 … 95.0 96.0 97.0 98.0 99.0 100.0" # too big in both directions to fit on screen: @test replstr((1.:100.)*(1:100)') == "100×100 Array{Float64,2}:\n 1.0 2.0 3.0 4.0 5.0 6.0 … 97.0 98.0 99.0 100.0\n 2.0 4.0 6.0 8.0 10.0 12.0 194.0 196.0 198.0 200.0\n 3.0 6.0 9.0 12.0 15.0 18.0 291.0 294.0 297.0 300.0\n 4.0 8.0 12.0 16.0 20.0 24.0 388.0 392.0 396.0 400.0\n 5.0 10.0 15.0 20.0 25.0 30.0 485.0 490.0 495.0 500.0\n 6.0 12.0 18.0 24.0 30.0 36.0 … 582.0 588.0 594.0 600.0\n 7.0 14.0 21.0 28.0 35.0 42.0 679.0 686.0 693.0 700.0\n 8.0 16.0 24.0 32.0 40.0 48.0 776.0 784.0 792.0 800.0\n 9.0 18.0 27.0 36.0 45.0 54.0 873.0 882.0 891.0 900.0\n 10.0 20.0 30.0 40.0 50.0 60.0 970.0 980.0 990.0 1000.0\n ⋮ ⋮ ⋱ \n 92.0 184.0 276.0 368.0 460.0 552.0 8924.0 9016.0 9108.0 9200.0\n 93.0 186.0 279.0 372.0 465.0 558.0 9021.0 9114.0 9207.0 9300.0\n 94.0 188.0 282.0 376.0 470.0 564.0 9118.0 9212.0 9306.0 9400.0\n 95.0 190.0 285.0 380.0 475.0 570.0 9215.0 9310.0 9405.0 9500.0\n 96.0 192.0 288.0 384.0 480.0 576.0 … 9312.0 9408.0 9504.0 9600.0\n 97.0 194.0 291.0 388.0 485.0 582.0 9409.0 9506.0 9603.0 9700.0\n 98.0 196.0 294.0 392.0 490.0 588.0 9506.0 9604.0 9702.0 9800.0\n 99.0 198.0 297.0 396.0 495.0 594.0 9603.0 9702.0 9801.0 9900.0\n 100.0 200.0 300.0 400.0 500.0 600.0 9700.0 9800.0 9900.0 10000.0" @@ -637,13 +637,13 @@ end # test structured zero matrix printing for select structured types let A = reshape(1:16, 4, 4) - @test replstr(Diagonal(A)) == "4×4 Diagonal{$(Int),Array{$(Int),1}}:\n 1 ⋅ ⋅ ⋅\n ⋅ 6 ⋅ ⋅\n ⋅ ⋅ 11 ⋅\n ⋅ ⋅ ⋅ 16" - @test replstr(Bidiagonal(A, :U)) == "4×4 Bidiagonal{$(Int),Array{$(Int),1}}:\n 1 5 ⋅ ⋅\n ⋅ 6 10 ⋅\n ⋅ ⋅ 11 15\n ⋅ ⋅ ⋅ 16" - @test replstr(Bidiagonal(A, :L)) == "4×4 Bidiagonal{$(Int),Array{$(Int),1}}:\n 1 ⋅ ⋅ ⋅\n 2 6 ⋅ ⋅\n ⋅ 7 11 ⋅\n ⋅ ⋅ 12 16" - @test replstr(SymTridiagonal(A + A')) == "4×4 SymTridiagonal{$(Int),Array{$(Int),1}}:\n 2 7 ⋅ ⋅\n 7 12 17 ⋅\n ⋅ 17 22 27\n ⋅ ⋅ 27 32" - @test replstr(Tridiagonal(diag(A, -1), diag(A), diag(A, +1))) == "4×4 Tridiagonal{$(Int),Array{$(Int),1}}:\n 1 5 ⋅ ⋅\n 2 6 10 ⋅\n ⋅ 7 11 15\n ⋅ ⋅ 12 16" - @test replstr(UpperTriangular(copy(A))) == "4×4 
UpperTriangular{$Int,Array{$Int,2}}:\n 1 5 9 13\n ⋅ 6 10 14\n ⋅ ⋅ 11 15\n ⋅ ⋅ ⋅ 16" - @test replstr(LowerTriangular(copy(A))) == "4×4 LowerTriangular{$Int,Array{$Int,2}}:\n 1 ⋅ ⋅ ⋅\n 2 6 ⋅ ⋅\n 3 7 11 ⋅\n 4 8 12 16" + @test replstr(Diagonal(A)) == "4×4 LinearAlgebra.Diagonal{$(Int),Array{$(Int),1}}:\n 1 ⋅ ⋅ ⋅\n ⋅ 6 ⋅ ⋅\n ⋅ ⋅ 11 ⋅\n ⋅ ⋅ ⋅ 16" + @test replstr(Bidiagonal(A, :U)) == "4×4 LinearAlgebra.Bidiagonal{$(Int),Array{$(Int),1}}:\n 1 5 ⋅ ⋅\n ⋅ 6 10 ⋅\n ⋅ ⋅ 11 15\n ⋅ ⋅ ⋅ 16" + @test replstr(Bidiagonal(A, :L)) == "4×4 LinearAlgebra.Bidiagonal{$(Int),Array{$(Int),1}}:\n 1 ⋅ ⋅ ⋅\n 2 6 ⋅ ⋅\n ⋅ 7 11 ⋅\n ⋅ ⋅ 12 16" + @test replstr(SymTridiagonal(A + A')) == "4×4 LinearAlgebra.SymTridiagonal{$(Int),Array{$(Int),1}}:\n 2 7 ⋅ ⋅\n 7 12 17 ⋅\n ⋅ 17 22 27\n ⋅ ⋅ 27 32" + @test replstr(Tridiagonal(diag(A, -1), diag(A), diag(A, +1))) == "4×4 LinearAlgebra.Tridiagonal{$(Int),Array{$(Int),1}}:\n 1 5 ⋅ ⋅\n 2 6 10 ⋅\n ⋅ 7 11 15\n ⋅ ⋅ 12 16" + @test replstr(UpperTriangular(copy(A))) == "4×4 LinearAlgebra.UpperTriangular{$Int,Array{$Int,2}}:\n 1 5 9 13\n ⋅ 6 10 14\n ⋅ ⋅ 11 15\n ⋅ ⋅ ⋅ 16" + @test replstr(LowerTriangular(copy(A))) == "4×4 LinearAlgebra.LowerTriangular{$Int,Array{$Int,2}}:\n 1 ⋅ ⋅ ⋅\n 2 6 ⋅ ⋅\n 3 7 11 ⋅\n 4 8 12 16" end # Vararg methods in method tables diff --git a/test/statistics.jl b/test/statistics.jl index 9c043967846d38..0c85ee0a295de9 100644 --- a/test/statistics.jl +++ b/test/statistics.jl @@ -1,6 +1,6 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Test, Random +using Test, Random, LinearAlgebra @testset "middle" begin @test middle(3) === 3.0 diff --git a/test/subtype.jl b/test/subtype.jl index 3fd15f5f3f60c7..f6662353fc7853 100644 --- a/test/subtype.jl +++ b/test/subtype.jl @@ -2,6 +2,7 @@ using Base: Bottom using Test +using LinearAlgebra macro UnionAll(var, expr) Expr(:where, esc(expr), esc(var)) @@ -1011,9 +1012,9 @@ f18348(::Type{T}, x::T) where {T<:Any} = 2 @test length(methods(f18348, Tuple{Type{Any},Any})) == 1 # Issue #13165 -@test Symmetric{Float64,Matrix{Float64}} <: LinAlg.RealHermSymComplexHerm -@test Hermitian{Float64,Matrix{Float64}} <: LinAlg.RealHermSymComplexHerm -@test Hermitian{Complex{Float64},Matrix{Complex{Float64}}} <: LinAlg.RealHermSymComplexHerm +@test Symmetric{Float64,Matrix{Float64}} <: LinearAlgebra.RealHermSymComplexHerm +@test Hermitian{Float64,Matrix{Float64}} <: LinearAlgebra.RealHermSymComplexHerm +@test Hermitian{Complex{Float64},Matrix{Complex{Float64}}} <: LinearAlgebra.RealHermSymComplexHerm # Issue #12721 f12721(::T) where {T<:Type{Int}} = true
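# NOTE (illustrative aside, not part of the original patch): RealHermSymComplexHerm, used in the
# subtype tests just above, is an internal (non-exported) union alias inside LinearAlgebra, which
# is why the tests spell it fully qualified while Symmetric and Hermitian themselves are exported.
# A small sketch of the same relationship:
using LinearAlgebra, Test
A = [1.0 2.0; 2.0 3.0]
@test Symmetric(A) isa LinearAlgebra.RealHermSymComplexHerm
@test Hermitian(complex.(A)) isa LinearAlgebra.RealHermSymComplexHerm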