diff --git a/.travis.yml b/.travis.yml
index 87547890..5e1b6284 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,7 +3,7 @@ os:
   - osx
   - linux
 julia:
-  - 0.6
+  - 0.7
   - nightly
 matrix:
   allow_failures:
diff --git a/REQUIRE b/REQUIRE
index cc8b7c29..4844f5f5 100644
--- a/REQUIRE
+++ b/REQUIRE
@@ -1,7 +1,8 @@
-julia 0.6
-Compat 0.64.0
-OrdinaryDiffEq 3.19.1
-DiffEqCallbacks 1.1
-StochasticDiffEq 4.4.5
-RecursiveArrayTools
-WignerSymbols 0.1.1
+julia 0.7-beta2
+OrdinaryDiffEq 4.7.1
+DiffEqCallbacks 2.1
+StochasticDiffEq 5.6.0
+RecursiveArrayTools 0.17.2
+WignerSymbols 0.2.0
+FFTW
+Arpack
diff --git a/appveyor.yml b/appveyor.yml
index 3d66d031..3f6c3e8c 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -1,7 +1,7 @@
 environment:
   matrix:
-  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x86/0.6/julia-0.6-latest-win32.exe"
-  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x64/0.6/julia-0.6-latest-win64.exe"
+  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x86/0.7/julia-0.7-latest-win32.exe"
+  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x64/0.7/julia-0.7-latest-win64.exe"
   - JULIA_URL: "https://julialangnightlies-s3.julialang.org/bin/winnt/x86/julia-latest-win32.exe"
   - JULIA_URL: "https://julialangnightlies-s3.julialang.org/bin/winnt/x64/julia-latest-win64.exe"
diff --git a/src/QuantumOptics.jl b/src/QuantumOptics.jl
index c5d0dc1c..105067a2 100644
--- a/src/QuantumOptics.jl
+++ b/src/QuantumOptics.jl
@@ -2,11 +2,14 @@ __precompile__()
 module QuantumOptics
 
+using SparseArrays, LinearAlgebra
+
 export bases, Basis, GenericBasis, CompositeBasis, basis,
         tensor, ⊗, permutesystems,
-        states, StateVector, Bra, Ket, basisstate,
+        states, StateVector, Bra, Ket, basisstate, norm,
         dagger, normalize, normalize!,
-        operators, Operator, expect, variance, identityoperator, ptrace, embed,
+        operators, Operator, expect, variance, identityoperator, ptrace, embed, dense, tr,
+        sparse,
         operators_dense, DenseOperator, projector, dm,
         operators_sparse, SparseOperator, diagonaloperator,
         operators_lazysum, LazySum,
diff --git a/src/bases.jl b/src/bases.jl
index b3a30a57..710fefb5 100644
--- a/src/bases.jl
+++ b/src/bases.jl
@@ -8,8 +8,6 @@ export Basis, GenericBasis, CompositeBasis, basis,
 import Base: ==, ^
 
-using Compat
-
 """
 Abstract base class for all specialized bases.
 
@@ -103,8 +101,8 @@ tensor(b1::Basis, b2::Basis) = CompositeBasis(Int[prod(b1.shape); prod(b2.shape)
 tensor(b1::CompositeBasis, b2::CompositeBasis) = CompositeBasis(Int[b1.shape; b2.shape], Basis[b1.bases; b2.bases])
 function tensor(b1::CompositeBasis, b2::Basis)
     N = length(b1.bases)
-    shape = Vector{Int}(N+1)
-    bases = Vector{Basis}(N+1)
+    shape = Vector{Int}(undef, N+1)
+    bases = Vector{Basis}(undef, N+1)
     for i=1:N
         shape[i] = b1.shape[i]
         bases[i] = b1.bases[i]
@@ -115,8 +113,8 @@ function tensor(b1::CompositeBasis, b2::Basis)
 end
 function tensor(b1::Basis, b2::CompositeBasis)
     N = length(b2.bases)
-    shape = Vector{Int}(N+1)
-    bases = Vector{Basis}(N+1)
+    shape = Vector{Int}(undef, N+1)
+    bases = Vector{Basis}(undef, N+1)
     for i=1:N
         shape[i+1] = b2.shape[i]
         bases[i+1] = b2.bases[i]
diff --git a/src/fock.jl b/src/fock.jl
index 1786c606..fce259a0 100644
--- a/src/fock.jl
+++ b/src/fock.jl
@@ -5,6 +5,7 @@ export FockBasis, number, destroy, create, displace, fockstate, coherentstate
 import Base: ==
 
 using ..bases, ..states, ..operators, ..operators_dense, ..operators_sparse
+using SparseArrays
 
 
 """
@@ -34,7 +35,7 @@ Number operator for the specified Fock space.
 """
 function number(b::FockBasis)
     diag = complex.(0.:b.N)
-    data = spdiagm(diag, 0, b.N+1, b.N+1)
+    data = spdiagm(0 => diag)
     SparseOperator(b, data)
 end
@@ -45,7 +46,7 @@ Annihilation operator for the specified Fock space.
 """
 function destroy(b::FockBasis)
     diag = complex.(sqrt.(1.:b.N))
-    data = spdiagm(diag, 1, b.N+1, b.N+1)
+    data = spdiagm(1 => diag)
     SparseOperator(b, data)
 end
@@ -56,7 +57,7 @@ Creation operator for the specified Fock space.
 """
 function create(b::FockBasis)
     diag = complex.(sqrt.(1.:b.N))
-    data = spdiagm(diag, -1, b.N+1, b.N+1)
+    data = spdiagm(-1 => diag)
     SparseOperator(b, data)
 end
@@ -65,7 +66,7 @@ end
 Displacement operator ``D(α)`` for the specified Fock space.
 """
-displace(b::FockBasis, alpha::Number) = expm(full(alpha*create(b) - conj(alpha)*destroy(b)))
+displace(b::FockBasis, alpha::Number) = exp(dense(alpha*create(b) - conj(alpha)*destroy(b)))
 
 """
     fockstate(b::FockBasis, n)
@@ -82,7 +83,7 @@ end
 Coherent state ``|α⟩`` for the specified Fock space.
 """
-function coherentstate(b::FockBasis, alpha::Number, result=Ket(b, Vector{Complex128}(length(b))))
+function coherentstate(b::FockBasis, alpha::Number, result=Ket(b, Vector{ComplexF64}(undef, length(b))))
     alpha = complex(alpha)
     data = result.data
     data[1] = exp(-abs2(alpha)/2)
diff --git a/src/manybody.jl b/src/manybody.jl
index a7f31aff..779decdf 100644
--- a/src/manybody.jl
+++ b/src/manybody.jl
@@ -9,6 +9,7 @@ import ..fock: number, create, destroy
 import ..nlevel: transition
 
 using ..bases, ..states, ..operators, ..operators_dense, ..operators_sparse
+using SparseArrays
 
 """
     ManyBodyBasis(b, occupations)
@@ -26,7 +27,7 @@ mutable struct ManyBodyBasis <: Basis
     occupations_hash::UInt
 
     function ManyBodyBasis(onebodybasis::Basis, occupations::Vector{Vector{Int}})
-        new([length(occupations)], onebodybasis, occupations, hash(occupations))
+        new([length(occupations)], onebodybasis, occupations, hash(hash.(occupations)))
     end
 end
@@ -63,8 +64,8 @@ Return a ket state where the system is in the state specified by the given
 occupation numbers.
 """
 function basisstate(basis::ManyBodyBasis, occupation::Vector{Int})
-    index = findfirst(basis.occupations, occupation)
-    if index == 0
+    index = findfirst(isequal(occupation), basis.occupations)
+    if isa(index, Nothing)
         throw(ArgumentError("Occupation not included in many-body basis."))
     end
     basisstate(basis, index)
@@ -227,7 +228,7 @@ where ``X`` is the N-particle operator, ``x`` is the one-body operator and
 ``|u⟩`` are the one-body states associated to the different modes of the
 N-particle basis.
 """
-function manybodyoperator(basis::ManyBodyBasis, op::T)::T where T <: Operator
+function manybodyoperator(basis::ManyBodyBasis, op::T)::T where T<:Operator
     @assert op.basis_l == op.basis_r
     if op.basis_l == basis.onebodybasis
         result = manybodyoperator_1(basis, op)
@@ -303,7 +304,7 @@ function manybodyoperator_2(basis::ManyBodyBasis, op::SparseOperator)
             value = values[j]
             for m=1:N, n=1:N
                 # println("row:", row, " column:"column, ind_left)
-                index = ind2sub((S, S, S, S), (column-1)*S^2 + row)
+                index = Tuple(CartesianIndices((S, S, S, S))[(column-1)*S^2 + row])
                 C = coefficient(occupations[m], occupations[n], index[1:2], index[3:4])
                 if C!=0.
                    result.data[m,n] += C*value
diff --git a/src/master.jl b/src/master.jl
index be7bffe4..799523fe 100644
--- a/src/master.jl
+++ b/src/master.jl
@@ -8,7 +8,7 @@ using ...bases, ...states, ...operators
 using ...operators_dense, ...operators_sparse
 
-const DecayRates = Union{Vector{Float64}, Matrix{Float64}, Void}
+const DecayRates = Union{Vector{Float64}, Matrix{Float64}, Nothing}
 
 """
     timeevolution.master_h(tspan, rho0, H, J; )
@@ -20,7 +20,7 @@ Further information can be found at [`master`](@ref).
 function master_h(tspan, rho0::DenseOperator, H::Operator, J::Vector;
                 rates::DecayRates=nothing,
                 Jdagger::Vector=dagger.(J),
-                fout::Union{Function,Void}=nothing,
+                fout::Union{Function,Nothing}=nothing,
                 kwargs...)
     check_master(rho0, H, J, Jdagger, rates)
     tmp = copy(rho0)
@@ -43,7 +43,7 @@ function master_nh(tspan, rho0::DenseOperator, Hnh::Operator, J::Vector;
                 rates::DecayRates=nothing,
                 Hnhdagger::Operator=dagger(Hnh),
                 Jdagger::Vector=dagger.(J),
-                fout::Union{Function,Void}=nothing,
+                fout::Union{Function,Nothing}=nothing,
                 kwargs...)
     check_master(rho0, Hnh, J, Jdagger, rates)
     tmp = copy(rho0)
@@ -86,7 +86,7 @@ non-hermitian Hamiltonian and then calls master_nh which is slightly faster.
 function master(tspan, rho0::DenseOperator, H::Operator, J::Vector;
                 rates::DecayRates=nothing,
                 Jdagger::Vector=dagger.(J),
-                fout::Union{Function,Void}=nothing,
+                fout::Union{Function,Nothing}=nothing,
                 kwargs...)
     isreducible = check_master(rho0, H, J, Jdagger, rates)
     if !isreducible
@@ -130,7 +130,7 @@ at [`master_dynamic`](@ref).
 """
 function master_nh_dynamic(tspan, rho0::DenseOperator, f::Function;
                 rates::DecayRates=nothing,
-                fout::Union{Function,Void}=nothing,
+                fout::Union{Function,Nothing}=nothing,
                 kwargs...)
     tmp = copy(rho0)
     dmaster_(t, rho::DenseOperator, drho::DenseOperator) = dmaster_nh_dynamic(t, rho, f, rates, drho, tmp)
@@ -164,7 +164,7 @@ operators:
 """
 function master_dynamic(tspan, rho0::DenseOperator, f::Function;
                 rates::DecayRates=nothing,
-                fout::Union{Function,Void}=nothing,
+                fout::Union{Function,Nothing}=nothing,
                 kwargs...)
     tmp = copy(rho0)
     dmaster_(t, rho::DenseOperator, drho::DenseOperator) = dmaster_h_dynamic(t, rho, f, rates, drho, tmp)
@@ -181,13 +181,13 @@ master_nh_dynamic(tspan, psi0::Ket, f::Function; kwargs...) = master_nh_dynamic(
 
 # Recasting needed for the ODE solver is just providing the underlying data
-function recast!(x::Array{Complex128, 2}, rho::DenseOperator)
+function recast!(x::Array{ComplexF64, 2}, rho::DenseOperator)
     rho.data = x
 end
-recast!(rho::DenseOperator, x::Array{Complex128, 2}) = nothing
+recast!(rho::DenseOperator, x::Array{ComplexF64, 2}) = nothing
 
 function integrate_master(tspan, df::Function, rho0::DenseOperator,
-                    fout::Union{Void, Function}; kwargs...)
+                    fout::Union{Nothing, Function}; kwargs...)
     tspan_ = convert(Vector{Float64}, tspan)
     x0 = rho0.data
     state = DenseOperator(rho0.basis_l, rho0.basis_r, rho0.data)
@@ -206,7 +206,7 @@ end
 # or a matrix.
 function dmaster_h(rho::DenseOperator, H::Operator,
-                   rates::Void, J::Vector, Jdagger::Vector,
+                   rates::Nothing, J::Vector, Jdagger::Vector,
                    drho::DenseOperator, tmp::DenseOperator)
     operators.gemm!(-1im, H, rho, 0, drho)
     operators.gemm!(1im, rho, H, 1, drho)
@@ -257,7 +257,7 @@ function dmaster_h(rho::DenseOperator, H::Operator,
 end
 
 function dmaster_nh(rho::DenseOperator, Hnh::Operator, Hnh_dagger::Operator,
-                    rates::Void, J::Vector, Jdagger::Vector,
+                    rates::Nothing, J::Vector, Jdagger::Vector,
                     drho::DenseOperator, tmp::DenseOperator)
     operators.gemm!(-1im, Hnh, rho, 0, drho)
     operators.gemm!(1im, rho, Hnh_dagger, 1, drho)
diff --git a/src/mcwf.jl b/src/mcwf.jl
index 6ffd4c4f..4ee168c4 100644
--- a/src/mcwf.jl
+++ b/src/mcwf.jl
@@ -6,13 +6,15 @@ using ...bases, ...states, ...operators
 using ...operators_dense, ...operators_sparse
 using ..timeevolution
 using ...operators_lazysum, ...operators_lazytensor, ...operators_lazyproduct
+using Random, LinearAlgebra
 import OrdinaryDiffEq
+
 # TODO: Remove imports
 import DiffEqCallbacks, RecursiveArrayTools.copyat_or_push!
 import ..recast!
-Base.@pure pure_inference(fout,T) = Core.Inference.return_type(fout, T)
+Base.@pure pure_inference(fout,T) = Core.Compiler.return_type(fout, T)
 
-const DecayRates = Union{Vector{Float64}, Matrix{Float64}, Void}
+const DecayRates = Union{Vector{Float64}, Matrix{Float64}, Nothing}
 
 """
     mcwf_h(tspan, rho0, Hnh, J; )
@@ -112,7 +114,7 @@ function mcwf(tspan, psi0::Ket, H::Operator, J::Vector;
             kwargs...)
     else
         Hnh = copy(H)
-        if typeof(rates) == Void
+        if typeof(rates) == Nothing
             for i=1:length(J)
                 Hnh -= 0.5im*Jdagger[i]*J[i]
             end
@@ -255,11 +257,11 @@ function integrate_mcwf(dmcwf::Function, jumpfun::Function, tspan,
                         kwargs...)
     tmp = copy(psi0)
-    as_ket(x::Vector{Complex128}) = Ket(psi0.basis, x)
+    as_ket(x::Vector{ComplexF64}) = Ket(psi0.basis, x)
     as_vector(psi::Ket) = psi.data
     rng = MersenneTwister(convert(UInt, seed))
     jumpnorm = Ref(rand(rng))
-    djumpnorm(x::Vector{Complex128}, t, integrator) = norm(as_ket(x))^2 - (1-jumpnorm[])
+    djumpnorm(x::Vector{ComplexF64}, t, integrator) = norm(as_ket(x))^2 - (1-jumpnorm[])
 
     if !display_beforeevent && !display_afterevent
         function dojump(integrator)
@@ -280,7 +282,7 @@ function integrate_mcwf(dmcwf::Function, jumpfun::Function, tspan,
     else
         # Temporary workaround until proper tooling for saving
         # TODO: Replace by proper call to timeevolution.integrate
-        function fout_(x::Vector{Complex128}, t::Float64, integrator)
+        function fout_(x::Vector{ComplexF64}, t::Float64, integrator)
             recast!(x, state)
             fout(t, state)
         end
@@ -322,7 +324,7 @@ function integrate_mcwf(dmcwf::Function, jumpfun::Function, tspan,
                                          save_positions = (false,false))
     full_cb = OrdinaryDiffEq.CallbackSet(callback,cb,scb)
 
-    function df_(dx::Vector{Complex128}, x::Vector{Complex128}, p, t)
+    function df_(dx::Vector{ComplexF64}, x::Vector{ComplexF64}, p, t)
         recast!(x, state)
         recast!(dx, dstate)
         dmcwf(t, state, dstate)
@@ -344,7 +346,7 @@ function integrate_mcwf(dmcwf::Function, jumpfun::Function, tspan,
 end
 
 function integrate_mcwf(dmcwf::Function, jumpfun::Function, tspan,
-                        psi0::Ket, seed, fout::Void;
+                        psi0::Ket, seed, fout::Nothing;
                         kwargs...)
     function fout_(t, x)
         psi = copy(x)
@@ -366,7 +368,7 @@ Default jump function.
 * `J`: List of jump operators.
 * `psi_new`: Result of jump.
 """
-function jump(rng, t::Float64, psi::Ket, J::Vector, psi_new::Ket, rates::Void)
+function jump(rng, t::Float64, psi::Ket, J::Vector, psi_new::Ket, rates::Nothing)
     if length(J)==1
         operators.gemv!(complex(1.), J[1], psi, complex(0.), psi_new)
         psi_new.data ./= norm(psi_new)
@@ -409,7 +411,7 @@ The non-hermitian Hamiltonian is given in two parts - the hermitian part H and
 the jump operators J.
 """
 function dmcwf_h(psi::Ket, H::Operator,
-                 J::Vector, Jdagger::Vector, dpsi::Ket, tmp::Ket, rates::Void)
+                 J::Vector, Jdagger::Vector, dpsi::Ket, tmp::Ket, rates::Nothing)
     operators.gemv!(complex(0,-1.), H, psi, complex(0.), dpsi)
     for i=1:length(J)
         operators.gemv!(complex(1.), J[i], psi, complex(0.), tmp)
@@ -488,13 +490,13 @@ corresponding set of jump operators is calculated.
 """
 function diagonaljumps(rates::Matrix{Float64}, J::Vector{T}) where T <: Operator
     @assert length(J) == size(rates)[1] == size(rates)[2]
-    d, v = eig(rates)
+    d, v = eigen(rates)
     d, [sum([v[j, i]*J[j] for j=1:length(d)]) for i=1:length(d)]
 end
 
 function diagonaljumps(rates::Matrix{Float64}, J::Vector{T}) where T <: Union{LazySum, LazyTensor, LazyProduct}
     @assert length(J) == size(rates)[1] == size(rates)[2]
-    d, v = eig(rates)
+    d, v = eigen(rates)
     d, [LazySum([v[j, i]*J[j] for j=1:length(d)]...) for i=1:length(d)]
 end
diff --git a/src/metrics.jl b/src/metrics.jl
index 216c509a..f9f6f6a4 100644
--- a/src/metrics.jl
+++ b/src/metrics.jl
@@ -6,6 +6,7 @@ export tracenorm, tracenorm_h, tracenorm_nh, logarithmic_negativity
 
 using ..bases, ..operators, ..operators_dense, ..states
+using LinearAlgebra
 
 """
     tracenorm(rho)
@@ -154,7 +155,7 @@ natural logarithm and ``\\log(0) ≡ 0``.
 """
 function entropy_vn(rho::DenseOperator; tol::Float64=1e-15)
     evals = eigvals(rho.data)
-    evals[abs.(evals) .< tol] = 0.0
+    evals[abs.(evals) .< tol] .= 0.0
     sum([d == 0 ? 0 : -d*log(d) for d=evals])
 end
 entropy_vn(psi::StateVector; kwargs...) = entropy_vn(dm(psi); kwargs...)
@@ -172,7 +173,7 @@ F(ρ, σ) = Tr\\left(\\sqrt{\\sqrt{ρ}σ\\sqrt{ρ}}\\right),
 
 where ``\\sqrt{ρ}=\\sum_n\\sqrt{λ_n}|ψ⟩⟨ψ|``.
 """
-fidelity(rho::DenseOperator, sigma::DenseOperator) = trace(sqrtm(sqrtm(rho.data)*sigma.data*sqrtm(rho.data)))
+fidelity(rho::DenseOperator, sigma::DenseOperator) = tr(sqrt(sqrt(rho.data)*sigma.data*sqrt(rho.data)))
 
 """
@@ -197,7 +198,7 @@ function ptranspose(rho::DenseOperator, index::Int=1)
     m = Int(prod(rho_perm.basis_l.shape[1:N-1]))
     n = rho_perm.basis_l.shape[N]
     for i=1:n, j=1:n
-        rho_perm.data[m*(i-1)+1:m*i, m*(j-1)+1:m*j] = transpose(rho_perm.data[m*(i-1)+1:m*i, m*(j-1)+1:m*j])
+        rho_perm.data[m*(i-1)+1:m*i, m*(j-1)+1:m*j] = permutedims(rho_perm.data[m*(i-1)+1:m*i, m*(j-1)+1:m*j])
     end
 
     return permutesystems(rho_perm, perm)
@@ -220,7 +221,7 @@ Negativity of rho with respect to subsystem index.
 The negativity of a density matrix ρ is defined as
 
 ```math
-N(ρ) = \|ρᵀ\|,
+N(ρ) = \\|ρᵀ\\|,
 ```
 where `ρᵀ` is the partial transpose.
 """
 negativity(rho::DenseOperator, index::Int) = 0.5*(tracenorm(ptranspose(rho, inde
@@ -233,7 +234,7 @@ The logarithmic negativity of a density matrix ρ is defined as
 
 ```math
-N(ρ) = \log₂\|ρᵀ\|,
+N(ρ) = \\log₂\\|ρᵀ\\|,
 ```
 where `ρᵀ` is the partial transpose.
""" diff --git a/src/operators.jl b/src/operators.jl index 4cdfadb3..7825b22e 100644 --- a/src/operators.jl +++ b/src/operators.jl @@ -1,15 +1,15 @@ module operators export Operator, length, basis, dagger, ishermitian, tensor, embed, - trace, ptrace, normalize, normalize!, expect, variance, - expm, permutesystems, identityoperator + tr, ptrace, normalize, normalize!, expect, variance, + exp, permutesystems, identityoperator, dense -import Base: ==, +, -, *, /, length, trace, one, ishermitian, expm, conj, conj! +import Base: ==, +, -, *, /, ^, length, one, exp, conj, conj! +import LinearAlgebra: tr, ishermitian import ..bases: basis, tensor, ptrace, permutesystems, samebases, check_samebases, multiplicable import ..states: dagger, normalize, normalize! -using Compat using ..sortedindices, ..bases, ..states @@ -28,8 +28,8 @@ abstract type Operator end # Common error messages -arithmetic_unary_error(funcname, x::Operator) = throw(ArgumentError("$funcname is not defined for this type of operator: $(typeof(x)).\nTry to convert to another operator type first with e.g. full() or sparse().")) -arithmetic_binary_error(funcname, a::Operator, b::Operator) = throw(ArgumentError("$funcname is not defined for this combination of types of operators: $(typeof(a)), $(typeof(b)).\nTry to convert to a common operator type first with e.g. full() or sparse().")) +arithmetic_unary_error(funcname, x::Operator) = throw(ArgumentError("$funcname is not defined for this type of operator: $(typeof(x)).\nTry to convert to another operator type first with e.g. dense() or sparse().")) +arithmetic_binary_error(funcname, a::Operator, b::Operator) = throw(ArgumentError("$funcname is not defined for this combination of types of operators: $(typeof(a)), $(typeof(b)).\nTry to convert to a common operator type first with e.g. dense() or sparse().")) addnumbererror() = throw(ArgumentError("Can't add or subtract a number and an operator. You probably want 'op + identityoperator(op)*x'.")) length(a::Operator) = length(a.basis_l)::Int*length(a.basis_r)::Int @@ -46,6 +46,7 @@ basis(a::Operator) = (check_samebases(a); a.basis_l) -(a::Operator, b::Number) = addnumbererror() *(a::Operator, b::Operator) = arithmetic_binary_error("Multiplication", a, b) +^(a::Operator, b::Int) = Base.power_by_squaring(a, b) dagger(a::Operator) = arithmetic_unary_error("Hermitian conjugate", a) @@ -53,6 +54,8 @@ dagger(a::Operator) = arithmetic_unary_error("Hermitian conjugate", a) conj(a::Operator) = arithmetic_unary_error("Complex conjugate", a) conj!(a::Operator) = conj(a::Operator) +dense(a::Operator) = arithmetic_unary_error("Conversion to dense", a) + """ ishermitian(op::Operator) @@ -82,7 +85,7 @@ function embed(basis_l::CompositeBasis, basis_r::CompositeBasis, @assert length(basis_r.bases) == N @assert length(indices) == length(operators) sortedindices.check_indices(N, indices) - tensor([i ∈ indices ? operators[findfirst(indices, i)] : identityoperator(T, basis_l.bases[i], basis_r.bases[i]) for i=1:N]...) + tensor([i ∈ indices ? operators[indexin(i, indices)[1]] : identityoperator(T, basis_l.bases[i], basis_r.bases[i]) for i=1:N]...) 
end embed(basis_l::CompositeBasis, basis_r::CompositeBasis, index::Int, op::Operator) = embed(basis_l, basis_r, Int[index], [op]) embed(basis::CompositeBasis, index::Int, op::Operator) = embed(basis, basis, Int[index], [op]) @@ -112,7 +115,7 @@ function embed(basis_l::CompositeBasis, basis_r::CompositeBasis, if i in complement_indices_flat push!(operators_flat, identityoperator(T, basis_l.bases[i], basis_r.bases[i])) elseif i in start_indices_flat - push!(operators_flat, operator_list[findfirst(start_indices_flat, i)]) + push!(operators_flat, operator_list[indexin(i, start_indices_flat)[1]]) end end return tensor(operators_flat...) @@ -129,25 +132,25 @@ embed(basis::CompositeBasis, operators::Dict{Vector{Int}, T}; kwargs...) where { """ - trace(x::Operator) + tr(x::Operator) Trace of the given operator. """ -trace(x::Operator) = arithmetic_unary_error("Trace", x) +tr(x::Operator) = arithmetic_unary_error("Trace", x) ptrace(a::Operator, index::Vector{Int}) = arithmetic_unary_error("Partial trace", a) """ normalize(op) -Return the normalized operator so that its `trace(op)` is one. +Return the normalized operator so that its `tr(op)` is one. """ -normalize(op::Operator) = op/trace(op) +normalize(op::Operator) = op/tr(op) """ normalize!(op) -In-place normalization of the given operator so that its `trace(x)` is one. +In-place normalization of the given operator so that its `tr(x)` is one. """ normalize!(op::Operator) = throw(ArgumentError("normalize! is not defined for this type of operator: $(typeof(op)).\n You may have to fall back to the non-inplace version 'normalize()'.")) @@ -158,9 +161,8 @@ Expectation value of the given operator `op` for the specified `state`. `state` can either be a (density) operator or a ket. """ - expect(op::Operator, state::Ket) = dagger(state) * op * state -expect(op::Operator, state::Operator) = trace(op*state) +expect(op::Operator, state::Operator) = tr(op*state) """ expect(index, op, state) @@ -218,11 +220,11 @@ variance(indices::Vector{Int}, op::Operator, states::Vector) = [variance(indices """ - expm(op::Operator) + exp(op::Operator) Operator exponential. 
""" -expm(op::Operator) = throw(ArgumentError("expm() is not defined for this type of operator: $(typeof(op)).\nTry to convert to dense operator first with full().")) +exp(op::Operator) = throw(ArgumentError("exp() is not defined for this type of operator: $(typeof(op)).\nTry to convert to dense operator first with dense().")) permutesystems(a::Operator, perm::Vector{Int}) = arithmetic_unary_error("Permutations of subsystems", a) @@ -271,7 +273,7 @@ function check_ptrace_arguments(a::Operator, indices::Vector{Int}) throw(ArgumentError("Partial trace can only be applied onto operators wich have the same number of subsystems in the left basis and right basis.")) end if rank == length(indices) - throw(ArgumentError("Partial trace can't be used to trace out all subsystems - use trace() instead.")) + throw(ArgumentError("Partial trace can't be used to trace out all subsystems - use tr() instead.")) end sortedindices.check_indices(length(a.basis_l.shape), indices) for i=indices @@ -282,7 +284,7 @@ function check_ptrace_arguments(a::Operator, indices::Vector{Int}) end function check_ptrace_arguments(a::StateVector, indices::Vector{Int}) if length(basis(a).shape) == length(indices) - throw(ArgumentError("Partial trace can't be used to trace out all subsystems - use trace() instead.")) + throw(ArgumentError("Partial trace can't be used to trace out all subsystems - use tr() instead.")) end sortedindices.check_indices(length(basis(a).shape), indices) end diff --git a/src/operators_dense.jl b/src/operators_dense.jl index d2f7fb09..50714a5a 100644 --- a/src/operators_dense.jl +++ b/src/operators_dense.jl @@ -1,11 +1,11 @@ module operators_dense -export DenseOperator, full, projector, dm +export DenseOperator, dense, projector, dm import Base: ==, +, -, *, / import ..operators -using Base.LinAlg, Base.Cartesian +using LinearAlgebra, Base.Cartesian using ..bases, ..states, ..operators @@ -19,24 +19,23 @@ The matrix consisting of complex floats is stored in the `data` field. mutable struct DenseOperator <: Operator basis_l::Basis basis_r::Basis - data::Matrix{Complex128} + data::Matrix{ComplexF64} DenseOperator(b1::Basis, b2::Basis, data) = length(b1) == size(data, 1) && length(b2) == size(data, 2) ? new(b1, b2, data) : throw(DimensionMismatch()) end DenseOperator(b::Basis, data) = DenseOperator(b, b, data) -DenseOperator(b1::Basis, b2::Basis) = DenseOperator(b1, b2, zeros(Complex128, length(b1), length(b2))) +DenseOperator(b1::Basis, b2::Basis) = DenseOperator(b1, b2, zeros(ComplexF64, length(b1), length(b2))) DenseOperator(b::Basis) = DenseOperator(b, b) -DenseOperator(op::Operator) = full(op) +DenseOperator(op::Operator) = dense(op) Base.copy(x::DenseOperator) = DenseOperator(x.basis_l, x.basis_r, copy(x.data)) """ - full(op::Operator) + dense(op::Operator) Convert an arbitrary Operator into a [`DenseOperator`](@ref). 
""" -Base.full(x::Operator) = throw(ArgumentError("Conversion from $(typeof(x)) to a DenseOperator not implemented.")) -Base.full(x::DenseOperator) = copy(x) +operators.dense(x::DenseOperator) = copy(x) ==(x::DenseOperator, y::DenseOperator) = (x.basis_l == y.basis_l) && (x.basis_r == y.basis_r) && (x.data == y.data) @@ -48,7 +47,7 @@ Base.full(x::DenseOperator) = copy(x) -(a::DenseOperator, b::DenseOperator) = (check_samebases(a,b); DenseOperator(a.basis_l, a.basis_r, a.data-b.data)) *(a::DenseOperator, b::Ket) = (check_multiplicable(a, b); Ket(a.basis_l, a.data*b.data)) -*(a::Bra, b::DenseOperator) = (check_multiplicable(a, b); Bra(b.basis_r, b.data.'*a.data)) +*(a::Bra, b::DenseOperator) = (check_multiplicable(a, b); Bra(b.basis_r, transpose(b.data)*a.data)) *(a::DenseOperator, b::DenseOperator) = (check_multiplicable(a, b); DenseOperator(a.basis_l, b.basis_r, a.data*b.data)) *(a::DenseOperator, b::Number) = DenseOperator(a.basis_l, a.basis_r, complex(b)*a.data) *(a::Number, b::DenseOperator) = DenseOperator(b.basis_l, b.basis_r, complex(a)*b.data) @@ -97,7 +96,7 @@ Outer product ``|x⟩⟨y|`` of the given states. operators.tensor(a::Ket, b::Bra) = DenseOperator(a.basis, b.basis, reshape(kron(b.data, a.data), prod(a.basis.shape), prod(b.basis.shape))) -operators.trace(op::DenseOperator) = (check_samebases(op); trace(op.data)) +operators.tr(op::DenseOperator) = (check_samebases(op); tr(op.data)) function operators.ptrace(a::DenseOperator, indices::Vector{Int}) operators.check_ptrace_arguments(a, indices) @@ -123,7 +122,7 @@ function operators.ptrace(psi::Bra, indices::Vector{Int}) return DenseOperator(b_, b_, result) end -operators.normalize!(op::DenseOperator) = scale!(op.data, 1./trace(op)) +operators.normalize!(op::DenseOperator) = (rmul!(op.data, 1.0/tr(op)); nothing) function operators.expect(op::DenseOperator, state::Ket)# where T <: Union{Ket, Bra} check_samebases(op.basis_r, state.basis) @@ -134,28 +133,28 @@ end function operators.expect(op::DenseOperator, state::Operator) check_samebases(op.basis_r, state.basis_l) check_samebases(op.basis_l, state.basis_r) - result = Complex128(0.) + result = ComplexF64(0.) @inbounds for i=1:size(op.data, 1), j=1:size(op.data,2) result += op.data[i,j]*state.data[j,i] end result end -function operators.expm(op::DenseOperator) +function operators.exp(op::DenseOperator) check_samebases(op) - return DenseOperator(op.basis_l, op.basis_r, expm(op.data)) + return DenseOperator(op.basis_l, op.basis_r, exp(op.data)) end function operators.permutesystems(a::DenseOperator, perm::Vector{Int}) @assert length(a.basis_l.bases) == length(a.basis_r.bases) == length(perm) @assert isperm(perm) data = reshape(a.data, [a.basis_l.shape; a.basis_r.shape]...) 
-    data = permutedims(data, [perm; perm + length(perm)])
+    data = permutedims(data, [perm; perm .+ length(perm)])
     data = reshape(data, length(a.basis_l), length(a.basis_r))
     DenseOperator(permutesystems(a.basis_l, perm), permutesystems(a.basis_r, perm), data)
 end
 
-operators.identityoperator(::Type{DenseOperator}, b1::Basis, b2::Basis) = DenseOperator(b1, b2, eye(Complex128, length(b1), length(b2)))
+operators.identityoperator(::Type{DenseOperator}, b1::Basis, b2::Basis) = DenseOperator(b1, b2, Matrix{ComplexF64}(I, length(b1), length(b2)))
 
 """
     projector(a::Ket, b::Bra)
@@ -197,21 +196,21 @@ function _strides(shape::Vector{Int})
 end
 
 # Dense operator version
-@generated function _ptrace(::Type{Val{RANK}}, a::Matrix{Complex128},
+@generated function _ptrace(::Type{Val{RANK}}, a::Matrix{ComplexF64},
                             shape_l::Vector{Int}, shape_r::Vector{Int}, indices::Vector{Int}) where RANK
     return quote
         a_strides_l = _strides(shape_l)
         result_shape_l = copy(shape_l)
-        result_shape_l[indices] = 1
+        result_shape_l[indices] .= 1
         result_strides_l = _strides(result_shape_l)
         a_strides_r = _strides(shape_r)
         result_shape_r = copy(shape_r)
-        result_shape_r[indices] = 1
+        result_shape_r[indices] .= 1
         result_strides_r = _strides(result_shape_r)
         N_result_l = prod(result_shape_l)
         N_result_r = prod(result_shape_r)
-        result = zeros(Complex128, N_result_l, N_result_r)
+        result = zeros(ComplexF64, N_result_l, N_result_r)
         @nexprs 1 (d->(Jr_{$RANK}=1;Ir_{$RANK}=1))
         @nloops $RANK ir (d->1:shape_r[d]) (d->(Ir_{d-1}=Ir_d; Jr_{d-1}=Jr_d)) (d->(Ir_d+=a_strides_r[d]; if !(d in indices) Jr_d+=result_strides_r[d] end)) begin
             @nexprs 1 (d->(Jl_{$RANK}=1;Il_{$RANK}=1))
@@ -223,15 +222,15 @@ end
 end
 
-@generated function _ptrace_ket(::Type{Val{RANK}}, a::Vector{Complex128},
+@generated function _ptrace_ket(::Type{Val{RANK}}, a::Vector{ComplexF64},
                             shape::Vector{Int}, indices::Vector{Int}) where RANK
     return quote
         a_strides = _strides(shape)
         result_shape = copy(shape)
-        result_shape[indices] = 1
+        result_shape[indices] .= 1
         result_strides = _strides(result_shape)
         N_result = prod(result_shape)
-        result = zeros(Complex128, N_result, N_result)
+        result = zeros(ComplexF64, N_result, N_result)
         @nexprs 1 (d->(Jr_{$RANK}=1;Ir_{$RANK}=1))
         @nloops $RANK ir (d->1:shape[d]) (d->(Ir_{d-1}=Ir_d; Jr_{d-1}=Jr_d)) (d->(Ir_d+=a_strides[d]; if !(d in indices) Jr_d+=result_strides[d] end)) begin
             @nexprs 1 (d->(Jl_{$RANK}=1;Il_{$RANK}=1))
@@ -243,15 +242,15 @@ end
 end
 
-@generated function _ptrace_bra(::Type{Val{RANK}}, a::Vector{Complex128},
+@generated function _ptrace_bra(::Type{Val{RANK}}, a::Vector{ComplexF64},
                             shape::Vector{Int}, indices::Vector{Int}) where RANK
     return quote
        a_strides = _strides(shape)
        result_shape = copy(shape)
-        result_shape[indices] = 1
+        result_shape[indices] .= 1
        result_strides = _strides(result_shape)
        N_result = prod(result_shape)
-        result = zeros(Complex128, N_result, N_result)
+        result = zeros(ComplexF64, N_result, N_result)
        @nexprs 1 (d->(Jr_{$RANK}=1;Ir_{$RANK}=1))
        @nloops $RANK ir (d->1:shape[d]) (d->(Ir_{d-1}=Ir_d; Jr_{d-1}=Jr_d)) (d->(Ir_d+=a_strides[d]; if !(d in indices) Jr_d+=result_strides[d] end)) begin
            @nexprs 1 (d->(Jl_{$RANK}=1;Il_{$RANK}=1))
@@ -264,13 +263,13 @@ end
 end
 
 # Fast in-place multiplication
-operators.gemm!(alpha, a::Matrix{Complex128}, b::Matrix{Complex128}, beta, result::Matrix{Complex128}) = BLAS.gemm!('N', 'N', convert(Complex128, alpha), a, b, convert(Complex128, beta), result)
-operators.gemv!(alpha, a::Matrix{Complex128}, b::Vector{Complex128}, beta, result::Vector{Complex128}) = BLAS.gemv!('N', convert(Complex128, alpha), a, b, convert(Complex128, beta), result)
-operators.gemv!(alpha, a::Vector{Complex128}, b::Matrix{Complex128}, beta, result::Vector{Complex128}) = BLAS.gemv!('T', convert(Complex128, alpha), b, a, convert(Complex128, beta), result)
+operators.gemm!(alpha, a::Matrix{ComplexF64}, b::Matrix{ComplexF64}, beta, result::Matrix{ComplexF64}) = BLAS.gemm!('N', 'N', convert(ComplexF64, alpha), a, b, convert(ComplexF64, beta), result)
+operators.gemv!(alpha, a::Matrix{ComplexF64}, b::Vector{ComplexF64}, beta, result::Vector{ComplexF64}) = BLAS.gemv!('N', convert(ComplexF64, alpha), a, b, convert(ComplexF64, beta), result)
+operators.gemv!(alpha, a::Vector{ComplexF64}, b::Matrix{ComplexF64}, beta, result::Vector{ComplexF64}) = BLAS.gemv!('T', convert(ComplexF64, alpha), b, a, convert(ComplexF64, beta), result)
 
-operators.gemm!(alpha, a::DenseOperator, b::DenseOperator, beta, result::DenseOperator) = operators.gemm!(convert(Complex128, alpha), a.data, b.data, convert(Complex128, beta), result.data)
-operators.gemv!(alpha, a::DenseOperator, b::Ket, beta, result::Ket) = operators.gemv!(convert(Complex128, alpha), a.data, b.data, convert(Complex128, beta), result.data)
-operators.gemv!(alpha, a::Bra, b::DenseOperator, beta, result::Bra) = operators.gemv!(convert(Complex128, alpha), a.data, b.data, convert(Complex128, beta), result.data)
+operators.gemm!(alpha, a::DenseOperator, b::DenseOperator, beta, result::DenseOperator) = operators.gemm!(convert(ComplexF64, alpha), a.data, b.data, convert(ComplexF64, beta), result.data)
+operators.gemv!(alpha, a::DenseOperator, b::Ket, beta, result::Ket) = operators.gemv!(convert(ComplexF64, alpha), a.data, b.data, convert(ComplexF64, beta), result.data)
+operators.gemv!(alpha, a::Bra, b::DenseOperator, beta, result::Bra) = operators.gemv!(convert(ComplexF64, alpha), a.data, b.data, convert(ComplexF64, beta), result.data)
 
 # Multiplication for Operators in terms of their gemv! implementation
diff --git a/src/operators_lazyproduct.jl b/src/operators_lazyproduct.jl
index 4aece764..67dbd6dd 100644
--- a/src/operators_lazyproduct.jl
+++ b/src/operators_lazyproduct.jl
@@ -6,6 +6,7 @@ import Base: ==, *, /, +, -
 import ..operators
 
 using ..bases, ..states, ..operators, ..operators_dense
+using SparseArrays
 
 
 """
@@ -21,7 +22,7 @@ multiplication with numbers.
 mutable struct LazyProduct <: Operator
     basis_l::Basis
     basis_r::Basis
-    factor::Complex128
+    factor::ComplexF64
     operators::Vector{Operator}
 
     function LazyProduct(operators::Vector{Operator}, factor::Number=1)
@@ -39,8 +40,8 @@ LazyProduct(operators::Operator...) = LazyProduct(Operator[operators...])
 
 Base.copy(x::LazyProduct) = LazyProduct([copy(op) for op in x.operators], x.factor)
 
-Base.full(op::LazyProduct) = op.factor*prod(full.(op.operators))
-Base.sparse(op::LazyProduct) = op.factor*prod(sparse.(op.operators))
+operators.dense(op::LazyProduct) = op.factor*prod(dense.(op.operators))
+SparseArrays.sparse(op::LazyProduct) = op.factor*prod(sparse.(op.operators))
 
 ==(x::LazyProduct, y::LazyProduct) = (x.basis_l == y.basis_l) && (x.basis_r == y.basis_r) && x.operators==y.operators && x.factor == y.factor
@@ -57,9 +58,9 @@ Base.sparse(op::LazyProduct) = op.factor*prod(sparse.(op.operators))
 
 operators.dagger(op::LazyProduct) = LazyProduct(dagger.(reverse(op.operators)), conj(op.factor))
 
-operators.trace(op::LazyProduct) = throw(ArgumentError("Trace of LazyProduct is not defined. Try to convert to another operator type first with e.g. full() or sparse()."))
+operators.tr(op::LazyProduct) = throw(ArgumentError("Trace of LazyProduct is not defined. Try to convert to another operator type first with e.g. dense() or sparse()."))
 
-operators.ptrace(op::LazyProduct, indices::Vector{Int}) = throw(ArgumentError("Partial trace of LazyProduct is not defined. Try to convert to another operator type first with e.g. full() or sparse()."))
+operators.ptrace(op::LazyProduct, indices::Vector{Int}) = throw(ArgumentError("Partial trace of LazyProduct is not defined. Try to convert to another operator type first with e.g. dense() or sparse()."))
 
 operators.permutesystems(op::LazyProduct, perm::Vector{Int}) = LazyProduct(Operator[permutesystems(op_i, perm) for op_i in op.operators], op.factor)
diff --git a/src/operators_lazysum.jl b/src/operators_lazysum.jl
index a7965dde..94f4c263 100644
--- a/src/operators_lazysum.jl
+++ b/src/operators_lazysum.jl
@@ -4,9 +4,10 @@ export LazySum
 
 import Base: ==, *, /, +, -
 import ..operators
+import SparseArrays: sparse
 
 using ..bases, ..states, ..operators, ..operators_dense
-
+using SparseArrays, LinearAlgebra
 
 """
     LazySum([factors,] operators)
@@ -20,10 +21,10 @@ stored in the `operators` field.
 mutable struct LazySum <: Operator
     basis_l::Basis
     basis_r::Basis
-    factors::Vector{Complex128}
+    factors::Vector{ComplexF64}
     operators::Vector{Operator}
 
-    function LazySum(factors::Vector{Complex128}, operators::Vector{Operator})
+    function LazySum(factors::Vector{ComplexF64}, operators::Vector{Operator})
         @assert length(operators)>0
         @assert length(operators)==length(factors)
         for i = 2:length(operators)
@@ -34,12 +35,12 @@ mutable struct LazySum <: Operator
     end
 end
 LazySum(factors::Vector{T}, operators::Vector) where {T<:Number} = LazySum(complex(factors), Operator[op for op in operators])
-LazySum(operators::Operator...) = LazySum(ones(Complex128, length(operators)), Operator[operators...])
+LazySum(operators::Operator...) = LazySum(ones(ComplexF64, length(operators)), Operator[operators...])
 
 Base.copy(x::LazySum) = LazySum(copy(x.factors), [copy(op) for op in x.operators])
 
-Base.full(op::LazySum) = sum(op.factors .* full.(op.operators))
-Base.sparse(op::LazySum) = sum(op.factors .* sparse.(op.operators))
+operators.dense(op::LazySum) = sum(op.factors .* dense.(op.operators))
+SparseArrays.sparse(op::LazySum) = sum(op.factors .* sparse.(op.operators))
 
 ==(x::LazySum, y::LazySum) = (x.basis_l == y.basis_l) && (x.basis_r == y.basis_r) && x.operators==y.operators && x.factors==y.factors
@@ -56,7 +57,7 @@ Base.sparse(op::LazySum) = sum(op.factors .* sparse.(op.operators))
 
 operators.dagger(op::LazySum) = LazySum(conj.(op.factors), dagger.(op.operators))
 
-operators.trace(op::LazySum) = sum(op.factors .* trace.(op.operators))
+operators.tr(op::LazySum) = sum(op.factors .* tr.(op.operators))
 
 function operators.ptrace(op::LazySum, indices::Vector{Int})
     operators.check_ptrace_arguments(op, indices)
@@ -65,7 +66,7 @@ function operators.ptrace(op::LazySum, indices::Vector{Int})
     LazySum(op.factors, D)
 end
 
-operators.normalize!(op::LazySum) = (op.factors /= trace(op))
+operators.normalize!(op::LazySum) = (op.factors /= tr(op); nothing)
 
 operators.permutesystems(op::LazySum, perm::Vector{Int}) = LazySum(op.factors, Operator[permutesystems(op_i, perm) for op_i in op.operators])
diff --git a/src/operators_lazytensor.jl b/src/operators_lazytensor.jl
index ca51ee9b..081d4f4e 100644
--- a/src/operators_lazytensor.jl
+++ b/src/operators_lazytensor.jl
@@ -7,6 +7,7 @@ import ..operators
 
 using ..sortedindices, ..bases, ..states, ..operators
 using ..operators_dense, ..operators_sparse
+using SparseArrays, LinearAlgebra
 
 
 """
@@ -22,7 +23,7 @@ multiplication with numbers.
 mutable struct LazyTensor <: Operator
     basis_l::CompositeBasis
     basis_r::CompositeBasis
-    factor::Complex128
+    factor::ComplexF64
     indices::Vector{Int}
     operators::Vector{Operator}
 
@@ -69,17 +70,17 @@ Base.copy(x::LazyTensor) = LazyTensor(x.basis_l, x.basis_r, copy(x.indices), [co
 Return the suboperator corresponding to the subsystem specified by `index`. Fails
 if there is no corresponding operator (i.e. it would be an identity operater).
 """
-suboperator(op::LazyTensor, index::Int) = op.operators[findfirst(op.indices, index)]
+suboperator(op::LazyTensor, index::Int) = op.operators[findfirst(isequal(index), op.indices)]
 
 """
     suboperators(op::LazyTensor, index)
 
 Return the suboperators corresponding to the subsystems specified by `indices`. Fails
 if there is no corresponding operator (i.e. it would be an identity operater).
""" -suboperators(op::LazyTensor, indices::Vector{Int}) = op.operators[[findfirst(op.indices, i) for i in indices]] +suboperators(op::LazyTensor, indices::Vector{Int}) = op.operators[[findfirst(isequal(i), op.indices) for i in indices]] -Base.full(op::LazyTensor) = op.factor*embed(op.basis_l, op.basis_r, op.indices, DenseOperator[full(x) for x in op.operators]) -Base.sparse(op::LazyTensor) = op.factor*embed(op.basis_l, op.basis_r, op.indices, SparseOperator[sparse(x) for x in op.operators]) +operators.dense(op::LazyTensor) = op.factor*embed(op.basis_l, op.basis_r, op.indices, DenseOperator[dense(x) for x in op.operators]) +SparseArrays.sparse(op::LazyTensor) = op.factor*embed(op.basis_l, op.basis_r, op.indices, SparseOperator[sparse(x) for x in op.operators]) ==(x::LazyTensor, y::LazyTensor) = (x.basis_l == y.basis_l) && (x.basis_r == y.basis_r) && x.operators==y.operators && x.factor==y.factor @@ -90,7 +91,7 @@ Base.sparse(op::LazyTensor) = op.factor*embed(op.basis_l, op.basis_r, op.indices function *(a::LazyTensor, b::LazyTensor) check_multiplicable(a, b) indices = sortedindices.union(a.indices, b.indices) - ops = Vector{Operator}(length(indices)) + ops = Vector{Operator}(undef, length(indices)) for n in 1:length(indices) i = indices[n] in_a = i in a.indices @@ -127,14 +128,14 @@ end operators.dagger(op::LazyTensor) = LazyTensor(op.basis_r, op.basis_l, op.indices, Operator[dagger(x) for x in op.operators], conj(op.factor)) -operators.tensor(a::LazyTensor, b::LazyTensor) = LazyTensor(a.basis_l ⊗ b.basis_l, a.basis_r ⊗ b.basis_r, [a.indices; b.indices+length(a.basis_l.bases)], Operator[a.operators; b.operators], a.factor*b.factor) +operators.tensor(a::LazyTensor, b::LazyTensor) = LazyTensor(a.basis_l ⊗ b.basis_l, a.basis_r ⊗ b.basis_r, [a.indices; b.indices .+ length(a.basis_l.bases)], Operator[a.operators; b.operators], a.factor*b.factor) -function operators.trace(op::LazyTensor) +function operators.tr(op::LazyTensor) b = basis(op) result = op.factor for i in 1:length(b.bases) if i in op.indices - result *= trace(suboperator(op, i)) + result *= tr(suboperator(op, i)) else result *= length(b.bases[i]) end @@ -149,7 +150,7 @@ function operators.ptrace(op::LazyTensor, indices::Vector{Int}) factor = op.factor for i in indices if i in op.indices - factor *= trace(suboperator(op, i)) + factor *= tr(suboperator(op, i)) else factor *= length(op.basis_l.bases[i]) end @@ -163,19 +164,19 @@ function operators.ptrace(op::LazyTensor, indices::Vector{Int}) if rank==1 return factor * identityoperator(b_l, b_r) end - ops = Vector{Operator}(length(remaining_indices)) + ops = Vector{Operator}(undef, length(remaining_indices)) for i in 1:length(ops) ops[i] = suboperator(op, remaining_indices[i]) end LazyTensor(b_l, b_r, sortedindices.shiftremove(op.indices, indices), ops, factor) end -operators.normalize!(op::LazyTensor) = (op.factor /= trace(op)) +operators.normalize!(op::LazyTensor) = (op.factor /= tr(op); nothing) function operators.permutesystems(op::LazyTensor, perm::Vector{Int}) b_l = permutesystems(op.basis_l, perm) b_r = permutesystems(op.basis_r, perm) - indices = [findfirst(perm, i) for i in op.indices] + indices = [findfirst(isequal(i), perm) for i in op.indices] perm_ = sortperm(indices) LazyTensor(b_l, b_r, indices[perm_], op.operators[perm_], op.factor) end @@ -184,10 +185,10 @@ operators.identityoperator(::Type{LazyTensor}, b1::Basis, b2::Basis) = LazyTenso # Recursively calculate result_{IK} = \\sum_J op_{IJ} h_{JK} -function _gemm_recursive_dense_lazy(i_k::Int, N_k::Int, K::Int, J::Int, 
val::Complex128, +function _gemm_recursive_dense_lazy(i_k::Int, N_k::Int, K::Int, J::Int, val::ComplexF64, shape::Vector{Int}, strides_k::Vector{Int}, strides_j::Vector{Int}, indices::Vector{Int}, h::LazyTensor, - op::Matrix{Complex128}, result::Matrix{Complex128}) + op::Matrix{ComplexF64}, result::Matrix{ComplexF64}) if i_k > N_k for I=1:size(op, 1) result[I, K] += val*op[I, J] @@ -197,7 +198,7 @@ function _gemm_recursive_dense_lazy(i_k::Int, N_k::Int, K::Int, J::Int, val::Com if i_k in indices h_i = operators_lazytensor.suboperator(h, i_k) if isa(h_i, SparseOperator) - h_i_data = h_i.data::SparseMatrixCSC{Complex128,Int} + h_i_data = h_i.data::SparseMatrixCSC{ComplexF64,Int} @inbounds for k=1:h_i_data.n K_ = K + strides_k[i_k]*(k-1) @inbounds for jptr=h_i_data.colptr[k]:h_i_data.colptr[k+1]-1 @@ -208,7 +209,7 @@ function _gemm_recursive_dense_lazy(i_k::Int, N_k::Int, K::Int, J::Int, val::Com end end elseif isa(h_i, DenseOperator) - h_i_data = h_i.data::Matrix{Complex128} + h_i_data = h_i.data::Matrix{ComplexF64} Nk = size(h_i_data, 2) Nj = size(h_i_data, 1) @inbounds for k=1:Nk @@ -233,10 +234,10 @@ end # Recursively calculate result_{JI} = \\sum_K h_{JK} op_{KI} -function _gemm_recursive_lazy_dense(i_k::Int, N_k::Int, K::Int, J::Int, val::Complex128, +function _gemm_recursive_lazy_dense(i_k::Int, N_k::Int, K::Int, J::Int, val::ComplexF64, shape::Vector{Int}, strides_k::Vector{Int}, strides_j::Vector{Int}, indices::Vector{Int}, h::LazyTensor, - op::Matrix{Complex128}, result::Matrix{Complex128}) + op::Matrix{ComplexF64}, result::Matrix{ComplexF64}) if i_k > N_k for I=1:size(op, 2) result[J, I] += val*op[K, I] @@ -246,7 +247,7 @@ function _gemm_recursive_lazy_dense(i_k::Int, N_k::Int, K::Int, J::Int, val::Com if i_k in indices h_i = suboperator(h, i_k) if isa(h_i, SparseOperator) - h_i_data = h_i.data::SparseMatrixCSC{Complex128,Int} + h_i_data = h_i.data::SparseMatrixCSC{ComplexF64,Int} @inbounds for k=1:h_i_data.n K_ = K + strides_k[i_k]*(k-1) @inbounds for jptr=h_i_data.colptr[k]:h_i_data.colptr[k+1]-1 @@ -257,7 +258,7 @@ function _gemm_recursive_lazy_dense(i_k::Int, N_k::Int, K::Int, J::Int, val::Com end end elseif isa(h_i, DenseOperator) - h_i_data = h_i.data::Matrix{Complex128} + h_i_data = h_i.data::Matrix{ComplexF64} Nk = size(h_i_data, 2) Nj = size(h_i_data, 1) @inbounds for k=1:Nk @@ -280,11 +281,11 @@ function _gemm_recursive_lazy_dense(i_k::Int, N_k::Int, K::Int, J::Int, val::Com end end -function gemm(alpha::Complex128, op::Matrix{Complex128}, h::LazyTensor, beta::Complex128, result::Matrix{Complex128}) - if beta == Complex128(0.) +function gemm(alpha::ComplexF64, op::Matrix{ComplexF64}, h::LazyTensor, beta::ComplexF64, result::Matrix{ComplexF64}) + if beta == ComplexF64(0.) fill!(result, beta) - elseif beta != Complex128(1.) - scale!(beta, result) + elseif beta != ComplexF64(1.) + rmul!(result, beta) end N_k = length(h.basis_r.bases) shape = [min(h.basis_l.shape[i], h.basis_r.shape[i]) for i=1:length(h.basis_l.shape)] @@ -293,11 +294,11 @@ function gemm(alpha::Complex128, op::Matrix{Complex128}, h::LazyTensor, beta::Co _gemm_recursive_dense_lazy(1, N_k, 1, 1, alpha*h.factor, shape, strides_k, strides_j, h.indices, h, op, result) end -function gemm(alpha::Complex128, h::LazyTensor, op::Matrix{Complex128}, beta::Complex128, result::Matrix{Complex128}) - if beta == Complex128(0.) +function gemm(alpha::ComplexF64, h::LazyTensor, op::Matrix{ComplexF64}, beta::ComplexF64, result::Matrix{ComplexF64}) + if beta == ComplexF64(0.) fill!(result, beta) - elseif beta != Complex128(1.) 
- scale!(beta, result) + elseif beta != ComplexF64(1.) + rmul!(result, beta) end N_k = length(h.basis_l.bases) shape = [min(h.basis_l.shape[i], h.basis_r.shape[i]) for i=1:length(h.basis_l.shape)] @@ -306,22 +307,22 @@ function gemm(alpha::Complex128, h::LazyTensor, op::Matrix{Complex128}, beta::Co _gemm_recursive_lazy_dense(1, N_k, 1, 1, alpha*h.factor, shape, strides_k, strides_j, h.indices, h, op, result) end -operators.gemm!(alpha, h::LazyTensor, op::DenseOperator, beta, result::DenseOperator) = gemm(convert(Complex128, alpha), h, op.data, convert(Complex128, beta), result.data) -operators.gemm!(alpha, op::DenseOperator, h::LazyTensor, beta, result::DenseOperator) = gemm(convert(Complex128, alpha), op.data, h, convert(Complex128, beta), result.data) +operators.gemm!(alpha, h::LazyTensor, op::DenseOperator, beta, result::DenseOperator) = gemm(convert(ComplexF64, alpha), h, op.data, convert(ComplexF64, beta), result.data) +operators.gemm!(alpha, op::DenseOperator, h::LazyTensor, beta, result::DenseOperator) = gemm(convert(ComplexF64, alpha), op.data, h, convert(ComplexF64, beta), result.data) -function operators.gemv!(alpha::Complex128, a::LazyTensor, b::Ket, beta::Complex128, result::Ket) +function operators.gemv!(alpha::ComplexF64, a::LazyTensor, b::Ket, beta::ComplexF64, result::Ket) b_data = reshape(b.data, length(b.data), 1) result_data = reshape(result.data, length(result.data), 1) gemm(alpha, a, b_data, beta, result_data) end -function operators.gemv!(alpha::Complex128, a::Bra, b::LazyTensor, beta::Complex128, result::Bra) +function operators.gemv!(alpha::ComplexF64, a::Bra, b::LazyTensor, beta::ComplexF64, result::Bra) a_data = reshape(a.data, 1, length(a.data)) result_data = reshape(result.data, 1, length(result.data)) gemm(alpha, a_data, b, beta, result_data) end -operators.gemv!(alpha, a::LazyTensor, b::Ket, beta, result::Ket) = operators.gemv!(convert(Complex128, alpha), a, b, convert(Complex128, beta), result) -operators.gemv!(alpha, a::Bra, b::LazyTensor, beta, result::Bra) = operators.gemv!(convert(Complex128, alpha), a, b, convert(Complex128, beta), result) +operators.gemv!(alpha, a::LazyTensor, b::Ket, beta, result::Ket) = operators.gemv!(convert(ComplexF64, alpha), a, b, convert(ComplexF64, beta), result) +operators.gemv!(alpha, a::Bra, b::LazyTensor, beta, result::Bra) = operators.gemv!(convert(ComplexF64, alpha), a, b, convert(ComplexF64, beta), result) end # module diff --git a/src/operators_sparse.jl b/src/operators_sparse.jl index 19f4c0f1..7bd9fe36 100644 --- a/src/operators_sparse.jl +++ b/src/operators_sparse.jl @@ -4,8 +4,10 @@ export SparseOperator, diagonaloperator import Base: ==, *, /, +, - import ..operators +import SparseArrays: sparse using ..bases, ..states, ..operators, ..operators_dense, ..sparsematrix +using SparseArrays, LinearAlgebra """ @@ -19,7 +21,7 @@ in the `data` field. 
 mutable struct SparseOperator <: Operator
     basis_l::Basis
     basis_r::Basis
-    data::SparseMatrixCSC{Complex128, Int}
+    data::SparseMatrixCSC{ComplexF64, Int}
     function SparseOperator(b1::Basis, b2::Basis, data)
         if length(b1) != size(data, 1) || length(b2) != size(data, 2)
             throw(DimensionMismatch())
         end
@@ -28,24 +30,24 @@ mutable struct SparseOperator <: Operator
     end
 end
 
-SparseOperator(b::Basis, data::SparseMatrixCSC{Complex128, Int}) = SparseOperator(b, b, data)
-SparseOperator(b::Basis, data::Matrix{Complex128}) = SparseOperator(b, sparse(data))
+SparseOperator(b::Basis, data::SparseMatrixCSC{ComplexF64, Int}) = SparseOperator(b, b, data)
+SparseOperator(b::Basis, data::Matrix{ComplexF64}) = SparseOperator(b, sparse(data))
 SparseOperator(op::DenseOperator) = SparseOperator(op.basis_l, op.basis_r, sparse(op.data))
-SparseOperator(b1::Basis, b2::Basis) = SparseOperator(b1, b2, spzeros(Complex128, length(b1), length(b2)))
+SparseOperator(b1::Basis, b2::Basis) = SparseOperator(b1, b2, spzeros(ComplexF64, length(b1), length(b2)))
 SparseOperator(b::Basis) = SparseOperator(b, b)
 
 Base.copy(x::SparseOperator) = SparseOperator(x.basis_l, x.basis_r, copy(x.data))
 
-Base.full(a::SparseOperator) = DenseOperator(a.basis_l, a.basis_r, full(a.data))
+operators.dense(a::SparseOperator) = DenseOperator(a.basis_l, a.basis_r, Matrix(a.data))
 
 """
     sparse(op::Operator)
 
 Convert an arbitrary operator into a [`SparseOperator`](@ref).
 """
-Base.sparse(a::Operator) = throw(ArgumentError("Direct conversion from $(typeof(a)) not implemented. Use sparse(full(op)) instead."))
-Base.sparse(a::SparseOperator) = copy(a)
-Base.sparse(a::DenseOperator) = SparseOperator(a.basis_l, a.basis_r, sparse(a.data))
+sparse(a::Operator) = throw(ArgumentError("Direct conversion from $(typeof(a)) not implemented. Use sparse(full(op)) instead."))
+sparse(a::SparseOperator) = copy(a)
+sparse(a::DenseOperator) = SparseOperator(a.basis_l, a.basis_r, sparse(a.data))
 
 ==(x::SparseOperator, y::SparseOperator) = (x.basis_l == y.basis_l) && (x.basis_r == y.basis_r) && (x.data == y.data)
@@ -73,7 +75,7 @@ operators.tensor(a::SparseOperator, b::SparseOperator) = SparseOperator(tensor(a
 operators.tensor(a::DenseOperator, b::SparseOperator) = SparseOperator(tensor(a.basis_l, b.basis_l), tensor(a.basis_r, b.basis_r), kron(b.data, a.data))
 operators.tensor(a::SparseOperator, b::DenseOperator) = SparseOperator(tensor(a.basis_l, b.basis_l), tensor(a.basis_r, b.basis_r), kron(b.data, a.data))
 
-operators.trace(op::SparseOperator) = (check_samebases(op); trace(op.data))
+operators.tr(op::SparseOperator) = (check_samebases(op); tr(op.data))
 
 operators.conj(op::SparseOperator) = SparseOperator(op.basis_l, op.basis_r, conj(op.data))
 operators.conj!(op::SparseOperator) = conj!(op.data)
@@ -97,7 +99,7 @@ end
 function operators.expect(op::SparseOperator, state::DenseOperator)
     check_samebases(op.basis_r, state.basis_l)
     check_samebases(op.basis_l, state.basis_r)
-    result = Complex128(0.)
+    result = ComplexF64(0.)
     @inbounds for colindex = 1:op.data.n
         for i=op.data.colptr[colindex]:op.data.colptr[colindex+1]-1
             result += op.data.nzval[i]*state.data[colindex, op.data.rowval[i]]
@@ -110,11 +112,11 @@ function operators.permutesystems(rho::SparseOperator, perm::Vector{Int})
     @assert length(rho.basis_l.bases) == length(rho.basis_r.bases) == length(perm)
     @assert isperm(perm)
     shape = [rho.basis_l.shape; rho.basis_r.shape]
-    data = sparsematrix.permutedims(rho.data, shape, [perm; perm + length(perm)])
+    data = sparsematrix.permutedims(rho.data, shape, [perm; perm .+ length(perm)])
     SparseOperator(permutesystems(rho.basis_l, perm), permutesystems(rho.basis_r, perm), data)
 end
 
-operators.identityoperator(::Type{SparseOperator}, b1::Basis, b2::Basis) = SparseOperator(b1, b2, speye(Complex128, length(b1), length(b2)))
+operators.identityoperator(::Type{SparseOperator}, b1::Basis, b2::Basis) = SparseOperator(b1, b2, sparse(ComplexF64(1)*I, length(b1), length(b2)))
 operators.identityoperator(b1::Basis, b2::Basis) = identityoperator(SparseOperator, b1, b2)
 operators.identityoperator(b::Basis) = identityoperator(b, b)
@@ -125,14 +127,14 @@ Create a diagonal operator of type [`SparseOperator`](@ref).
 """
 function diagonaloperator(b::Basis, diag::Vector{T}) where T <: Number
     @assert 1 <= length(diag) <= prod(b.shape)
-    SparseOperator(b, spdiagm(convert(Vector{Complex128}, diag)))
+    SparseOperator(b, sparse(Diagonal(convert(Vector{ComplexF64}, diag))))
 end
 
 # Fast in-place multiplication implementations
-operators.gemm!(alpha, M::SparseOperator, b::DenseOperator, beta, result::DenseOperator) = sparsematrix.gemm!(convert(Complex128, alpha), M.data, b.data, convert(Complex128, beta), result.data)
-operators.gemm!(alpha, a::DenseOperator, M::SparseOperator, beta, result::DenseOperator) = sparsematrix.gemm!(convert(Complex128, alpha), a.data, M.data, convert(Complex128, beta), result.data)
-operators.gemv!(alpha, M::SparseOperator, b::Ket, beta, result::Ket) = sparsematrix.gemv!(convert(Complex128, alpha), M.data, b.data, convert(Complex128, beta), result.data)
-operators.gemv!(alpha, b::Bra, M::SparseOperator, beta, result::Bra) = sparsematrix.gemv!(convert(Complex128, alpha), b.data, M.data, convert(Complex128, beta), result.data)
+operators.gemm!(alpha, M::SparseOperator, b::DenseOperator, beta, result::DenseOperator) = sparsematrix.gemm!(convert(ComplexF64, alpha), M.data, b.data, convert(ComplexF64, beta), result.data)
+operators.gemm!(alpha, a::DenseOperator, M::SparseOperator, beta, result::DenseOperator) = sparsematrix.gemm!(convert(ComplexF64, alpha), a.data, M.data, convert(ComplexF64, beta), result.data)
+operators.gemv!(alpha, M::SparseOperator, b::Ket, beta, result::Ket) = sparsematrix.gemv!(convert(ComplexF64, alpha), M.data, b.data, convert(ComplexF64, beta), result.data)
+operators.gemv!(alpha, b::Bra, M::SparseOperator, beta, result::Bra) = sparsematrix.gemv!(convert(ComplexF64, alpha), b.data, M.data, convert(ComplexF64, beta), result.data)
 
 end # module
diff --git a/src/particle.jl b/src/particle.jl
index f9073feb..ceb1c49c 100644
--- a/src/particle.jl
+++ b/src/particle.jl
@@ -10,7 +10,7 @@ import Base: ==, position
 import ..operators
 
 using ..bases, ..states, ..operators, ..operators_dense, ..operators_sparse
-
+using FFTW, SparseArrays, LinearAlgebra
 
 """
     PositionBasis(xmin, xmax, Npoints)
@@ -106,7 +106,7 @@ that the resulting Ket state is normalized.
 function gaussianstate(b::PositionBasis, x0::Real, p0::Real, sigma::Real)
     psi = Ket(b)
     dx = spacing(b)
-    alpha = 1./(pi^(1/4)*sqrt(sigma))*sqrt(dx)
+    alpha = 1.0/(pi^(1/4)*sqrt(sigma))*sqrt(dx)
     x = b.xmin
     for i=1:b.N
         psi.data[i] = alpha*exp(1im*p0*(x-x0/2) - (x-x0)^2/(2*sigma^2))
@@ -159,7 +159,7 @@ samplepoints(b::MomentumBasis) = (dp = spacing(b); Float64[b.pmin + i*dp for i=0
 Position operator in real space.
 """
-position(b::PositionBasis) = SparseOperator(b, spdiagm(complex(samplepoints(b)), 0, length(b), length(b)))
+position(b::PositionBasis) = SparseOperator(b, sparse(Diagonal(complex(samplepoints(b)))))
 
 """
@@ -169,7 +169,7 @@ Position operator in momentum space.
 """
 function position(b::MomentumBasis)
     b_pos = PositionBasis(b)
-    transform(b, b_pos)*full(position(b_pos))*transform(b_pos, b)
+    transform(b, b_pos)*dense(position(b_pos))*transform(b_pos, b)
 end
 
 """
@@ -177,7 +177,7 @@ end
 Momentum operator in momentum space.
 """
-momentum(b::MomentumBasis) = SparseOperator(b, spdiagm(complex(samplepoints(b)), 0, length(b), length(b)))
+momentum(b::MomentumBasis) = SparseOperator(b, sparse(Diagonal(complex(samplepoints(b)))))
 
 """
     momentum(b::PositionBasis)
@@ -186,7 +186,7 @@ Momentum operator in real space.
 """
 function momentum(b::PositionBasis)
     b_mom = MomentumBasis(b)
-    transform(b, b_mom)*full(momentum(b_mom))*transform(b_mom, b)
+    transform(b, b_mom)*dense(momentum(b_mom))*transform(b_mom, b)
 end
 
 """
@@ -206,7 +206,7 @@ Operator representing a potential ``V(x)`` in momentum space.
 """
 function potentialoperator(b::MomentumBasis, V::Function)
     b_pos = PositionBasis(b)
-    transform(b, b_pos)*full(potentialoperator(b_pos, V))*transform(b_pos, b)
+    transform(b, b_pos)*dense(potentialoperator(b_pos, V))*transform(b_pos, b)
 end
 
 """
@@ -240,9 +240,9 @@ function potentialoperator_position(b::CompositeBasis, V::Function)
     points = [samplepoints(b1) for b1=b.bases]
     dims = length.(points)
     n = length(b.bases)
-    data = Array{Complex128}(dims...)
+    data = Array{ComplexF64}(undef, dims...)
     @inbounds for i=1:length(data)
-        index = ind2sub(data, i)
+        index = Tuple(CartesianIndices(data)[i])
         args = (points[j][index[j]] for j=1:n)
         data[i] = V(args...)
     end
@@ -256,7 +256,7 @@ function potentialoperator_momentum(b::CompositeBasis, V::Function)
         push!(bases_pos, PositionBasis(base))
     end
     b_pos = tensor(bases_pos...)
-    transform(b, b_pos)*full(potentialoperator_position(b_pos, V))*transform(b_pos, b)
+    transform(b, b_pos)*dense(potentialoperator_position(b_pos, V))*transform(b_pos, b)
 end
 
 """
@@ -266,7 +266,7 @@ Abstract type for all implementations of FFT operators.
""" abstract type FFTOperator <: Operator end -const PlanFFT = Base.DFT.FFTW.cFFTWPlan +const PlanFFT = FFTW.cFFTWPlan """ FFTOperators @@ -281,8 +281,8 @@ mutable struct FFTOperators <: FFTOperator fft_r!::PlanFFT fft_l2!::PlanFFT fft_r2!::PlanFFT - mul_before::Array{Complex128} - mul_after::Array{Complex128} + mul_before::Array{ComplexF64} + mul_after::Array{ComplexF64} end """ @@ -296,8 +296,8 @@ mutable struct FFTKets <: FFTOperator basis_r::Basis fft_l!::PlanFFT fft_r!::PlanFFT - mul_before::Array{Complex128} - mul_after::Array{Complex128} + mul_before::Array{ComplexF64} + mul_after::Array{ComplexF64} end """ @@ -313,13 +313,13 @@ function transform(basis_l::MomentumBasis, basis_r::PositionBasis; ket_only::Boo if basis_l.N != basis_r.N || abs(2*pi/dp - Lx)/Lx > 1e-12 throw(IncompatibleBases()) end - mul_before = exp.(-1im*basis_l.pmin*(samplepoints(basis_r)-basis_r.xmin)) + mul_before = exp.(-1im*basis_l.pmin*(samplepoints(basis_r) .- basis_r.xmin)) mul_after = exp.(-1im*basis_r.xmin*samplepoints(basis_l))/sqrt(basis_r.N) - x = Vector{Complex128}(length(basis_r)) + x = Vector{ComplexF64}(undef, length(basis_r)) if ket_only FFTKets(basis_l, basis_r, plan_bfft!(x), plan_fft!(x), mul_before, mul_after) else - A = Matrix{Complex128}(length(basis_r), length(basis_r)) + A = Matrix{ComplexF64}(undef, length(basis_r), length(basis_r)) FFTOperators(basis_l, basis_r, plan_bfft!(x), plan_fft!(x), plan_bfft!(A, 2), plan_fft!(A, 1), mul_before, mul_after) end end @@ -338,13 +338,13 @@ function transform(basis_l::PositionBasis, basis_r::MomentumBasis; ket_only::Boo if basis_l.N != basis_r.N || abs(2*pi/dp - Lx)/Lx > 1e-12 throw(IncompatibleBases()) end - mul_before = exp.(1im*basis_l.xmin*(samplepoints(basis_r)-basis_r.pmin)) + mul_before = exp.(1im*basis_l.xmin*(samplepoints(basis_r) .- basis_r.pmin)) mul_after = exp.(1im*basis_r.pmin*samplepoints(basis_l))/sqrt(basis_r.N) - x = Vector{Complex128}(length(basis_r)) + x = Vector{ComplexF64}(undef, length(basis_r)) if ket_only FFTKets(basis_l, basis_r, plan_fft!(x), plan_bfft!(x), mul_before, mul_after) else - A = Matrix{Complex128}(length(basis_r), length(basis_r)) + A = Matrix{ComplexF64}(undef, length(basis_r), length(basis_r)) FFTOperators(basis_l, basis_r, plan_fft!(x), plan_bfft!(x), plan_fft!(A, 2), plan_bfft!(A, 1), mul_before, mul_after) end end @@ -391,7 +391,7 @@ function transform_xp(basis_l::CompositeBasis, basis_r::CompositeBasis, index::V end if index[1] == 1 - mul_before = exp.(1im*basis_l.bases[1].xmin*(samplepoints(basis_r.bases[1])-basis_r.bases[1].pmin)) + mul_before = exp.(1im*basis_l.bases[1].xmin*(samplepoints(basis_r.bases[1]) .- basis_r.bases[1].pmin)) mul_after = exp.(1im*basis_r.bases[1].pmin*samplepoints(basis_l.bases[1]))/sqrt(basis_r.bases[1].N) else mul_before = ones(N[1]) @@ -399,21 +399,21 @@ function transform_xp(basis_l::CompositeBasis, basis_r::CompositeBasis, index::V end for i=2:n if any(i .== index) - mul_before = kron(exp.(1im*basis_l.bases[i].xmin*(samplepoints(basis_r.bases[i])-basis_r.bases[i].pmin)), mul_before) + mul_before = kron(exp.(1im*basis_l.bases[i].xmin*(samplepoints(basis_r.bases[i]) .- basis_r.bases[i].pmin)), mul_before) mul_after = kron(exp.(1im*basis_r.bases[i].pmin*samplepoints(basis_l.bases[i]))/sqrt(basis_r.bases[i].N), mul_after) else mul_before = kron(ones(N[i]), mul_before) mul_after = kron(ones(N[i]), mul_after) end end - mul_before = reshape(mul_before, (N...)) - mul_after = reshape(mul_after, (N...)) + mul_before = reshape(mul_before, (N...,)) + mul_after = reshape(mul_after, (N...,)) 
- x = Array{Complex128}(N...) + x = Array{ComplexF64}(undef, N...) if ket_only FFTKets(basis_l, basis_r, plan_fft!(x, index), plan_bfft!(x, index), mul_before, mul_after) else - A = Array{Complex128}([N, N;]...) + A = Array{ComplexF64}(undef, [N; N]...) FFTOperators(basis_l, basis_r, plan_fft!(x, index), plan_bfft!(x, index), plan_fft!(A, [n + 1:2n;][index]), plan_bfft!(A, [1:n;][index]), mul_before, mul_after) end end @@ -436,7 +436,7 @@ function transform_px(basis_l::CompositeBasis, basis_r::CompositeBasis, index::V end if index[1] == 1 - mul_before = exp.(-1im*basis_l.bases[1].pmin*(samplepoints(basis_r.bases[1])-basis_r.bases[1].xmin)) + mul_before = exp.(-1im*basis_l.bases[1].pmin*(samplepoints(basis_r.bases[1]) .- basis_r.bases[1].xmin)) mul_after = exp.(-1im*basis_r.bases[1].xmin*samplepoints(basis_l.bases[1]))/sqrt(N[1]) else mul_before = ones(N[1]) @@ -444,26 +444,26 @@ function transform_px(basis_l::CompositeBasis, basis_r::CompositeBasis, index::V end for i=2:n if i in index - mul_before = kron(exp.(-1im*basis_l.bases[i].pmin*(samplepoints(basis_r.bases[i])-basis_r.bases[i].xmin)), mul_before) + mul_before = kron(exp.(-1im*basis_l.bases[i].pmin*(samplepoints(basis_r.bases[i]) .- basis_r.bases[i].xmin)), mul_before) mul_after = kron(exp.(-1im*basis_r.bases[i].xmin*samplepoints(basis_l.bases[i]))/sqrt(N[i]), mul_after) else mul_before = kron(ones(N[i]), mul_before) mul_after = kron(ones(N[i]), mul_after) end end - mul_before = reshape(mul_before, (N...)) - mul_after = reshape(mul_after, (N...)) + mul_before = reshape(mul_before, (N...,)) + mul_after = reshape(mul_after, (N...,)) - x = Array{Complex128}(N...) + x = Array{ComplexF64}(undef, N...) if ket_only FFTKets(basis_l, basis_r, plan_bfft!(x, index), plan_fft!(x, index), mul_before, mul_after) else - A = Array{Complex128}([N, N;]...) + A = Array{ComplexF64}(undef, [N; N]...) FFTOperators(basis_l, basis_r, plan_bfft!(x, index), plan_fft!(x, index), plan_bfft!(A, [n + 1:2n;][index]), plan_fft!(A, [1:n;][index]), mul_before, mul_after) end end -operators.full(op::FFTOperators) = op*identityoperator(DenseOperator, op.basis_r) +operators.dense(op::FFTOperators) = op*identityoperator(DenseOperator, op.basis_r) operators.dagger(op::FFTOperators) = transform(op.basis_r, op.basis_l) operators.dagger(op::FFTKets) = transform(op.basis_r, op.basis_l; ket_only=true) @@ -472,8 +472,8 @@ operators.tensor(A::FFTOperators, B::FFTOperators) = transform(tensor(A.basis_l, operators.tensor(A::FFTKets, B::FFTKets) = transform(tensor(A.basis_l, B.basis_l), tensor(A.basis_r, B.basis_r); ket_only=true) function operators.gemv!(alpha_, M::FFTOperator, b::Ket, beta_, result::Ket) - alpha = convert(Complex128, alpha_) - beta = convert(Complex128, beta_) + alpha = convert(ComplexF64, alpha_) + beta = convert(ComplexF64, beta_) N::Int = length(M.basis_r) if beta==Complex(0.) @inbounds for i=1:N @@ -497,8 +497,8 @@ function operators.gemv!(alpha_, M::FFTOperator, b::Ket, beta_, result::Ket) end function operators.gemv!(alpha_, b::Bra, M::FFTOperator, beta_, result::Bra) - alpha = convert(Complex128, alpha_) - beta = convert(Complex128, beta_) + alpha = convert(ComplexF64, alpha_) + beta = convert(ComplexF64, beta_) N::Int = length(M.basis_l) if beta==Complex(0.) 
@inbounds for i=1:N @@ -522,46 +522,58 @@ function operators.gemv!(alpha_, b::Bra, M::FFTOperator, beta_, result::Bra) end function operators.gemm!(alpha_, A::DenseOperator, B::FFTOperators, beta_, result::DenseOperator) - alpha = convert(Complex128, alpha_) - beta = convert(Complex128, beta_) + alpha = convert(ComplexF64, alpha_) + beta = convert(ComplexF64, beta_) if beta != Complex(0.) - data = Matrix{Complex128}(size(result.data, 1), size(result.data, 2)) + data = Matrix{ComplexF64}(undef, size(result.data, 1), size(result.data, 2)) else data = result.data end - copy!(data, A.data) - scale!(data, B.mul_after[:]) + copyto!(data, A.data) + @inbounds for j=1:length(B.mul_after), i=1:length(B.mul_after) + data[i, j] *= B.mul_after[j] + end + # rmul!(data, B.mul_after[:]) conj!(data) - B.fft_l2! * reshape(data, [size(B.mul_after)..., size(B.mul_after)...;]...) + B.fft_l2! * reshape(data, [size(B.mul_after)...; size(B.mul_after)...]...) conj!(data) - scale!(data, B.mul_before[:]) + @inbounds for j=1:length(B.mul_before), i=1:length(B.mul_before) + data[i, j] *= B.mul_before[j] + end + # rmul!(data, B.mul_before[:]) if alpha != Complex(1.) - scale!(alpha, data) + lmul!(alpha, data) end if beta != Complex(0.) - scale!(result.data, beta) + rmul!(result.data, beta) result.data += data end nothing end function operators.gemm!(alpha_, A::FFTOperators, B::DenseOperator, beta_, result::DenseOperator) - alpha = convert(Complex128, alpha_) - beta = convert(Complex128, beta_) + alpha = convert(ComplexF64, alpha_) + beta = convert(ComplexF64, beta_) if beta != Complex(0.) - data = Matrix{Complex128}(size(result.data, 1), size(result.data, 2)) + data = Matrix{ComplexF64}(undef, size(result.data, 1), size(result.data, 2)) else data = result.data end - copy!(data, B.data) - scale!(A.mul_before[:], data) - A.fft_r2! * reshape(data, [size(A.mul_before)..., size(A.mul_before)...;]...) - scale!(A.mul_after[:], data) + copyto!(data, B.data) + @inbounds for j=1:length(A.mul_before), i=1:length(A.mul_before) + data[i, j] *= A.mul_before[i] + end + # rmul!(A.mul_before[:], data) + A.fft_r2! * reshape(data, [size(A.mul_before)...; size(A.mul_before)...]...) + @inbounds for j=1:length(A.mul_after), i=1:length(A.mul_after) + data[i, j] *= A.mul_after[i] + end + # rmul!(A.mul_after[:], data) if alpha != Complex(1.) - scale!(alpha, data) + lmul!(alpha, data) end if beta != Complex(0.) - scale!(result.data, beta) + rmul!(result.data, beta) result.data += data end nothing diff --git a/src/phasespace.jl b/src/phasespace.jl index 5fb87c2a..2cef8462 100755 --- a/src/phasespace.jl +++ b/src/phasespace.jl @@ -3,6 +3,7 @@ module phasespace export qfunc, wigner, coherentspinstate, qfuncsu2, wignersu2, ylm using ..bases, ..states, ..operators, ..operators_dense, ..fock, ..spin +using LinearAlgebra import WignerSymbols: clebschgordan @@ -19,7 +20,7 @@ done via the relation ``α = \\frac{1}{\\sqrt{2}}(x + i y)``. 
function qfunc(rho::Operator, alpha::Number) b = basis(rho) @assert isa(b, FockBasis) - _qfunc_operator(rho, convert(Complex128, alpha), Ket(b), Ket(b)) + _qfunc_operator(rho, convert(ComplexF64, alpha), Ket(b), Ket(b)) end function qfunc(rho::Operator, xvec::Vector{Float64}, yvec::Vector{Float64}) @@ -29,7 +30,7 @@ function qfunc(rho::Operator, xvec::Vector{Float64}, yvec::Vector{Float64}) Ny = length(yvec) tmp1 = Ket(b) tmp2 = Ket(b) - result = Matrix{Complex128}(Nx, Ny) + result = Matrix{ComplexF64}(undef, Nx, Ny) @inbounds for j=1:Ny, i=1:Nx result[i, j] = _qfunc_operator(rho, complex(xvec[i], yvec[j])/sqrt(2), tmp1, tmp2) end @@ -39,7 +40,7 @@ end function qfunc(psi::Ket, alpha::Number) b = basis(psi) @assert isa(b, FockBasis) - _alpha = convert(Complex128, alpha) + _alpha = convert(ComplexF64, alpha) _conj_alpha = conj(_alpha) N = length(psi.basis) s = psi.data[N]/sqrt(N-1) @@ -75,10 +76,10 @@ function qfunc(psi::Ket, xvec::Vector{Float64}, yvec::Vector{Float64}) end function qfunc(state::Union{Ket, Operator}, x::Number, y::Number) - qfunc(state, Complex128(x, y)/sqrt(2)) + qfunc(state, ComplexF64(x, y)/sqrt(2)) end -function _qfunc_operator(rho::Operator, alpha::Complex128, tmp1::Ket, tmp2::Ket) +function _qfunc_operator(rho::Operator, alpha::ComplexF64, tmp1::Ket, tmp2::Ket) coherentstate(basis(rho), alpha, tmp1) operators.gemv!(complex(1.), rho, tmp1, complex(0.), tmp2) a = dot(tmp1.data, tmp2.data) @@ -119,7 +120,7 @@ function wigner(rho::DenseOperator, xvec::Vector{Float64}, yvec::Vector{Float64} N = b.N::Int _2α = [complex(x, y)*sqrt(2) for x=xvec, y=yvec] abs2_2α = abs2.(_2α) - w = zeros(_2α) + w = zero(_2α) b0 = similar(_2α) b1 = similar(_2α) b2 = similar(_2α) @@ -137,9 +138,9 @@ wigner(psi::Ket, x, y) = wigner(dm(psi), x, y) wigner(state, alpha::Number) = wigner(state, real(alpha)*sqrt(2), imag(alpha)*sqrt(2)) -function _clenshaw_grid(L::Int, ρ::Matrix{Complex128}, - abs2_2α::Matrix{Float64}, _2α::Matrix{Complex128}, w::Matrix{Complex128}, - b0::Matrix{Complex128}, b1::Matrix{Complex128}, b2::Matrix{Complex128}, scale::Int) +function _clenshaw_grid(L::Int, ρ::Matrix{ComplexF64}, + abs2_2α::Matrix{Float64}, _2α::Matrix{ComplexF64}, w::Matrix{ComplexF64}, + b0::Matrix{ComplexF64}, b1::Matrix{ComplexF64}, b2::Matrix{ComplexF64}, scale::Int) n = size(ρ, 1)-L-1 points = length(w) if n==0 @@ -179,7 +180,7 @@ function _clenshaw_grid(L::Int, ρ::Matrix{Complex128}, end end -function _clenshaw(L::Int, abs2_2α::Float64, ρ::Matrix{Complex128}) +function _clenshaw(L::Int, abs2_2α::Float64, ρ::Matrix{ComplexF64}) n = size(ρ, 1)-L-1 if n==0 return ρ[1, L+1] @@ -215,7 +216,7 @@ parametrization is not unique), similarly to a qubit on the Bloch sphere. 
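The `undef` arguments threading through qfunc and wigner above are the 0.7 way to request an uninitialized buffer; the old `Matrix{Complex128}(Nx, Ny)` form is gone. A tiny sketch with made-up grid sizes:

Nx, Ny = 10, 20                               # illustrative sizes only
result = Matrix{ComplexF64}(undef, Nx, Ny)    # uninitialized; must be written before read
for j = 1:Ny, i = 1:Nx
    result[i, j] = complex(i, j)              # fill every entry
end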
""" function coherentspinstate(b::SpinBasis, theta::Real, phi::Real, - result = Ket(b, Vector{Complex128}(length(b)))) + result = Ket(b, Vector{ComplexF64}(undef, length(b)))) data = result.data N = BigInt(length(b)-1) sinth = sin(0.5theta) @@ -248,7 +249,7 @@ function qfuncsu2(psi::Ket, Ntheta::Int; Nphi::Int=2Ntheta) psi_bra_data = psi.data' lb = float(b.spinnumber) @assert isa(b, SpinBasis) - result = Array{Float64}(Ntheta,Nphi) + result = Array{Float64}(undef, Ntheta,Nphi) @inbounds for i = 0:Ntheta-1, j = 0:Nphi-1 result[i+1,j+1] = (2*lb+1)/(4pi)*abs2(psi_bra_data*coherentspinstate(b,pi-i*pi/(Ntheta-1),j*2pi/(Nphi-1)-pi).data) end @@ -259,7 +260,7 @@ function qfuncsu2(rho::DenseOperator, Ntheta::Int; Nphi::Int=2Ntheta) b = basis(rho) lb = float(b.spinnumber) @assert isa(b, SpinBasis) - result = Array{Float64}(Ntheta,Nphi) + result = Array{Float64}(undef, Ntheta,Nphi) @inbounds for i = 0:Ntheta-1, j = 0:Nphi-1 c = coherentspinstate(b,pi-i*1pi/(Ntheta-1),j*2pi/(Nphi-1)-pi) result[i+1,j+1] = abs((2*lb+1)/(4pi)*c.data'*rho.data*c.data) @@ -305,9 +306,9 @@ function wignersu2(rho::DenseOperator, theta::Real, phi::Real) N = length(basis(rho))-1 ### Tensor generation ### - BandT = Array{Vector{Float64}}(N,N+1) - BandT[1,1] = collect(linspace(-N/2, N/2, N+1)) - BandT[1,2] = -collect(sqrt.(linspace(1, N, N)).*sqrt.(linspace((N)/2, 1/2, N))) + BandT = Array{Vector{Float64}}(undef, N,N+1) + BandT[1,1] = collect(range(-N/2, stop=N/2, length=N+1)) + BandT[1,2] = -collect(sqrt.(range(1, stop=N, length=N)).*sqrt.(range((N)/2, stop=1/2, length=N))) BandT[2,1] = clebschgordan(1,0,1,0,2,0)*BandT[1,1].*BandT[1,1] - clebschgordan(1,-1,1,1,2,0)*[zeros(N+1-length(BandT[1,2])); BandT[1,2].*BandT[1,2]] - clebschgordan(1,1,1,-1,2,0)*[BandT[1,2].*BandT[1,2]; zeros(N+1-length(BandT[1,2]))] @@ -341,7 +342,7 @@ function wignersu2(rho::DenseOperator, theta::Real, phi::Real) ### State decomposition ### c = rho.data - EVT = Array{Complex128}(N,N+1) + EVT = Array{ComplexF64}(undef, N,N+1) @inbounds for S = 1:N, M = 0:S EVT[S,M+1] = conj(sum(BandT[S,M+1].*diag(c,M))) end @@ -355,9 +356,9 @@ function wignersu2(rho::DenseOperator, Ntheta::Int; Nphi::Int=2Ntheta) N = length(basis(rho))-1 ### Tensor generation ### - BandT = Array{Vector{Float64}}(N,N+1) - BandT[1,1] = collect(linspace(-N/2, N/2, N+1)) - BandT[1,2] = -collect(sqrt.(linspace(1, N, N)).*sqrt.(linspace((N)/2, 1/2, N))) + BandT = Array{Vector{Float64}}(undef, N,N+1) + BandT[1,1] = collect(range(-N/2, stop=N/2, length=N+1)) + BandT[1,2] = -collect(sqrt.(range(1, stop=N, length=N)).*sqrt.(range((N)/2, stop=1/2, length=N))) BandT[2,1] = clebschgordan(1,0,1,0,2,0)*BandT[1,1].*BandT[1,1] - clebschgordan(1,-1,1,1,2,0)*[zeros(N+1-length(BandT[1,2])); BandT[1,2].*BandT[1,2]] - clebschgordan(1,1,1,-1,2,0)*[BandT[1,2].*BandT[1,2]; zeros(N+1-length(BandT[1,2]))] @@ -389,19 +390,19 @@ function wignersu2(rho::DenseOperator, Ntheta::Int; Nphi::Int=2Ntheta) ### State decomposition ### c = rho.data - EVT = Array{Complex128}(N,N+1) + EVT = Array{ComplexF64}(undef, N,N+1) @inbounds for S = 1:N, M = 0:S EVT[S,M+1] = conj(sum(BandT[S,M+1].*diag(c,M))) end - wignermap = Array{Float64}(Ntheta,Nphi) + wignermap = Array{Float64}(undef, Ntheta,Nphi) @inbounds for i = 1:Ntheta, j = 1:Nphi wignermap[i,j] = _wignersu2int(N,i*1pi/(Ntheta-1)-1pi/(Ntheta-1),j*2pi/(Nphi-1)-2pi/(Nphi-1)-pi, EVT) end return wignermap*sqrt((N+1)/(4pi)) end -function _wignersu2int(N::Integer, theta::Real, phi::Real, EVT::Array{Complex128, 2}) +function _wignersu2int(N::Integer, theta::Real, phi::Real, 
EVT::Array{ComplexF64, 2}) UberBand = sqrt(1/(1+N))*ylm(0,0,theta,phi) @inbounds for S = 1:N @inbounds for M = 1:S @@ -424,7 +425,7 @@ This function calculates the value of Y(l,m) spherical harmonic at position θ a function ylm(l::Integer,m::Integer,theta::Real,phi::Real) phi_ = mod(phi,2pi) theta_ = mod(theta,2pi) - phase = e^(1.0im*m*phi_) + phase = exp(1.0im*m*phi_) if theta_ ≈ 0 if m == 0 return @. phase*sqrt((2*l+1)/pi)/2 diff --git a/src/polynomials.jl b/src/polynomials.jl index e89c435f..889d8cf3 100644 --- a/src/polynomials.jl +++ b/src/polynomials.jl @@ -35,7 +35,7 @@ Returns a vector of length N+1 where the n-th entry contains all coefficients for the n-th Hermite polynomial. """ function a(N::Int) - a = Vector{Vector{Int}}(N+1) + a = Vector{Vector{Int}}(undef, N+1) a[1] = [1] a[2] = [0,2] am = a[2] @@ -72,7 +72,7 @@ Returns a vector of length N+1 where the n-th entry contains all scaled coefficients for the n-th Hermite polynomial. """ function A(N) - A = Vector{Vector{Float64}}(N+1) + A = Vector{Vector{Float64}}(undef, N+1) A[1] = [1.] A[2] = [0., sqrt(2)] Am = A[2] @@ -104,4 +104,4 @@ module laguerre end # module laguerre -end # module polynomials \ No newline at end of file +end # module polynomials diff --git a/src/printing.jl b/src/printing.jl index e3298d55..85ad5d3a 100644 --- a/src/printing.jl +++ b/src/printing.jl @@ -2,13 +2,13 @@ module printing export set_printing -import Base: show +import Base: show, summary -using Compat using ..bases, ..states using ..operators, ..operators_dense, ..operators_sparse using ..operators_lazytensor, ..operators_lazysum, ..operators_lazyproduct using ..spin, ..fock, ..nlevel, ..particle, ..subspace, ..manybody, ..sparsematrix +using SparseArrays """ @@ -25,7 +25,7 @@ Set options for REPL output. 
function set_printing(; standard_order::Bool=_std_order, rounding_tol::Real=_round_tol) global _std_order = standard_order global _round_tol = rounding_tol - global machineprecorder = Int32(round(-log10(_round_tol), 0)) + global machineprecorder = Int32(round(-log10(_round_tol), digits=0)) nothing end set_printing(standard_order=false, rounding_tol=1e-17) @@ -34,7 +34,7 @@ function show(stream::IO, x::GenericBasis) if length(x.shape) == 1 write(stream, "Basis(dim=$(x.shape[1]))") else - s = replace(string(x.shape), " ", "") + s = replace(string(x.shape), " " => "") write(stream, "Basis(shape=$s)") end end @@ -84,72 +84,80 @@ function show(stream::IO, x::ManyBodyBasis) write(stream, "ManyBody(onebodybasis=$(x.onebodybasis), states:$(length(x.occupations)))") end +summary(io::IO, x::Ket) = print(io, "Ket(dim=$(length(x.basis)))\n basis: $(x.basis)\n") function show(stream::IO, x::Ket) - write(stream, "Ket(dim=$(length(x.basis)))\n basis: $(x.basis)\n") + summary(stream, x) if !_std_order - Base.showarray(stream, x.data, false; header=false) + Base.print_array(stream, round.(x.data; digits=machineprecorder)) else - showarray_stdord(stream, x.data, x.basis.shape, false, header=false) + showarray_stdord(stream, round.(x.data; digits=machineprecorder), x.basis.shape, false, header=false) end end +summary(io::IO, x::Bra) = print(io, "Bra(dim=$(length(x.basis)))\n basis: $(x.basis)\n") function show(stream::IO, x::Bra) - write(stream, "Bra(dim=$(length(x.basis)))\n basis: $(x.basis)\n") + summary(stream, x) if !_std_order - Base.showarray(stream, x.data, false; header=false) + Base.print_array(stream, round.(x.data; digits=machineprecorder)) else - showarray_stdord(stream, x.data, x.basis.shape, false, header=false) + showarray_stdord(stream, round.(x.data; digits=machineprecorder), x.basis.shape, false, header=false) end end -function showoperatorheader(stream::IO, x::Operator) - write(stream, "$(typeof(x).name.name)(dim=$(length(x.basis_l))x$(length(x.basis_r)))\n") +function summary(stream::IO, x::Operator) + print(stream, "$(typeof(x).name.name)(dim=$(length(x.basis_l))x$(length(x.basis_r)))\n") if bases.samebases(x) - write(stream, " basis: ") + print(stream, " basis: ") show(stream, basis(x)) else - write(stream, " basis left: ") + print(stream, " basis left: ") show(stream, x.basis_l) - write(stream, "\n basis right: ") + print(stream, "\n basis right: ") show(stream, x.basis_r) end end -show(stream::IO, x::Operator) = showoperatorheader(stream, x) +show(stream::IO, x::Operator) = summary(stream, x) function show(stream::IO, x::DenseOperator) - showoperatorheader(stream, x) - write(stream, "\n") + summary(stream, x) + print(stream, "\n") if !_std_order - Base.showarray(stream, x.data, false; header=false) + if !haskey(stream, :compact) + stream = IOContext(stream, :compact => true) + end + Base.print_array(stream, round.(x.data; digits=machineprecorder)) else - showarray_stdord(stream, x.data, x.basis_l.shape, x.basis_r.shape, false, header=false) + showarray_stdord(stream, round.(x.data; digits=machineprecorder), x.basis_l.shape, x.basis_r.shape, false, header=false) end end function show(stream::IO, x::SparseOperator) - showoperatorheader(stream, x) + summary(stream, x) if nnz(x.data) == 0 - write(stream, "\n []") + print(stream, "\n []") else if !_std_order - show(stream, x.data) + if !haskey(stream, :compact) + stream = IOContext(stream, :compact => true) + end + show(stream, round.(x.data; digits=machineprecorder)) else - showsparsearray_stdord(stream, x.data, x.basis_l.shape, 
x.basis_r.shape) + showsparsearray_stdord(stream, round.(x.data; digits=machineprecorder), x.basis_l.shape, x.basis_r.shape) end end end function show(stream::IO, x::LazyTensor) - showoperatorheader(stream, x) - write(stream, "\n operators: $(length(x.operators))") - s = replace(string(x.indices), " ", "") - write(stream, "\n indices: $s") + summary(stream, x) + print(stream, "\n operators: $(length(x.operators))") + s = replace(string(x.indices), " " => "") + print(stream, "\n indices: $s") end function show(stream::IO, x::Union{LazySum, LazyProduct}) - showoperatorheader(stream, x) - write(stream, "\n operators: $(length(x.operators))") + summary(stream, x) + print(stream, "\n operators: $(length(x.operators))") end """ @@ -304,7 +312,7 @@ function alignment_std(io::IO, X::AbstractVecOrMat, ldims::Vector, rdims::Vector break end end - if 1 < length(a) < length(indices(X,2)) + if 1 < length(a) < length(axes(X,2)) while sum(map(sum,a)) + sep*length(a) >= cols_otherwise pop!(a) end @@ -322,12 +330,12 @@ is specified as string sep. function print_matrix_row_std(io::IO, X::AbstractVecOrMat, A::Vector, ldims::Vector, rdims::Vector, i::Integer, cols::AbstractVector, sep::AbstractString) - isempty(A) || first(indices(cols,1)) == 1 || throw(DimensionMismatch("indices of cols ($(indices(cols,1))) must start at 1")) + isempty(A) || first(axes(cols,1)) == 1 || throw(DimensionMismatch("indices of cols ($(axes(cols,1))) must start at 1")) for k = 1:length(A) j = cols[k] x = X[mirror_world_index(i, ldims), mirror_world_index(j, rdims)] a = Base.alignment(io, x) - sx = sprint(0, show, x, env=io) + sx = sprint(show, x, context=io) l = repeat(" ", A[k][1]-a[1]) # pad on left and right as needed r = repeat(" ", A[k][2]-a[2]) prettysx = Base.replace_in_print_matrix(X,mirror_world_index(i, ldims),mirror_world_index(j, rdims),sx) @@ -354,9 +362,9 @@ function print_matrix_std(io::IO, X::AbstractVecOrMat, ldims::Vector, rdims::Vec screenwidth -= length(pre) + length(post) presp = repeat(" ", length(pre)) # indent each row to match pre string postsp = "" - @assert strwidth(hdots) == strwidth(ddots) + @assert textwidth(hdots) == textwidth(ddots) sepsize = length(sep) - rowsA, colsA = indices(X,1), indices(X,2) + rowsA, colsA = axes(X,1), axes(X,2) m, n = length(rowsA), length(colsA) # To figure out alignments, only need to look at as many rows as could # fit down screen. If screen has at least as many rows as A, look at A. @@ -391,7 +399,7 @@ function print_matrix_std(io::IO, X::AbstractVecOrMat, ldims::Vector, rdims::Vec print(io, i == first(rowsA) ? pre : presp) print_matrix_row_std(io, X,Lalign,ldims,rdims,i,colsA[1:length(Lalign)],sep) print(io, (i - first(rowsA)) % hmod == 0 ? hdots : repeat(" ", length(hdots))) - print_matrix_row_std(io, X,Ralign,ldims,rdims,i,n-length(Ralign)+colsA,sep) + print_matrix_row_std(io, X,Ralign,ldims,rdims,i,n-length(Ralign).+colsA,sep) print(io, i == last(rowsA) ? post : postsp) if i != last(rowsA); println(io); end end @@ -419,7 +427,7 @@ function print_matrix_std(io::IO, X::AbstractVecOrMat, ldims::Vector, rdims::Vec print(io, i == first(rowsA) ? pre : presp) print_matrix_row_std(io, X,Lalign,ldims, rdims,i,colsA[1:length(Lalign)],sep) print(io, (i - first(rowsA)) % hmod == 0 ? hdots : repeat(" ", length(hdots))) - print_matrix_row_std(io, X,Ralign,ldims, rdims,i,n-length(Ralign)+colsA,sep) + print_matrix_row_std(io, X,Ralign,ldims, rdims,i,n-length(Ralign).+colsA,sep) print(io, i == last(rowsA) ? 
post : postsp) if i != rowsA[end]; println(io); end if i == rowsA[halfheight] @@ -435,7 +443,7 @@ function print_matrix_std(io::IO, X::AbstractVecOrMat, ldims::Vector, rdims::Vec end function showarray_stdord(io::IO, X::AbstractVecOrMat, ldims::Vector, rdims::Vector, repr::Bool = true; header = true) - if !haskey(io, :compact) + if !haskey(io, :compact) && length(axes(X, 2)) > 1 io = IOContext(io, :compact => true) end if !isempty(X) @@ -455,7 +463,7 @@ function showsparsearray_stdord(io::IO, S::SparseMatrixCSC, ldims::Vector, rdims end pad = ndigits(max(S.m,S.n)) sep = "\n " - if !haskey(io, :compact) + if !haskey(io, :compact) && length(axes(S, 2)) > 1 io = IOContext(io, :compact => true) end diff --git a/src/schroedinger.jl b/src/schroedinger.jl index 439eee69..afb063b4 100644 --- a/src/schroedinger.jl +++ b/src/schroedinger.jl @@ -22,7 +22,7 @@ Integrate Schroedinger equation. therefore must not be changed. """ function schroedinger(tspan, psi0::T, H::Operator; - fout::Union{Function,Void}=nothing, + fout::Union{Function,Nothing}=nothing, kwargs...) where T<:StateVector tspan_ = convert(Vector{Float64}, tspan) check_schroedinger(psi0, H) @@ -49,7 +49,7 @@ Integrate time-dependent Schroedinger equation. therefore must not be changed. """ function schroedinger_dynamic(tspan, psi0::T, f::Function; - fout::Union{Function,Void}=nothing, + fout::Union{Function,Nothing}=nothing, kwargs...) where T<:StateVector tspan_ = convert(Vector{Float64}, tspan) dschroedinger_(t::Float64, psi::T, dpsi::T) = dschroedinger_dynamic(t, psi, f, dpsi) @@ -60,8 +60,8 @@ function schroedinger_dynamic(tspan, psi0::T, f::Function; end -recast!(x::Vector{Complex128}, psi::StateVector) = (psi.data = x); -recast!(psi::StateVector, x::Vector{Complex128}) = nothing +recast!(x::Vector{ComplexF64}, psi::StateVector) = (psi.data = x); +recast!(psi::StateVector, x::Vector{ComplexF64}) = nothing function dschroedinger(psi::Ket, H::Operator, dpsi::Ket) diff --git a/src/semiclassical.jl b/src/semiclassical.jl index b417a529..fcf86a95 100644 --- a/src/semiclassical.jl +++ b/src/semiclassical.jl @@ -8,7 +8,7 @@ using ..bases, ..states, ..operators, ..operators_dense, ..timeevolution const QuantumState = Union{Ket, DenseOperator} -const DecayRates = Union{Void, Vector{Float64}, Matrix{Float64}} +const DecayRates = Union{Nothing, Vector{Float64}, Matrix{Float64}} """ Semi-classical state. @@ -18,7 +18,7 @@ a classical part that is specified as a complex vector of arbitrary length. """ mutable struct State{T<:QuantumState} quantum::T - classical::Vector{Complex128} + classical::Vector{ComplexF64} end Base.length(state::State) = length(state.quantum) + length(state.classical) @@ -58,11 +58,11 @@ Integrate time-dependent Schrödinger equation coupled to a classical system. * `kwargs...`: Further arguments are passed on to the ode solver. """ function schroedinger_dynamic(tspan, state0::State{Ket}, fquantum::Function, fclassical::Function; - fout::Union{Function,Void}=nothing, + fout::Union{Function,Nothing}=nothing, kwargs...) tspan_ = convert(Vector{Float64}, tspan) dschroedinger_(t, state::State{Ket}, dstate::State{Ket}) = dschroedinger_dynamic(t, state, fquantum, fclassical, dstate) - x0 = Vector{Complex128}(length(state0)) + x0 = Vector{ComplexF64}(undef, length(state0)) recast!(state0, x0) state = copy(state0) dstate = copy(state0) @@ -89,15 +89,15 @@ Integrate time-dependent master equation coupled to a classical system. * `kwargs...`: Further arguments are passed on to the ode solver. 
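The recurring `Void` to `Nothing` rename above keeps the optional-callback pattern intact: `fout::Union{Function,Nothing}=nothing` still means "no output function given". A hedged sketch of the idiom (`integrate_demo` is an invented name, not part of the package):

function integrate_demo(tspan, state; fout::Union{Function,Nothing}=nothing)
    # `nothing` now has type Nothing (formerly Void), so the Union dispatch is unchanged
    if fout === nothing
        return collect(tspan), [copy(state) for t in tspan]
    else
        return collect(tspan), [fout(t, state) for t in tspan]
    end
end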
""" function master_dynamic(tspan, state0::State{DenseOperator}, fquantum, fclassical; - rates::Union{Vector{Float64}, Matrix{Float64}, Void}=nothing, - fout::Union{Function,Void}=nothing, + rates::Union{Vector{Float64}, Matrix{Float64}, Nothing}=nothing, + fout::Union{Function,Nothing}=nothing, tmp::DenseOperator=copy(state0.quantum), kwargs...) tspan_ = convert(Vector{Float64}, tspan) function dmaster_(t, state::State{DenseOperator}, dstate::State{DenseOperator}) dmaster_h_dynamic(t, state, fquantum, fclassical, rates, dstate, tmp) end - x0 = Vector{Complex128}(length(state0)) + x0 = Vector{ComplexF64}(undef, length(state0)) recast!(state0, x0) state = copy(state0) dstate = copy(state0) @@ -109,17 +109,17 @@ function master_dynamic(tspan, state0::State{Ket}, fquantum, fclassical; kwargs. end -function recast!(state::State, x::Vector{Complex128}) +function recast!(state::State, x::Vector{ComplexF64}) N = length(state.quantum) - copy!(x, 1, state.quantum.data, 1, N) - copy!(x, N+1, state.classical, 1, length(state.classical)) + copyto!(x, 1, state.quantum.data, 1, N) + copyto!(x, N+1, state.classical, 1, length(state.classical)) x end -function recast!(x::Vector{Complex128}, state::State) +function recast!(x::Vector{ComplexF64}, state::State) N = length(state.quantum) - copy!(state.quantum.data, 1, x, 1, N) - copy!(state.classical, 1, x, N+1, length(state.classical)) + copyto!(state.quantum.data, 1, x, 1, N) + copyto!(state.classical, 1, x, N+1, length(state.classical)) end function dschroedinger_dynamic(t::Float64, state::State{Ket}, fquantum::Function, diff --git a/src/sortedindices.jl b/src/sortedindices.jl index 9dc26994..6d102b21 100644 --- a/src/sortedindices.jl +++ b/src/sortedindices.jl @@ -5,7 +5,7 @@ module sortedindices """ function complement(N::Int, indices::Vector{Int}) L = length(indices) - x = Vector{Int}(N - L) + x = Vector{Int}(undef, N - L) i_ = 1 # Position in the x vector j = 1 # Position in indices vector for i=1:N @@ -133,16 +133,16 @@ end function reducedindices(I_::Vector{Int}, I::Vector{Int}) N = length(I_) - x = Vector{Int}(N) + x = Vector{Int}(undef, N) for n in 1:N - x[n] = findfirst(I, I_[n]) + x[n] = findfirst(isequal(I_[n]), I) end x end function reducedindices!(I_::Vector{Int}, I::Vector{Int}) for n in 1:length(I_) - I_[n] = findfirst(I, I_[n]) + I_[n] = findfirst(isequal(I_[n]), I) end end diff --git a/src/sparsematrix.jl b/src/sparsematrix.jl index 2211b5cc..02c5c30e 100644 --- a/src/sparsematrix.jl +++ b/src/sparsematrix.jl @@ -1,10 +1,12 @@ module sparsematrix -const SparseMatrix = SparseMatrixCSC{Complex128, Int} +using SparseArrays, LinearAlgebra +const SparseMatrix = SparseMatrixCSC{ComplexF64, Int} -function gemm_sp_dense_small(alpha::Complex128, M::SparseMatrix, B::Matrix{Complex128}, result::Matrix{Complex128}) - if alpha == Complex128(1.) + +function gemm_sp_dense_small(alpha::ComplexF64, M::SparseMatrix, B::Matrix{ComplexF64}, result::Matrix{ComplexF64}) + if alpha == ComplexF64(1.) @inbounds for colindex = 1:M.n @inbounds for i=M.colptr[colindex]:M.colptr[colindex+1]-1 row = M.rowval[i] @@ -27,8 +29,8 @@ function gemm_sp_dense_small(alpha::Complex128, M::SparseMatrix, B::Matrix{Compl end end -function gemm_sp_dense_big(alpha::Complex128, M::SparseMatrix, B::Matrix{Complex128}, result::Matrix{Complex128}) - if alpha == Complex128(1.) +function gemm_sp_dense_big(alpha::ComplexF64, M::SparseMatrix, B::Matrix{ComplexF64}, result::Matrix{ComplexF64}) + if alpha == ComplexF64(1.) 
@inbounds for j=1:size(B, 2) @inbounds for colindex = 1:M.n m2 = B[colindex, j] @@ -51,11 +53,11 @@ function gemm_sp_dense_big(alpha::Complex128, M::SparseMatrix, B::Matrix{Complex end end -function gemm!(alpha::Complex128, M::SparseMatrix, B::Matrix{Complex128}, beta::Complex128, result::Matrix{Complex128}) - if beta == Complex128(0.) +function gemm!(alpha::ComplexF64, M::SparseMatrix, B::Matrix{ComplexF64}, beta::ComplexF64, result::Matrix{ComplexF64}) + if beta == ComplexF64(0.) fill!(result, beta) - elseif beta != Complex128(1.) - scale!(result, beta) + elseif beta != ComplexF64(1.) + rmul!(result, beta) end if nnz(M) > 1000 gemm_sp_dense_big(alpha, M, B, result) @@ -64,14 +66,14 @@ function gemm!(alpha::Complex128, M::SparseMatrix, B::Matrix{Complex128}, beta:: end end -function gemm!(alpha::Complex128, B::Matrix{Complex128}, M::SparseMatrix, beta::Complex128, result::Matrix{Complex128}) - if beta == Complex128(0.) +function gemm!(alpha::ComplexF64, B::Matrix{ComplexF64}, M::SparseMatrix, beta::ComplexF64, result::Matrix{ComplexF64}) + if beta == ComplexF64(0.) fill!(result, beta) - elseif beta != Complex128(1.) - scale!(result, beta) + elseif beta != ComplexF64(1.) + rmul!(result, beta) end dimB = size(result,1) - if alpha == Complex128(1.) + if alpha == ComplexF64(1.) @inbounds for colindex = 1:M.n @inbounds for i=M.colptr[colindex]:M.colptr[colindex+1]-1 mi = M.nzval[i] @@ -94,13 +96,13 @@ function gemm!(alpha::Complex128, B::Matrix{Complex128}, M::SparseMatrix, beta:: end end -function gemv!(alpha::Complex128, M::SparseMatrix, v::Vector{Complex128}, beta::Complex128, result::Vector{Complex128}) - if beta == Complex128(0.) +function gemv!(alpha::ComplexF64, M::SparseMatrix, v::Vector{ComplexF64}, beta::ComplexF64, result::Vector{ComplexF64}) + if beta == ComplexF64(0.) fill!(result, beta) - elseif beta != Complex128(1.) - scale!(result, beta) + elseif beta != ComplexF64(1.) + rmul!(result, beta) end - if alpha == Complex128(1.) + if alpha == ComplexF64(1.) @inbounds for colindex = 1:M.n vj = v[colindex] for i=M.colptr[colindex]:M.colptr[colindex+1]-1 @@ -117,13 +119,13 @@ function gemv!(alpha::Complex128, M::SparseMatrix, v::Vector{Complex128}, beta:: end end -function gemv!(alpha::Complex128, v::Vector{Complex128}, M::SparseMatrix, beta::Complex128, result::Vector{Complex128}) - if beta == Complex128(0.) +function gemv!(alpha::ComplexF64, v::Vector{ComplexF64}, M::SparseMatrix, beta::ComplexF64, result::Vector{ComplexF64}) + if beta == ComplexF64(0.) fill!(result, beta) - elseif beta != Complex128(1.) - scale!(result, beta) + elseif beta != ComplexF64(1.) + rmul!(result, beta) end - if alpha == Complex128(1.) + if alpha == ComplexF64(1.) @inbounds for colindex=1:M.n for i=M.colptr[colindex]:M.colptr[colindex+1]-1 result[colindex] += M.nzval[i]*v[M.rowval[i]] @@ -139,21 +141,21 @@ function gemv!(alpha::Complex128, v::Vector{Complex128}, M::SparseMatrix, beta:: end function sub2sub(shape1::NTuple{N, Int}, shape2::NTuple{M, Int}, I::CartesianIndex{N}) where {N, M} - linearindex = sub2ind(shape1, I.I...) - CartesianIndex(ind2sub(shape2, linearindex)...) + linearindex = LinearIndices(shape1)[I.I...] + CartesianIndices(shape2)[linearindex] end function ptrace(x, shape_nd::Vector{Int}, indices::Vector{Int}) - shape_nd = (shape_nd...) + shape_nd = (shape_nd...,) N = div(length(shape_nd), 2) shape_2d = (x.m, x.n) - shape_nd_after = ([i ∈ indices || i-N ∈ indices ? 1 : shape_nd[i] for i=1:2*N]...) + shape_nd_after = ([i ∈ indices || i-N ∈ indices ? 
1 : shape_nd[i] for i=1:2*N]...,) shape_2d_after = (prod(shape_nd_after[1:N]), prod(shape_nd_after[N+1:end])) I_nd_after_max = CartesianIndex(shape_nd_after...) - y = spzeros(Complex128, shape_2d_after...) + y = spzeros(ComplexF64, shape_2d_after...) for I in eachindex(x) I_nd = sub2sub(shape_2d, shape_nd, I) - if I_nd.I[indices] != I_nd.I[indices + N] + if I_nd.I[indices] != I_nd.I[indices .+ N] continue end I_after = sub2sub(shape_nd_after, shape_2d_after, min(I_nd, I_nd_after_max)) @@ -163,15 +165,15 @@ function ptrace(x, shape_nd::Vector{Int}, indices::Vector{Int}) end function permutedims(x, shape, perm) - shape = (shape...) - shape_perm = ([shape[i] for i in perm]...) - y = spzeros(Complex128, x.m, x.n) + shape = (shape...,) + shape_perm = ([shape[i] for i in perm]...,) + y = spzeros(ComplexF64, x.m, x.n) for I in eachindex(x) - linear_index = sub2ind((x.m, x.n), I.I...) - cartesian_index = ind2sub(shape, linear_index) + linear_index = LinearIndices((x.m, x.n))[I.I...] + cartesian_index = CartesianIndices(shape)[linear_index] cartesian_index_perm = [cartesian_index[i] for i=perm] - linear_index_perm = sub2ind(shape_perm, cartesian_index_perm...) - J = ind2sub((x.m, x.n), linear_index_perm) + linear_index_perm = LinearIndices(shape_perm)[cartesian_index_perm...] + J = Tuple(CartesianIndices((x.m, x.n))[linear_index_perm]) y[J...] = x[I.I...] end y diff --git a/src/spectralanalysis.jl b/src/spectralanalysis.jl index be90450c..f097816c 100644 --- a/src/spectralanalysis.jl +++ b/src/spectralanalysis.jl @@ -3,6 +3,7 @@ module spectralanalysis export eigenstates, eigenenergies, simdiag using ..bases, ..states, ..operators, ..operators_dense, ..operators_sparse +using Arpack, LinearAlgebra const nonhermitian_warning = "The given operator is not hermitian. If this is due to a numerical error make the operator hermitian first by calculating (x+dagger(x))/2 first." @@ -12,15 +13,15 @@ const nonhermitian_warning = "The given operator is not hermitian. If this is du Calculate the lowest n eigenvalues and their corresponding eigenstates. -This is just a thin wrapper around julia's `eig` and `eigs` functions. Which +This is just a thin wrapper around julia's `eigen` and `eigs` functions. Which of them is used depends on the type of the given operator. If more control about the way the calculation is done is needed, use the functions directly. More details can be found at [http://docs.julialang.org/en/stable/stdlib/linalg/]. -NOTE: Especially for small systems full diagonalization with Julia's `eig` +NOTE: Especially for small systems full diagonalization with Julia's `eigen` function is often more desirable. You can convert a sparse operator `A` to a -dense one using `full(A)`. +dense one using `dense(A)`. If the given operator is non-hermitian a warning is given. This behavior can be turned off using the keyword `warning=false`. @@ -28,12 +29,12 @@ can be turned off using the keyword `warning=false`. 
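The `sub2ind`/`ind2sub` replacements in sparsematrix.jl above follow one mechanical rule: index a `LinearIndices` or `CartesianIndices` object built from the shape. A small sketch for an assumed 2x3 shape:

shape = (2, 3)

lin  = LinearIndices(shape)[2, 3]        # was: sub2ind(shape, 2, 3); gives 6
cart = CartesianIndices(shape)[6]        # was: ind2sub(shape, 6); gives CartesianIndex(2, 3)
i, j = Tuple(cart)                       # back to plain integers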
function eigenstates(op::DenseOperator, n::Int=length(basis(op)); warning=true) b = basis(op) if ishermitian(op) - D, V = eig(Hermitian(op.data), 1:n) + D, V = eigen(Hermitian(op.data), 1:n) states = [Ket(b, V[:, k]) for k=1:length(D)] return D, states else warning && warn(nonhermitian_warning) - D, V = eig(op.data) + D, V = eigen(op.data) states = [Ket(b, V[:, k]) for k=1:length(D)] perm = sortperm(D, by=real) permute!(D, perm) @@ -52,7 +53,7 @@ function eigenstates(op::SparseOperator, n::Int=6; warning::Bool=true, ishermitian(op) || (warning && warn(nonhermitian_warning)) info && println("INFO: Defaulting to sparse diagonalization. If storing the full operator is possible, it might be faster to do - eigenstates(full(op)). Set info=false to turn off this message.") + eigenstates(dense(op)). Set info=false to turn off this message.") D, V = eigs(op.data; which=:SR, nev=n, kwargs...) states = [Ket(b, V[:, k]) for k=1:length(D)] D, states @@ -125,9 +126,9 @@ function simdiag(ops::Vector{T}; atol::Real=1e-14, rtol::Real=1e-14) where T<:De end end - d, v = eig(sum(ops).data) + d, v = eigen(sum(ops).data) - evals = [Vector{Complex128}(length(d)) for i=1:length(ops)] + evals = [Vector{ComplexF64}(undef, length(d)) for i=1:length(ops)] for i=1:length(ops), j=1:length(d) vec = ops[i].data*v[:, j] evals[i][j] = (v[:, j]'*vec)[1] diff --git a/src/spin.jl b/src/spin.jl index 400ff686..e2df7419 100644 --- a/src/spin.jl +++ b/src/spin.jl @@ -4,9 +4,8 @@ export SpinBasis, sigmax, sigmay, sigmaz, sigmap, sigmam, spinup, spindown import Base: == -using Compat using ..bases, ..states, ..operators, ..operators_sparse - +using SparseArrays """ SpinBasis(n) @@ -40,8 +39,8 @@ Pauli ``σ_x`` operator for the given Spin basis. """ function sigmax(b::SpinBasis) N = length(b) - diag = Complex128[complex(sqrt(real((b.spinnumber + 1)*2*a - a*(a+1)))) for a=1:N-1] - data = spdiagm(diag, 1, N, N) + spdiagm(diag, -1, N, N) + diag = ComplexF64[complex(sqrt(real((b.spinnumber + 1)*2*a - a*(a+1)))) for a=1:N-1] + data = spdiagm(1 => diag, -1 => diag) SparseOperator(b, data) end @@ -52,8 +51,8 @@ Pauli ``σ_y`` operator for the given Spin basis. """ function sigmay(b::SpinBasis) N = length(b) - diag = Complex128[1im*complex(sqrt(real((b.spinnumber + 1)*2*a - a*(a+1)))) for a=1:N-1] - data = spdiagm(diag, -1, N, N) - spdiagm(diag, 1, N, N) + diag = ComplexF64[1im*complex(sqrt(real((b.spinnumber + 1)*2*a - a*(a+1)))) for a=1:N-1] + data = spdiagm(-1 => diag, 1 => -diag) SparseOperator(b, data) end @@ -64,8 +63,8 @@ Pauli ``σ_z`` operator for the given Spin basis. """ function sigmaz(b::SpinBasis) N = length(b) - diag = Complex128[complex(2*m) for m=b.spinnumber:-1:-b.spinnumber] - data = spdiagm(diag, 0, N, N) + diag = ComplexF64[complex(2*m) for m=b.spinnumber:-1:-b.spinnumber] + data = spdiagm(0 => diag) SparseOperator(b, data) end @@ -77,8 +76,8 @@ Raising operator ``σ_+`` for the given Spin basis. 
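To make the eigenstates docstring above concrete: dense operators go through `LinearAlgebra.eigen`, sparse ones through `Arpack.eigs`. A hedged usage sketch, assuming the package is loaded and using a spin operator from spin.jl as the example:

b = SpinBasis(10)              # 21-dimensional basis, purely illustrative
H = sigmaz(b)                  # SparseOperator

E4, kets4 = eigenstates(H, 4)          # sparse path: Arpack.eigs with which=:SR
E,  kets  = eigenstates(dense(H))      # dense path: LinearAlgebra.eigen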
function sigmap(b::SpinBasis) N = length(b) S = (b.spinnumber + 1)*b.spinnumber - diag = Complex128[complex(sqrt(float(S - m*(m+1)))) for m=b.spinnumber-1:-1:-b.spinnumber] - data = spdiagm(diag, 1, N, N) + diag = ComplexF64[complex(sqrt(float(S - m*(m+1)))) for m=b.spinnumber-1:-1:-b.spinnumber] + data = spdiagm(1 => diag) SparseOperator(b, data) end @@ -91,7 +90,7 @@ function sigmam(b::SpinBasis) N = length(b) S = (b.spinnumber + 1)*b.spinnumber diag = [complex(sqrt(float(S - m*(m-1)))) for m=b.spinnumber:-1:-b.spinnumber+1] - data = spdiagm(diag, -1, N, N) + data = spdiagm(-1 => diag) SparseOperator(b, data) end diff --git a/src/state_definitions.jl b/src/state_definitions.jl index b0c98dd7..6cf80897 100644 --- a/src/state_definitions.jl +++ b/src/state_definitions.jl @@ -3,7 +3,7 @@ module state_definitions export randstate, randoperator, thermalstate, coherentthermalstate, phase_average, passive_state using ..bases, ..states, ..operators, ..operators_dense, ..fock - +using LinearAlgebra """ randstate(basis) @@ -11,7 +11,7 @@ using ..bases, ..states, ..operators, ..operators_dense, ..fock Calculate a random normalized ket state. """ function randstate(b::Basis) - psi = Ket(b, rand(Complex128, length(b))) + psi = Ket(b, rand(ComplexF64, length(b))) normalize!(psi) psi end @@ -21,7 +21,7 @@ end Calculate a random unnormalized dense operator. """ -randoperator(b1::Basis, b2::Basis) = DenseOperator(b1, b2, rand(Complex128, length(b1), length(b2))) +randoperator(b1::Basis, b2::Basis) = DenseOperator(b1, b2, rand(ComplexF64, length(b1), length(b2))) randoperator(b::Basis) = randoperator(b, b) """ @@ -30,7 +30,7 @@ randoperator(b::Basis) = randoperator(b, b) Thermal state ``exp(-H/T)/Tr[exp(-H/T)]``. """ function thermalstate(H::Operator,T::Real) - return normalize(expm(-full(H)/T)) + return normalize(exp(-dense(H)/T)) end """ @@ -48,7 +48,7 @@ end Returns the phase-average of ``ρ`` containing only the diagonal elements. """ function phase_average(rho::DenseOperator) - return DenseOperator(basis(rho),diagm(diag(rho.data))) + return DenseOperator(basis(rho),diagm(0 => diag(rho.data))) end """ @@ -57,8 +57,7 @@ end Passive state ``π`` of ``ρ``. IncreasingEigenenergies=true means that higher indices correspond to higher energies. """ function passive_state(rho::DenseOperator,IncreasingEigenenergies::Bool=true) - return DenseOperator(basis(rho),diagm(sort(abs.(eigvals(rho.data)),rev=IncreasingEigenenergies))) + return DenseOperator(basis(rho),diagm(0 => sort!(abs.(eigvals(rho.data)),rev=IncreasingEigenenergies))) end end #module - diff --git a/src/states.jl b/src/states.jl index 32dbe9a7..f30131f7 100644 --- a/src/states.jl +++ b/src/states.jl @@ -3,11 +3,12 @@ module states export StateVector, Bra, Ket, length, basis, dagger, tensor, norm, normalize, normalize!, permutesystems, basisstate -import Base: ==, +, -, *, /, length, copy, norm, normalize, normalize! +import Base: ==, +, -, *, /, length, copy +import LinearAlgebra: norm, normalize, normalize! import ..bases: basis, tensor, permutesystems, check_multiplicable, samebases -using Compat using ..bases +using LinearAlgebra """ @@ -27,7 +28,7 @@ Bra state defined by coefficients in respect to the basis. """ mutable struct Bra <: StateVector basis::Basis - data::Vector{Complex128} + data::Vector{ComplexF64} function Bra(b::Basis, data) if length(b) != length(data) throw(DimensionMismatch()) @@ -43,7 +44,7 @@ Ket state defined by coefficients in respect to the given basis. 
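The state_definitions.jl hunks above use two more 0.7 idioms: `diagm` now takes `offset => vector` pairs, and the matrix exponential behind `thermalstate` is plain `exp` on dense data. A LinearAlgebra-only sketch with a toy 2x2 Hamiltonian:

using LinearAlgebra

v = [1.0, 2.0, 3.0]
M = diagm(0 => v)              # was: diagm(v)

Hmat = [0.0 1.0; 1.0 0.0]      # toy Hermitian matrix
rho  = exp(-Hmat/0.5)          # was: expm(...); matrix exponential at T = 0.5
rho ./= tr(rho)                # normalize, as thermalstate does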
""" mutable struct Ket <: StateVector basis::Basis - data::Vector{Complex128} + data::Vector{ComplexF64} function Ket(b::Basis, data) if length(b) != length(data) throw(DimensionMismatch()) @@ -52,8 +53,8 @@ mutable struct Ket <: StateVector end end -Bra(b::Basis) = Bra(b, zeros(Complex128, length(b))) -Ket(b::Basis) = Ket(b, zeros(Complex128, length(b))) +Bra(b::Basis) = Bra(b, zeros(ComplexF64, length(b))) +Ket(b::Basis) = Ket(b, zeros(ComplexF64, length(b))) copy(a::T) where {T<:StateVector} = T(a.basis, copy(a.data)) length(a::StateVector) = length(a.basis)::Int @@ -109,7 +110,7 @@ normalize(x::StateVector) = x/norm(x) In-place normalization of the given bra or ket so that `norm(x)` is one. """ -normalize!(x::StateVector) = scale!(x.data, 1./norm(x)) +normalize!(x::StateVector) = (rmul!(x.data, 1.0/norm(x)); nothing) function permutesystems(state::T, perm::Vector{Int}) where T<:StateVector @assert length(state.basis.bases) == length(perm) @@ -131,13 +132,13 @@ product state ``|i_1⟩⊗|i_2⟩⊗…⊗|i_n⟩`` of the corresponding basis s """ function basisstate(b::Basis, indices::Vector{Int}) @assert length(b.shape) == length(indices) - x = zeros(Complex128, length(b)) - x[sub2ind(tuple(b.shape...), indices...)] = Complex(1.) + x = zeros(ComplexF64, length(b)) + x[LinearIndices(tuple(b.shape...))[indices...]] = Complex(1.) Ket(b, x) end function basisstate(b::Basis, index::Int) - data = zeros(Complex128, length(b)) + data = zeros(ComplexF64, length(b)) data[index] = Complex(1.) Ket(b, data) end diff --git a/src/steadystate.jl b/src/steadystate.jl index db836640..0af87e4c 100644 --- a/src/steadystate.jl +++ b/src/steadystate.jl @@ -2,7 +2,7 @@ module steadystate using ..states, ..operators, ..operators_dense, ..superoperators using ..timeevolution - +using Arpack, LinearAlgebra """ steadystate.master(H, J; ) @@ -29,9 +29,9 @@ Calculate steady state using long time master equation evolution. function master(H::Operator, J::Vector; rho0::DenseOperator=tensor(basisstate(H.basis_l, 1), dagger(basisstate(H.basis_r, 1))), hmin=1e-7, tol=1e-3, - rates::Union{Vector{Float64}, Matrix{Float64}, Void}=nothing, + rates::Union{Vector{Float64}, Matrix{Float64}, Nothing}=nothing, Jdagger::Vector=dagger.(J), - fout::Union{Function,Void}=nothing, + fout::Union{Function,Nothing}=nothing, kwargs...) t,u = timeevolution.master([0., Inf], rho0, H, J; rates=rates, Jdagger=Jdagger, hmin=hmin, hmax=Inf, @@ -54,10 +54,10 @@ sorted according to the absolute value of the eigenvalues. * `nev = min(10, length(L.basis_r[1])*length(L.basis_r[2]))`: Number of eigenvalues. * `which = :LR`: Find eigenvalues with largest real part. Keyword for `eigs` function (ineffective for DenseSuperOperator). -* `kwargs...`: Keyword arguments for the Julia `eig` or `eigs` function. +* `kwargs...`: Keyword arguments for the Julia `eigen` or `eigens` function. """ function liouvillianspectrum(L::DenseSuperOperator; nev::Int = min(10, length(L.basis_r[1])*length(L.basis_r[2])), which::Symbol = :LR, kwargs...) - d, v = Base.eig(L.data; kwargs...) + d, v = eigen(L.data; kwargs...) indices = sortperm(abs.(d))[1:nev] ops = DenseOperator[] for i in indices @@ -70,10 +70,10 @@ end function liouvillianspectrum(L::SparseSuperOperator; nev::Int = min(10, length(L.basis_r[1])*length(L.basis_r[2])), which::Symbol = :LR, kwargs...) d, v, nconv, niter, nmult, resid = try - Base.eigs(L.data; nev = nev, which = which, kwargs...) + eigs(L.data; nev = nev, which = which, kwargs...) 
catch err - if isa(err, LinAlg.SingularException) || isa(err, LinAlg.ARPACKException) - error("Base.LinAlg.eigs() algorithm failed; try using DenseOperators or change nev.") + if isa(err, SingularException) || isa(err, ARPACKException) + error("Arpack's eigs() algorithm failed; try using DenseOperators or change nev.") else rethrow(err) end @@ -103,7 +103,7 @@ Find steady state by calculating the eigenstate with eigenvalue 0 of the Liouvil faster or for avoiding convergence errors of `eigs`. Changing `nev` thus only makes sense when using SparseSuperOperator. * `which = :LR`: Find eigenvalues with largest real part. Keyword for `eigs` function (ineffective for DenseSuperOperator). -* `kwargs...`: Keyword arguments for the Julia `eig` or `eigs` function. +* `kwargs...`: Keyword arguments for the Julia `eigen` or `eigs` function. """ function eigenvector(L::SuperOperator; tol::Real = 1e-9, nev::Int = 2, which::Symbol = :LR, kwargs...) d, ops = liouvillianspectrum(L; nev = nev, which = which, kwargs...) @@ -112,10 +112,10 @@ function eigenvector(L::SuperOperator; tol::Real = 1e-9, nev::Int = 2, which::Sy end if nev > 1 if abs(real(d[2])) < tol - warn("Several eigenvalues with real part 0 detected; use steadystate.liouvillianspectrum to find out more.") + @warn("Several eigenvalues with real part 0 detected; use steadystate.liouvillianspectrum to find out more.") end end - return ops[1]/trace(ops[1]) + return ops[1]/tr(ops[1]) end eigenvector(H::Operator, J::Vector; rates::Union{Vector{Float64}, Matrix{Float64}}=ones(Float64, length(J)), kwargs...) = eigenvector(liouvillian(H, J; rates=rates); kwargs...) diff --git a/src/stochastic_master.jl b/src/stochastic_master.jl index 76b7a646..f74508a0 100644 --- a/src/stochastic_master.jl +++ b/src/stochastic_master.jl @@ -5,11 +5,12 @@ export master, master_dynamic using ...bases, ...states, ...operators using ...operators_dense, ...operators_sparse using ...timeevolution +using LinearAlgebra import ...timeevolution: integrate_stoch, recast! import ...timeevolution.timeevolution_master: dmaster_h, dmaster_nh, dmaster_h_dynamic, check_master -const DecayRates = Union{Vector{Float64}, Matrix{Float64}, Void} -const DiffArray = Union{Vector{Complex128}, Array{Complex128, 2}} +const DecayRates = Union{Vector{Float64}, Matrix{Float64}, Nothing} +const DiffArray = Union{Vector{ComplexF64}, Array{ComplexF64, 2}} """ stochastic.master(tspan, rho0, H, J, C; ) @@ -44,7 +45,7 @@ function master(tspan, rho0::DenseOperator, H::Operator, J::Vector, C::Vector; rates::DecayRates=nothing, Jdagger::Vector=dagger.(J), Cdagger::Vector=dagger.(C), - fout::Union{Function,Void}=nothing, + fout::Union{Function,Nothing}=nothing, kwargs...) tmp = copy(rho0) @@ -114,7 +115,7 @@ dynamic Hamiltonian and J. """ function master_dynamic(tspan::Vector{Float64}, rho0::DenseOperator, fdeterm::Function, fstoch::Function; rates::DecayRates=nothing, - fout::Union{Function,Void}=nothing, + fout::Union{Function,Nothing}=nothing, noise_processes::Int=0, kwargs...) @@ -135,21 +136,21 @@ end master_dynamic(tspan::Vector{Float64}, psi0::Ket, args...; kwargs...) = master_dynamic(tspan, dm(psi0), args...; kwargs...) 
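The steadystate and sparsematrix hunks above also track two LinearAlgebra renames: `trace` is now `tr`, and the two-argument `scale!` splits into `rmul!`/`lmul!` depending on which side the scalar multiplies. A minimal sketch:

using LinearAlgebra

A = ComplexF64[1 2; 3 4]
t = tr(A)          # was: trace(A)
rmul!(A, 2.0)      # was: scale!(A, 2.0); in place A .= A .* 2
lmul!(0.5, A)      # was: scale!(0.5, A); in place A .= 0.5 .* A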
# Derivative functions -function dmaster_stochastic(dx::Vector{Complex128}, rho::DenseOperator, +function dmaster_stochastic(dx::Vector{ComplexF64}, rho::DenseOperator, C::Vector, Cdagger::Vector, drho::DenseOperator, ::Int) recast!(dx, drho) operators.gemm!(1, C[1], rho, 0, drho) operators.gemm!(1, rho, Cdagger[1], 1, drho) - drho.data .-= trace(drho)*rho.data + drho.data .-= tr(drho)*rho.data end -function dmaster_stochastic(dx::Array{Complex128, 2}, rho::DenseOperator, +function dmaster_stochastic(dx::Array{ComplexF64, 2}, rho::DenseOperator, C::Vector, Cdagger::Vector, drho::DenseOperator, n::Int) for i=1:n dx_i = @view dx[:, i] recast!(dx_i, drho) operators.gemm!(1, C[i], rho, 0, drho) operators.gemm!(1, rho, Cdagger[i], 1, drho) - drho.data .-= trace(drho)*rho.data + drho.data .-= tr(drho)*rho.data recast!(drho, dx_i) end end @@ -163,7 +164,7 @@ function dmaster_stoch_dynamic(dx::DiffArray, t::Float64, rho::DenseOperator, end function integrate_master_stoch(tspan, df::Function, dg::Function, - rho0::DenseOperator, fout::Union{Void, Function}, + rho0::DenseOperator, fout::Union{Nothing, Function}, n::Int; kwargs...) tspan_ = convert(Vector{Float64}, tspan) @@ -174,10 +175,10 @@ function integrate_master_stoch(tspan, df::Function, dg::Function, end # TODO: Speed up by recasting to n-d arrays, remove vector methods -function recast!(x::Union{Vector{Complex128}, SubArray{Complex128, 1}}, rho::DenseOperator) +function recast!(x::Union{Vector{ComplexF64}, SubArray{ComplexF64, 1}}, rho::DenseOperator) rho.data = reshape(x, size(rho.data)) end -recast!(state::DenseOperator, x::SubArray{Complex128, 1}) = (x[:] = state.data) -recast!(state::DenseOperator, x::Vector{Complex128}) = nothing +recast!(state::DenseOperator, x::SubArray{ComplexF64, 1}) = (x[:] = state.data) +recast!(state::DenseOperator, x::Vector{ComplexF64}) = nothing end # module diff --git a/src/stochastic_schroedinger.jl b/src/stochastic_schroedinger.jl index ca85dddb..dc0824e8 100644 --- a/src/stochastic_schroedinger.jl +++ b/src/stochastic_schroedinger.jl @@ -5,12 +5,13 @@ export schroedinger, schroedinger_dynamic using ...bases, ...states, ...operators using ...operators_dense, ...operators_sparse using ...timeevolution +using LinearAlgebra import ...timeevolution: integrate_stoch, recast! import ...timeevolution.timeevolution_schroedinger: dschroedinger, dschroedinger_dynamic, check_schroedinger import DiffEqCallbacks -const DiffArray = Union{Vector{Complex128}, Array{Complex128, 2}} +const DiffArray = Union{Vector{ComplexF64}, Array{ComplexF64, 2}} """ stochastic.schroedinger(tspan, state0, H, Hs[; fout, ...]) @@ -32,7 +33,7 @@ Integrate stochastic Schrödinger equation. * `kwargs...`: Further arguments are passed on to the ode solver. """ function schroedinger(tspan, psi0::Ket, H::Operator, Hs::Vector; - fout::Union{Function,Void}=nothing, + fout::Union{Function,Nothing}=nothing, normalize_state::Bool=false, calback=nothing, kwargs...) @@ -49,7 +50,7 @@ function schroedinger(tspan, psi0::Ket, H::Operator, Hs::Vector; t::Float64, psi::Ket, dpsi::Ket, n::Int) = dschroedinger_stochastic(dx, psi, Hs, dpsi, n) if normalize_state - norm_func(u::Vector{Complex128}, t::Float64, integrator) = normalize!(u) + norm_func(u::Vector{ComplexF64}, t::Float64, integrator) = normalize!(u) ncb = DiffEqCallbacks.FunctionCallingCallback(norm_func; func_everystep=true, func_start=false) @@ -90,7 +91,7 @@ Integrate stochastic Schrödinger equation with dynamic Hamiltonian. * `kwargs...`: Further arguments are passed on to the ode solver. 
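The `normalize_state` branch above hooks renormalization into the stochastic solver through `DiffEqCallbacks.FunctionCallingCallback`, exactly as shown in the hunk; the isolated wiring looks like this (the solver call itself is omitted):

import DiffEqCallbacks
using LinearAlgebra

# renormalize the raw solution vector after every accepted step
norm_func(u::Vector{ComplexF64}, t::Float64, integrator) = normalize!(u)

ncb = DiffEqCallbacks.FunctionCallingCallback(norm_func;
                                              func_everystep=true,
                                              func_start=false)
# `ncb` is then passed on to the SDE solver via its callback keyword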
""" function schroedinger_dynamic(tspan, psi0::Ket, fdeterm::Function, fstoch::Function; - fout::Union{Function,Void}=nothing, noise_processes::Int=0, + fout::Union{Function,Nothing}=nothing, noise_processes::Int=0, normalize_state::Bool=false, kwargs...) tspan_ = convert(Vector{Float64}, tspan) @@ -112,7 +113,7 @@ function schroedinger_dynamic(tspan, psi0::Ket, fdeterm::Function, fstoch::Funct dschroedinger_stochastic(dx, t, psi, fstoch, dpsi, n) if normalize_state - norm_func(u::Vector{Complex128}, t::Float64, integrator) = normalize!(u) + norm_func(u::Vector{ComplexF64}, t::Float64, integrator) = normalize!(u) ncb = DiffEqCallbacks.FunctionCallingCallback(norm_func; func_everystep=true, func_start=false) @@ -127,13 +128,13 @@ function schroedinger_dynamic(tspan, psi0::Ket, fdeterm::Function, fstoch::Funct end -function dschroedinger_stochastic(dx::Vector{Complex128}, psi::Ket, Hs::Vector{T}, +function dschroedinger_stochastic(dx::Vector{ComplexF64}, psi::Ket, Hs::Vector{T}, dpsi::Ket, index::Int) where T <: Operator check_schroedinger(psi, Hs[index]) recast!(dx, dpsi) dschroedinger(psi, Hs[index], dpsi) end -function dschroedinger_stochastic(dx::Array{Complex128, 2}, psi::Ket, Hs::Vector{T}, +function dschroedinger_stochastic(dx::Array{ComplexF64, 2}, psi::Ket, Hs::Vector{T}, dpsi::Ket, n::Int) where T <: Operator for i=1:n check_schroedinger(psi, Hs[i]) @@ -149,7 +150,7 @@ function dschroedinger_stochastic(dx::DiffArray, dschroedinger_stochastic(dx, psi, ops, dpsi, n) end -recast!(psi::StateVector, x::SubArray{Complex128, 1}) = (x .= psi.data) -recast!(x::SubArray{Complex128, 1}, psi::StateVector) = (psi.data = x) +recast!(psi::StateVector, x::SubArray{ComplexF64, 1}) = (x .= psi.data) +recast!(x::SubArray{ComplexF64, 1}, psi::StateVector) = (psi.data = x) end # module diff --git a/src/stochastic_semiclassical.jl b/src/stochastic_semiclassical.jl index cabf97de..86cd719c 100644 --- a/src/stochastic_semiclassical.jl +++ b/src/stochastic_semiclassical.jl @@ -10,11 +10,12 @@ using ...timeevolution import ...timeevolution: integrate_stoch import ...timeevolution.timeevolution_schroedinger: dschroedinger, dschroedinger_dynamic using ...stochastic +using LinearAlgebra import DiffEqCallbacks -const DecayRates = Union{Vector{Float64}, Matrix{Float64}, Void} -const DiffArray = Union{Vector{Complex128}, Array{Complex128, 2}} +const DecayRates = Union{Vector{Float64}, Matrix{Float64}, Nothing} +const DiffArray = Union{Vector{ComplexF64}, Array{ComplexF64, 2}} """ semiclassical.schroedinger_stochastic(tspan, state0, fquantum, fclassical[; fout, ...]) @@ -55,9 +56,9 @@ Integrate time-dependent Schrödinger equation coupled to a classical system. * `kwargs...`: Further arguments are passed on to the ode solver. 
""" function schroedinger_semiclassical(tspan, state0::State{Ket}, fquantum::Function, - fclassical::Function; fstoch_quantum::Union{Void, Function}=nothing, - fstoch_classical::Union{Void, Function}=nothing, - fout::Union{Function,Void}=nothing, + fclassical::Function; fstoch_quantum::Union{Nothing, Function}=nothing, + fstoch_classical::Union{Nothing, Function}=nothing, + fout::Union{Function,Nothing}=nothing, noise_processes::Int=0, noise_prototype_classical=nothing, normalize_state::Bool=false, @@ -65,11 +66,11 @@ function schroedinger_semiclassical(tspan, state0::State{Ket}, fquantum::Functio tspan_ = convert(Vector{Float64}, tspan) dschroedinger_det(t::Float64, state::State{Ket}, dstate::State{Ket}) = semiclassical.dschroedinger_dynamic(t, state, fquantum, fclassical, dstate) - if isa(fstoch_quantum, Void) && isa(fstoch_classical, Void) + if isa(fstoch_quantum, Nothing) && isa(fstoch_classical, Nothing) throw(ArgumentError("No stochastic functions provided!")) end - x0 = Vector{Complex128}(length(state0)) + x0 = Vector{ComplexF64}(undef, length(state0)) recast!(state0, x0) state = copy(state0) dstate = copy(state0) @@ -85,15 +86,15 @@ function schroedinger_semiclassical(tspan, state0::State{Ket}, fquantum::Functio end if n > 0 && isa(fstoch_classical, Function) - if isa(noise_prototype_classical, Void) + if isa(noise_prototype_classical, Nothing) throw(ArgumentError("noise_prototype_classical must be set for combinations of quantum and classical noise!")) end end if normalize_state len_q = length(state0.quantum) - function norm_func(u::Vector{Complex128}, t::Float64, integrator) - u .= [normalize!(u[1:len_q]), u[len_q+1:end];] + function norm_func(u::Vector{ComplexF64}, t::Float64, integrator) + u .= [normalize!(u[1:len_q]); u[len_q+1:end]] end ncb = DiffEqCallbacks.FunctionCallingCallback(norm_func; func_everystep=true, @@ -156,17 +157,17 @@ non-hermitian Hamiltonian and then calls master_nh which is slightly faster. """ function master_semiclassical(tspan::Vector{Float64}, rho0::State{DenseOperator}, fquantum::Function, fclassical::Function; - fstoch_quantum::Union{Function, Void}=nothing, - fstoch_classical::Union{Function, Void}=nothing, + fstoch_quantum::Union{Function, Nothing}=nothing, + fstoch_classical::Union{Function, Nothing}=nothing, rates::DecayRates=nothing, - fout::Union{Function,Void}=nothing, + fout::Union{Function,Nothing}=nothing, noise_processes::Int=0, noise_prototype_classical=nothing, nonlinear::Bool=true, kwargs...) tmp = copy(rho0.quantum) - if isa(fstoch_quantum, Void) && isa(fstoch_classical, Void) + if isa(fstoch_quantum, Nothing) && isa(fstoch_classical, Nothing) throw(ArgumentError("No stochastic functions provided!")) end @@ -181,7 +182,7 @@ function master_semiclassical(tspan::Vector{Float64}, rho0::State{DenseOperator} end if n > 0 && isa(fstoch_classical, Function) - if isa(noise_prototype_classical, Void) + if isa(noise_prototype_classical, Nothing) throw(ArgumentError("noise_prototype_classical must be set for combinations of quantum and classical noise!")) end end @@ -201,17 +202,17 @@ master_semiclassical(tspan::Vector{Float64}, psi0::State{Ket}, args...; kwargs.. master_semiclassical(tspan, dm(psi0), args...; kwargs...) 
# Derivative functions -function dschroedinger_stochastic(dx::Vector{Complex128}, t::Float64, - state::State{Ket}, fstoch_quantum::Function, fstoch_classical::Void, +function dschroedinger_stochastic(dx::Vector{ComplexF64}, t::Float64, + state::State{Ket}, fstoch_quantum::Function, fstoch_classical::Nothing, dstate::State{Ket}, ::Int) H = fstoch_quantum(t, state.quantum, state.classical) recast!(dx, dstate) dschroedinger(state.quantum, H[1], dstate.quantum) recast!(dstate, dx) end -function dschroedinger_stochastic(dx::Array{Complex128, 2}, +function dschroedinger_stochastic(dx::Array{ComplexF64, 2}, t::Float64, state::State{Ket}, fstoch_quantum::Function, - fstoch_classical::Void, dstate::State{Ket}, n::Int) + fstoch_classical::Nothing, dstate::State{Ket}, n::Int) H = fstoch_quantum(t, state.quantum, state.classical) for i=1:n dx_i = @view dx[:, i] @@ -221,12 +222,12 @@ function dschroedinger_stochastic(dx::Array{Complex128, 2}, end end function dschroedinger_stochastic(dx::DiffArray, t::Float64, - state::State{Ket}, fstoch_quantum::Void, fstoch_classical::Function, + state::State{Ket}, fstoch_quantum::Nothing, fstoch_classical::Function, dstate::State{Ket}, ::Int) dclassical = @view dx[length(state.quantum)+1:end, :] fstoch_classical(t, state.quantum, state.classical, dclassical) end -function dschroedinger_stochastic(dx::Array{Complex128, 2}, t::Float64, state::State{Ket}, fstoch_quantum::Function, +function dschroedinger_stochastic(dx::Array{ComplexF64, 2}, t::Float64, state::State{Ket}, fstoch_quantum::Function, fstoch_classical::Function, dstate::State{Ket}, n::Int) dschroedinger_stochastic(dx, t, state, fstoch_quantum, nothing, dstate, n) @@ -234,21 +235,21 @@ function dschroedinger_stochastic(dx::Array{Complex128, 2}, t::Float64, state::S fstoch_classical(t, state.quantum, state.classical, dx_i) end -function dmaster_stoch_dynamic(dx::Vector{Complex128}, t::Float64, +function dmaster_stoch_dynamic(dx::Vector{ComplexF64}, t::Float64, state::State{DenseOperator}, fstoch_quantum::Function, - fstoch_classical::Void, dstate::State{DenseOperator}, ::Int) + fstoch_classical::Nothing, dstate::State{DenseOperator}, ::Int) result = fstoch_quantum(t, state.quantum, state.classical) @assert length(result) == 2 C, Cdagger = result recast!(dx, dstate) operators.gemm!(1, C[1], state.quantum, 0, dstate.quantum) operators.gemm!(1, state.quantum, Cdagger[1], 1, dstate.quantum) - dstate.quantum.data .-= trace(dstate.quantum)*state.quantum.data + dstate.quantum.data .-= tr(dstate.quantum)*state.quantum.data recast!(dstate, dx) end -function dmaster_stoch_dynamic(dx::Array{Complex128, 2}, t::Float64, +function dmaster_stoch_dynamic(dx::Array{ComplexF64, 2}, t::Float64, state::State{DenseOperator}, fstoch_quantum::Function, - fstoch_classical::Void, dstate::State{DenseOperator}, n::Int) + fstoch_classical::Nothing, dstate::State{DenseOperator}, n::Int) result = fstoch_quantum(t, state.quantum, state.classical) @assert length(result) == 2 C, Cdagger = result @@ -257,17 +258,17 @@ function dmaster_stoch_dynamic(dx::Array{Complex128, 2}, t::Float64, recast!(dx_i, dstate) operators.gemm!(1, C[i], state.quantum, 0, dstate.quantum) operators.gemm!(1, state.quantum, Cdagger[i], 1, dstate.quantum) - dstate.quantum.data .-= trace(dstate.quantum)*state.quantum.data + dstate.quantum.data .-= tr(dstate.quantum)*state.quantum.data recast!(dstate, dx_i) end end function dmaster_stoch_dynamic(dx::DiffArray, t::Float64, - state::State{DenseOperator}, fstoch_quantum::Void, + state::State{DenseOperator}, 
fstoch_quantum::Nothing, fstoch_classical::Function, dstate::State{DenseOperator}, ::Int) dclassical = @view dx[length(state.quantum)+1:end, :] fstoch_classical(t, state.quantum, state.classical, dclassical) end -function dmaster_stoch_dynamic(dx::Array{Complex128, 2}, t::Float64, +function dmaster_stoch_dynamic(dx::Array{ComplexF64, 2}, t::Float64, state::State{DenseOperator}, fstoch_quantum::Function, fstoch_classical::Function, dstate::State{DenseOperator}, n::Int) dmaster_stoch_dynamic(dx, t, state, fstoch_quantum, nothing, dstate, n) @@ -277,27 +278,27 @@ function dmaster_stoch_dynamic(dx::Array{Complex128, 2}, t::Float64, end function integrate_master_stoch(tspan, df::Function, dg::Function, - rho0::State{DenseOperator}, fout::Union{Void, Function}, + rho0::State{DenseOperator}, fout::Union{Nothing, Function}, n::Int; kwargs...) tspan_ = convert(Vector{Float64}, tspan) - x0 = Vector{Complex128}(length(rho0)) + x0 = Vector{ComplexF64}(undef, length(rho0)) recast!(rho0, x0) state = copy(rho0) dstate = copy(rho0) integrate_stoch(tspan_, df, dg, x0, state, dstate, fout, n; kwargs...) end -function recast!(state::State, x::SubArray{Complex128, 1}) +function recast!(state::State, x::SubArray{ComplexF64, 1}) N = length(state.quantum) - copy!(x, 1, state.quantum.data, 1, N) - copy!(x, N+1, state.classical, 1, length(state.classical)) + copyto!(x, 1, state.quantum.data, 1, N) + copyto!(x, N+1, state.classical, 1, length(state.classical)) x end -function recast!(x::SubArray{Complex128, 1}, state::State) +function recast!(x::SubArray{ComplexF64, 1}, state::State) N = length(state.quantum) - copy!(state.quantum.data, 1, x, 1, N) - copy!(state.classical, 1, x, N+1, length(state.classical)) + copyto!(state.quantum.data, 1, x, 1, N) + copyto!(state.classical, 1, x, N+1, length(state.classical)) end end # module diff --git a/src/subspace.jl b/src/subspace.jl index 503c9ede..0fb64a84 100644 --- a/src/subspace.jl +++ b/src/subspace.jl @@ -25,7 +25,7 @@ mutable struct SubspaceBasis <: Basis throw(ArgumentError("The basis of the basisstates has to be the superbasis.")) end end - basisstates_hash = hash([hash(x.data) for x=basisstates]) + basisstates_hash = hash(hash.([hash.(x.data) for x=basisstates])) new(Int[length(basisstates)], superbasis, basisstates, basisstates_hash) end end @@ -77,7 +77,7 @@ function projector(b1::SubspaceBasis, b2::Basis) if b1.superbasis != b2 throw(ArgumentError("Second basis has to be the superbasis of the first one.")) end - data = zeros(Complex128, length(b1), length(b2)) + data = zeros(ComplexF64, length(b1), length(b2)) for (i, state) = enumerate(b1.basisstates) data[i,:] = state.data end @@ -88,7 +88,7 @@ function projector(b1::Basis, b2::SubspaceBasis) if b1 != b2.superbasis throw(ArgumentError("First basis has to be the superbasis of the second one.")) end - data = zeros(Complex128, length(b1), length(b2)) + data = zeros(ComplexF64, length(b1), length(b2)) for (i, state) = enumerate(b2.basisstates) data[:,i] = state.data end diff --git a/src/superoperators.jl b/src/superoperators.jl index de31130d..86ed05ca 100644 --- a/src/superoperators.jl +++ b/src/superoperators.jl @@ -1,13 +1,14 @@ module superoperators export SuperOperator, DenseSuperOperator, SparseSuperOperator, - spre, spost, liouvillian, expm + spre, spost, liouvillian, exp import Base: ==, *, /, +, - import ..bases +import SparseArrays: sparse -using Compat using ..bases, ..operators, ..operators_dense, ..operators_sparse +using SparseArrays """ @@ -34,8 +35,8 @@ SuperOperator stored as dense matrix. 
mutable struct DenseSuperOperator <: SuperOperator basis_l::Tuple{Basis, Basis} basis_r::Tuple{Basis, Basis} - data::Matrix{Complex128} - function DenseSuperOperator(basis_l::Tuple{Basis, Basis}, basis_r::Tuple{Basis, Basis}, data::Matrix{Complex128}) + data::Matrix{ComplexF64} + function DenseSuperOperator(basis_l::Tuple{Basis, Basis}, basis_r::Tuple{Basis, Basis}, data::Matrix{ComplexF64}) if length(basis_l[1])*length(basis_l[2]) != size(data, 1) || length(basis_r[1])*length(basis_r[2]) != size(data, 2) throw(DimensionMismatch()) @@ -47,7 +48,7 @@ end function DenseSuperOperator(basis_l::Tuple{Basis, Basis}, basis_r::Tuple{Basis, Basis}) Nl = length(basis_l[1])*length(basis_l[2]) Nr = length(basis_r[1])*length(basis_r[2]) - data = zeros(Complex128, Nl, Nr) + data = zeros(ComplexF64, Nl, Nr) DenseSuperOperator(basis_l, basis_r, data) end @@ -60,8 +61,8 @@ SuperOperator stored as sparse matrix. mutable struct SparseSuperOperator <: SuperOperator basis_l::Tuple{Basis, Basis} basis_r::Tuple{Basis, Basis} - data::SparseMatrixCSC{Complex128, Int} - function SparseSuperOperator(basis_l::Tuple{Basis, Basis}, basis_r::Tuple{Basis, Basis}, data::SparseMatrixCSC{Complex128, Int}) + data::SparseMatrixCSC{ComplexF64, Int} + function SparseSuperOperator(basis_l::Tuple{Basis, Basis}, basis_r::Tuple{Basis, Basis}, data::SparseMatrixCSC{ComplexF64, Int}) if length(basis_l[1])*length(basis_l[2]) != size(data, 1) || length(basis_r[1])*length(basis_r[2]) != size(data, 2) throw(DimensionMismatch()) @@ -73,21 +74,21 @@ end function SparseSuperOperator(basis_l::Tuple{Basis, Basis}, basis_r::Tuple{Basis, Basis}) Nl = length(basis_l[1])*length(basis_l[2]) Nr = length(basis_r[1])*length(basis_r[2]) - data = spzeros(Complex128, Nl, Nr) + data = spzeros(ComplexF64, Nl, Nr) SparseSuperOperator(basis_l, basis_r, data) end -SuperOperator(basis_l, basis_r, data::SparseMatrixCSC{Complex128, Int}) = SparseSuperOperator(basis_l, basis_r, data) -SuperOperator(basis_l, basis_r, data::Matrix{Complex128}) = DenseSuperOperator(basis_l, basis_r, data) +SuperOperator(basis_l, basis_r, data::SparseMatrixCSC{ComplexF64, Int}) = SparseSuperOperator(basis_l, basis_r, data) +SuperOperator(basis_l, basis_r, data::Matrix{ComplexF64}) = DenseSuperOperator(basis_l, basis_r, data) Base.copy(a::T) where {T<:SuperOperator} = T(a.basis_l, a.basis_r, copy(a.data)) -Base.full(a::SparseSuperOperator) = DenseSuperOperator(a.basis_l, a.basis_r, full(a.data)) -Base.full(a::DenseSuperOperator) = copy(a) +operators.dense(a::SparseSuperOperator) = DenseSuperOperator(a.basis_l, a.basis_r, Matrix(a.data)) +operators.dense(a::DenseSuperOperator) = copy(a) -Base.sparse(a::DenseSuperOperator) = SparseSuperOperator(a.basis_l, a.basis_r, sparse(a.data)) -Base.sparse(a::SparseSuperOperator) = copy(a) +sparse(a::DenseSuperOperator) = SparseSuperOperator(a.basis_l, a.basis_r, sparse(a.data)) +sparse(a::SparseSuperOperator) = copy(a) ==(a::T, b::T) where {T<:SuperOperator} = samebases(a, b) && (a.data == b.data) @@ -151,7 +152,7 @@ For operators ``A``, ``B`` the relation holds. `op` can be a dense or a sparse operator. 
""" -spost(op::Operator) = SuperOperator((op.basis_r, op.basis_r), (op.basis_l, op.basis_l), kron(transpose(op.data), identityoperator(op).data)) +spost(op::Operator) = SuperOperator((op.basis_r, op.basis_r), (op.basis_l, op.basis_l), kron(permutedims(op.data), identityoperator(op).data)) function _check_input(H::Operator, J::Vector, Jdagger::Vector, rates::Union{Vector{Float64}, Matrix{Float64}}) @@ -212,10 +213,10 @@ function liouvillian(H::T, J::Vector{T}; end """ - expm(op::DenseSuperOperator) + exp(op::DenseSuperOperator) Operator exponential which can for example used to calculate time evolutions. """ -Base.expm(op::DenseSuperOperator) = DenseSuperOperator(op.basis_l, op.basis_r, expm(op.data)) +Base.exp(op::DenseSuperOperator) = DenseSuperOperator(op.basis_l, op.basis_r, exp(op.data)) end # module diff --git a/src/timecorrelations.jl b/src/timecorrelations.jl index cedabb23..ca31897d 100644 --- a/src/timecorrelations.jl +++ b/src/timecorrelations.jl @@ -5,6 +5,8 @@ export correlation, spectrum, correlation2spectrum using ..states, ..operators, ..operators_dense using ..metrics, ..timeevolution, ..steadystate +using FFTW + """ timecorrelations.correlation([tspan, ]rho0, H, J, op1, op2; ) @@ -33,7 +35,7 @@ criterion specified in [`steadystate.master`](@ref). """ function correlation(tspan::Vector{Float64}, rho0::DenseOperator, H::Operator, J::Vector, op1::Operator, op2::Operator; - rates::Union{Vector{Float64}, Matrix{Float64}, Void}=nothing, + rates::Union{Vector{Float64}, Matrix{Float64}, Nothing}=nothing, Jdagger::Vector=dagger.(J), kwargs...) function fout(t, rho) @@ -47,7 +49,7 @@ end function correlation(rho0::DenseOperator, H::Operator, J::Vector, op1::Operator, op2::Operator; tol::Float64=1e-4, h0=10., - rates::Union{Vector{Float64}, Matrix{Float64}, Void}=nothing, + rates::Union{Vector{Float64}, Matrix{Float64}, Nothing}=nothing, Jdagger::Vector=dagger.(J), kwargs...) op2rho0 = op2*rho0 @@ -125,16 +127,16 @@ end """ - timecorrelations.correlation2spectrum(tspan, corr; normalize) + timecorrelations.correlation2spectrum(tspan, corr; normalize_spec) Calculate spectrum as Fourier transform of a correlation function with a given correlation function. # Arguments * `tspan`: List of time points corresponding to the correlation function. * `corr`: Correlation function of which the Fourier transform is to be calculated. -* `normalize`: Specify if spectrum should be normalized to its maximum. +* `normalize_spec`: Specify if spectrum should be normalized to its maximum. """ -function correlation2spectrum(tspan::Vector{Float64}, corr::Vector{T}; normalize::Bool=false) where T <: Number +function correlation2spectrum(tspan::Vector{Float64}, corr::Vector{T}; normalize_spec::Bool=false) where T <: Number n = length(tspan) if length(corr) != n ArgumentError("tspan and corr must be of same length!") @@ -152,7 +154,7 @@ function correlation2spectrum(tspan::Vector{Float64}, corr::Vector{T}; normalize omega .*= 2pi/tmax spec = 2dt.*fftshift(real(fft(corr))) - omega, normalize ? spec./maximum(spec) : spec + omega, normalize_spec ? spec./maximum(spec) : spec end diff --git a/src/timeevolution_base.jl b/src/timeevolution_base.jl index ebb1b981..4f55a304 100644 --- a/src/timeevolution_base.jl +++ b/src/timeevolution_base.jl @@ -2,12 +2,12 @@ using ..metrics import OrdinaryDiffEq, DiffEqCallbacks, StochasticDiffEq -const DiffArray = Union{Vector{Complex128}, Array{Complex128, 2}} +const DiffArray = Union{Vector{ComplexF64}, Array{ComplexF64, 2}} function recast! 
end """ - integrate(tspan::Vector{Float64}, df::Function, x0::Vector{Complex128}, + integrate(tspan::Vector{Float64}, df::Function, x0::Vector{ComplexF64}, state::T, dstate::T, fout::Function; kwargs...) Integrate using OrdinaryDiffEq @@ -67,7 +67,7 @@ function integrate(tspan::Vector{Float64}, df::Function, x0::DiffArray, end function integrate(tspan::Vector{Float64}, df::Function, x0::DiffArray, - state::T, dstate::T, ::Void; kwargs...) where T + state::T, dstate::T, ::Nothing; kwargs...) where T function fout(t::Float64, state::T) copy(state) end @@ -90,12 +90,12 @@ end """ - integrate_stoch(tspan::Vector{Float64}, df::Function, dg::Vector{Function}, x0::Vector{Complex128}, + integrate_stoch(tspan::Vector{Float64}, df::Function, dg::Vector{Function}, x0::Vector{ComplexF64}, state::T, dstate::T, fout::Function; kwargs...) Integrate using StochasticDiffEq """ -function integrate_stoch(tspan::Vector{Float64}, df::Function, dg::Function, x0::Vector{Complex128}, +function integrate_stoch(tspan::Vector{Float64}, df::Function, dg::Function, x0::Vector{ComplexF64}, state::T, dstate::T, fout::Function, n::Int; save_everystep = false, callback=nothing, alg::StochasticDiffEq.StochasticDiffEqAlgorithm=StochasticDiffEq.EM(), @@ -105,33 +105,33 @@ function integrate_stoch(tspan::Vector{Float64}, df::Function, dg::Function, x0: ncb=nothing, kwargs...) where T - function df_(dx::Vector{Complex128}, x::Vector{Complex128}, p, t) + function df_(dx::Vector{ComplexF64}, x::Vector{ComplexF64}, p, t) recast!(x, state) recast!(dx, dstate) df(t, state, dstate) recast!(dstate, dx) end - function dg_(dx::Union{Vector{Complex128}, Array{Complex128, 2}}, - x::Vector{Complex128}, p, t) + function dg_(dx::Union{Vector{ComplexF64}, Array{ComplexF64, 2}}, + x::Vector{ComplexF64}, p, t) recast!(x, state) dg(dx, t, state, dstate, n) end - function fout_(x::Vector{Complex128}, t::Float64, integrator) + function fout_(x::Vector{ComplexF64}, t::Float64, integrator) recast!(x, state) fout(t, state) end - nc = isa(noise_prototype_classical, Void) ? 0 : size(noise_prototype_classical)[2] - if isa(noise, Void) && n > 0 + nc = isa(noise_prototype_classical, Nothing) ? 0 : size(noise_prototype_classical)[2] + if isa(noise, Nothing) && n > 0 noise_ = StochasticDiffEq.RealWienerProcess!(0.0, randn(n + nc)) else noise_ = noise end - if isa(noise_rate_prototype, Void) + if isa(noise_rate_prototype, Nothing) if n > 1 || nc > 1 || (n > 0 && nc > 0) - noise_rate_prototype = zeros(Complex128, length(x0), n + nc) + noise_rate_prototype = zeros(ComplexF64, length(x0), n + nc) end end @@ -166,8 +166,8 @@ end Define fout if it was omitted. """ -function integrate_stoch(tspan::Vector{Float64}, df::Function, dg::Function, x0::Vector{Complex128}, - state::T, dstate::T, ::Void, n::Int; kwargs...) where T +function integrate_stoch(tspan::Vector{Float64}, df::Function, dg::Function, x0::Vector{ComplexF64}, + state::T, dstate::T, ::Nothing, n::Int; kwargs...) 
where T function fout(t::Float64, state::T) copy(state) end @@ -175,4 +175,4 @@ function integrate_stoch(tspan::Vector{Float64}, df::Function, dg::Function, x0: end -Base.@pure pure_inference(fout,T) = Core.Inference.return_type(fout, T) +Base.@pure pure_inference(fout,T) = Core.Compiler.return_type(fout, T) diff --git a/src/transformations.jl b/src/transformations.jl index 98eb1fde..07fbb210 100644 --- a/src/transformations.jl +++ b/src/transformations.jl @@ -25,11 +25,11 @@ a harmonic trap potential at position ``x``, i.e.: ``` """ function transform(b1::PositionBasis, b2::FockBasis; x0::Real=1) - T = Matrix{Complex128}(length(b1), length(b2)) + T = Matrix{ComplexF64}(undef, length(b1), length(b2)) xvec = samplepoints(b1) A = hermite.A(b2.N) delta_x = particle.spacing(b1) - c = 1./sqrt(x0*sqrt(pi))*sqrt(delta_x) + c = 1.0/sqrt(x0*sqrt(pi))*sqrt(delta_x) for i in 1:length(b1) u = xvec[i]/x0 C = c*exp(-u^2/2) @@ -41,11 +41,11 @@ function transform(b1::PositionBasis, b2::FockBasis; x0::Real=1) end function transform(b1::FockBasis, b2::PositionBasis; x0::Real=1) - T = Matrix{Complex128}(length(b1), length(b2)) + T = Matrix{ComplexF64}(undef, length(b1), length(b2)) xvec = samplepoints(b2) A = hermite.A(b1.N) delta_x = particle.spacing(b2) - c = 1./sqrt(x0*sqrt(pi))*sqrt(delta_x) + c = 1.0/sqrt(x0*sqrt(pi))*sqrt(delta_x) for i in 1:length(b2) u = xvec[i]/x0 C = c*exp(-u^2/2) @@ -57,4 +57,3 @@ function transform(b1::FockBasis, b2::PositionBasis; x0::Real=1) end end # module - diff --git a/test/test_bases.jl b/test/test_bases.jl index 53c68f9b..8f7fc87f 100644 --- a/test/test_bases.jl +++ b/test/test_bases.jl @@ -1,4 +1,4 @@ -using Base.Test +using Test using QuantumOptics @testset "basis" begin diff --git a/test/test_embed.jl b/test/test_embed.jl index e82c3233..0078b879 100644 --- a/test/test_embed.jl +++ b/test/test_embed.jl @@ -1,5 +1,6 @@ -using Base.Test +using Test using QuantumOptics +using Random, SparseArrays, LinearAlgebra @testset "embed" begin @@ -12,15 +13,15 @@ b1 = NLevelBasis(3) b2 = SpinBasis(1//2) b3 = FockBasis(2) -I1 = full(identityoperator(b1)) -I2 = full(identityoperator(b2)) -I3 = full(identityoperator(b3)) +I1 = dense(identityoperator(b1)) +I2 = dense(identityoperator(b2)) +I3 = dense(identityoperator(b3)) b = b1 ⊗ b2 ⊗ b3 -op1 = DenseOperator(b1, b1, rand(Complex128, length(b1), length(b1))) -op2 = DenseOperator(b2, b2, rand(Complex128, length(b2), length(b2))) -op3 = DenseOperator(b3, b3, rand(Complex128, length(b3), length(b3))) +op1 = DenseOperator(b1, b1, rand(ComplexF64, length(b1), length(b1))) +op2 = DenseOperator(b2, b2, rand(ComplexF64, length(b2), length(b2))) +op3 = DenseOperator(b3, b3, rand(ComplexF64, length(b3), length(b3))) # Test Vector{Int}, Vector{Operator} @@ -30,7 +31,7 @@ y = op1 ⊗ op2 ⊗ I3 x = embed(b, [1,2], [sparse(op1), sparse(op2)]) y = op1 ⊗ op2 ⊗ I3 -@test 0 ≈ abs(tracedistance_nh(full(x), y)) +@test 0 ≈ abs(tracedistance_nh(dense(x), y)) x = embed(b, 1, op1) y = op1 ⊗ I2 ⊗ I3 @@ -48,7 +49,7 @@ y = I1 ⊗ I2 ⊗ op3 # Test Dict(Int=>Operator) x = embed(b, Dict(1 => sparse(op1), 2 => sparse(op2))) y = op1 ⊗ op2 ⊗ I3 -@test 0 ≈ abs(tracedistance_nh(full(x), y)) +@test 0 ≈ abs(tracedistance_nh(dense(x), y)) x = embed(b, Dict(1 => op1, 2 => op2)) y = op1 ⊗ op2 ⊗ I3 @@ -56,7 +57,7 @@ y = op1 ⊗ op2 ⊗ I3 x = embed(b, Dict([1,3] => sparse(op1⊗op3))) y = op1 ⊗ I2 ⊗ op3 -@test 0 ≈ abs(tracedistance_nh(full(x), y)) +@test 0 ≈ abs(tracedistance_nh(dense(x), y)) x = embed(b, Dict([1,3] => op1⊗op3)) y = op1 ⊗ I2 ⊗ op3 @@ -64,7 +65,7 @@ y = op1 ⊗ I2 ⊗ op3 x = 
embed(b, Dict([3,1] => sparse(op3⊗op1))) y = op1 ⊗ I2 ⊗ op3 -@test 0 ≈ abs(tracedistance_nh(full(x), y)) +@test 0 ≈ abs(tracedistance_nh(dense(x), y)) x = embed(b, Dict([3,1] => op3⊗op1)) y = op1 ⊗ I2 ⊗ op3 diff --git a/test/test_fock.jl b/test/test_fock.jl index f038c5b8..0e6c5d92 100644 --- a/test/test_fock.jl +++ b/test/test_fock.jl @@ -1,13 +1,14 @@ -using Base.Test +using Test using QuantumOptics +using Random, SparseArrays, LinearAlgebra @testset "fock" begin srand(0) -D(op1::Operator, op2::Operator) = abs(tracedistance_nh(full(op1), full(op2))) -randstate(b) = normalize(Ket(b, rand(Complex128, length(b)))) -randop(bl, br) = DenseOperator(bl, br, rand(Complex128, length(bl), length(br))) +D(op1::Operator, op2::Operator) = abs(tracedistance_nh(dense(op1), dense(op2))) +randstate(b) = normalize(Ket(b, rand(ComplexF64, length(b)))) +randop(bl, br) = DenseOperator(bl, br, rand(ComplexF64, length(bl), length(br))) randop(b) = randop(b, b) basis = FockBasis(2) @@ -22,9 +23,9 @@ basis = FockBasis(2) @test FockBasis(2) != FockBasis(3) # Test operators -@test number(basis) == SparseOperator(basis, spdiagm(Complex128[0, 1, 2])) -@test destroy(basis) == SparseOperator(basis, sparse(Complex128[0 1 0; 0 0 sqrt(2); 0 0 0])) -@test create(basis) == SparseOperator(basis, sparse(Complex128[0 0 0; 1 0 0; 0 sqrt(2) 0])) +@test number(basis) == SparseOperator(basis, sparse(Diagonal(ComplexF64[0, 1, 2]))) +@test destroy(basis) == SparseOperator(basis, sparse(ComplexF64[0 1 0; 0 0 sqrt(2); 0 0 0])) +@test create(basis) == SparseOperator(basis, sparse(ComplexF64[0 0 0; 1 0 0; 0 sqrt(2) 0])) @test number(basis) == dagger(number(basis)) @test create(basis) == dagger(destroy(basis)) @test destroy(basis) == dagger(create(basis)) diff --git a/test/test_manybody.jl b/test/test_manybody.jl index cd262092..170e6679 100644 --- a/test/test_manybody.jl +++ b/test/test_manybody.jl @@ -1,11 +1,12 @@ -using Base.Test +using Test using QuantumOptics +using Random, SparseArrays, LinearAlgebra @testset "manybody" begin srand(0) -D(op1::Operator, op2::Operator) = abs(tracedistance_nh(full(op1), full(op2))) +D(op1::Operator, op2::Operator) = abs(tracedistance_nh(dense(op1), dense(op2))) D(x1::StateVector, x2::StateVector) = norm(x2-x1) # Test state creation diff --git a/test/test_metrics.jl b/test/test_metrics.jl index 9d6dfe84..645bad1a 100644 --- a/test/test_metrics.jl +++ b/test/test_metrics.jl @@ -1,5 +1,6 @@ -using Base.Test +using Test using QuantumOptics +using SparseArrays, LinearAlgebra @testset "metrics" begin @@ -54,7 +55,7 @@ rho = spinup(b1) ⊗ dagger(coherentstate(b2, 0.1)) @test tracedistance_nh(rho, rho) ≈ 0. # entropy -rho_mix = full(identityoperator(b1))/2. +rho_mix = dense(identityoperator(b1))/2. 
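A quick aside on the Fock-operator tests just above: the positional `spdiagm(v)` form used on 0.6 is gone, and the tests switch to `sparse(Diagonal(v))`; the keyword-pair form `spdiagm(0 => v)` builds the same matrix. A self-contained check, using the same diagonal values as the number-operator test:

    using SparseArrays, LinearAlgebra

    v = ComplexF64[0, 1, 2]
    spdiagm(0 => v) == sparse(Diagonal(v))   # both give the same 3×3 sparse diagonal matrix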
@test entropy_vn(rho_mix)/log(2) ≈ 1 psi = coherentstate(FockBasis(20), 2.0) @test isapprox(entropy_vn(psi), 0.0, atol=1e-8) diff --git a/test/test_nlevel.jl b/test/test_nlevel.jl index e20c3cc2..e2274f85 100644 --- a/test/test_nlevel.jl +++ b/test/test_nlevel.jl @@ -1,5 +1,6 @@ -using Base.Test +using Test using QuantumOptics +using LinearAlgebra @testset "nlevel" begin @@ -16,7 +17,7 @@ b = NLevelBasis(N) @test_throws BoundsError transition(b, N+1, 1) @test_throws BoundsError transition(b, 1, 0) @test_throws BoundsError transition(b, 1, N+1) -@test full(transition(b, 2, 1)) == basisstate(b, 2) ⊗ dagger(basisstate(b, 1)) +@test dense(transition(b, 2, 1)) == basisstate(b, 2) ⊗ dagger(basisstate(b, 1)) # Test nlevel states @test_throws BoundsError nlevelstate(b, 0) diff --git a/test/test_operators.jl b/test/test_operators.jl index 0c98438a..6d792513 100644 --- a/test/test_operators.jl +++ b/test/test_operators.jl @@ -1,11 +1,11 @@ -using Base.Test +using Test using QuantumOptics - +using LinearAlgebra, SparseArrays, Random mutable struct test_operators <: Operator basis_l::Basis basis_r::Basis - data::Matrix{Complex128} + data::Matrix{ComplexF64} test_operators(b1::Basis, b2::Basis, data) = length(b1) == size(data, 1) && length(b2) == size(data, 2) ? new(b1, b2, data) : throw(DimensionMismatch()) end @@ -37,10 +37,10 @@ op_test3 = test_operators(b1 ⊗ b2, b2 ⊗ b1, randoperator(b, b).data) @test_throws ArgumentError dagger(op_test) @test_throws ArgumentError identityoperator(test_operators, b, b) -@test_throws ArgumentError trace(op_test) +@test_throws ArgumentError tr(op_test) @test_throws ArgumentError ptrace(op_test, [1]) @test_throws ArgumentError ishermitian(op_test) -@test_throws ArgumentError full(op_test) +@test_throws ArgumentError dense(op_test) @test_throws ArgumentError sparse(op_test) @test expect(1, op1, ρ) ≈ expect(embed(b, 1, op1), ρ) @@ -63,10 +63,10 @@ op_test3 = test_operators(b1 ⊗ b2, b2 ⊗ b1, randoperator(b, b).data) @test_throws ErrorException QuantumOptics.operators.gemm!() @test_throws ErrorException QuantumOptics.operators.gemv!() -@test_throws ArgumentError expm(sparse(op1)) +@test_throws ArgumentError exp(sparse(op1)) -@test one(b1).data == diagm(ones(b1.shape[1])) -@test one(op1).data == diagm(ones(b1.shape[1])) +@test one(b1).data == Diagonal(ones(b1.shape[1])) +@test one(op1).data == Diagonal(ones(b1.shape[1])) @test_throws ArgumentError conj!(op_test) diff --git a/test/test_operators_dense.jl b/test/test_operators_dense.jl index 459f5745..64573334 100644 --- a/test/test_operators_dense.jl +++ b/test/test_operators_dense.jl @@ -1,11 +1,12 @@ -using Base.Test +using Test using QuantumOptics +using Random, SparseArrays, LinearAlgebra @testset "operators-dense" begin srand(0) -D(op1::Operator, op2::Operator) = abs(tracedistance_nh(full(op1), full(op2))) +D(op1::Operator, op2::Operator) = abs(tracedistance_nh(dense(op1), dense(op2))) D(x1::StateVector, x2::StateVector) = norm(x2-x1) b1a = GenericBasis(2) @@ -41,11 +42,11 @@ op1 = randoperator(b_l, b_r) op2 = randoperator(b_l, b_r) op3 = randoperator(b_l, b_r) -x1 = Ket(b_r, rand(Complex128, length(b_r))) -x2 = Ket(b_r, rand(Complex128, length(b_r))) +x1 = Ket(b_r, rand(ComplexF64, length(b_r))) +x2 = Ket(b_r, rand(ComplexF64, length(b_r))) -xbra1 = Bra(b_l, rand(Complex128, length(b_l))) -xbra2 = Bra(b_l, rand(Complex128, length(b_l))) +xbra1 = Bra(b_l, rand(ComplexF64, length(b_l))) +xbra2 = Bra(b_l, rand(ComplexF64, length(b_l))) # Addition @test_throws bases.IncompatibleBases op1 + dagger(op2) @@ -121,27 +122,27 
@@ op123 = op1a ⊗ op2a ⊗ op3a @test 1e-13 > D(dagger(op1a ⊗ op2a), dagger(op1a) ⊗ dagger(op2a)) # Internal layout -a = Ket(b1a, rand(Complex128, length(b1a))) -b = Ket(b2b, rand(Complex128, length(b2b))) +a = Ket(b1a, rand(ComplexF64, length(b1a))) +b = Ket(b2b, rand(ComplexF64, length(b2b))) ab = a ⊗ dagger(b) @test ab.data[2,3] == a.data[2]*conj(b.data[3]) @test ab.data[2,1] == a.data[2]*conj(b.data[1]) shape = tuple(op123.basis_l.shape..., op123.basis_r.shape...) -idx = sub2ind(shape, 2, 1, 1, 3, 4, 5) +idx = LinearIndices(shape)[2, 1, 1, 3, 4, 5] @test op123.data[idx] == op1a.data[2,3]*op2a.data[1,4]*op3a.data[1,5] @test reshape(op123.data, shape...)[2, 1, 1, 3, 4, 5] == op1a.data[2,3]*op2a.data[1,4]*op3a.data[1,5] -idx = sub2ind(shape, 2, 1, 1, 1, 3, 4) +idx = LinearIndices(shape)[2, 1, 1, 1, 3, 4] @test op123.data[idx] == op1a.data[2,1]*op2a.data[1,3]*op3a.data[1,4] @test reshape(op123.data, shape...)[2, 1, 1, 1, 3, 4] == op1a.data[2,1]*op2a.data[1,3]*op3a.data[1,4] # Test identityoperator -x1 = Ket(b_r, rand(Complex128, length(b_r))) -x2 = Ket(b_r, rand(Complex128, length(b_r))) -xbra1 = Bra(b_l, rand(Complex128, length(b_l))) -xbra2 = Bra(b_l, rand(Complex128, length(b_l))) +x1 = Ket(b_r, rand(ComplexF64, length(b_r))) +x2 = Ket(b_r, rand(ComplexF64, length(b_r))) +xbra1 = Bra(b_l, rand(ComplexF64, length(b_l))) +xbra2 = Bra(b_l, rand(ComplexF64, length(b_l))) I = identityoperator(DenseOperator, b_r) @test isa(I, DenseOperator) @@ -155,18 +156,18 @@ I = identityoperator(DenseOperator, b_l) @test 1e-11 > D(xbra1*I, xbra1) @test I == identityoperator(DenseOperator, b1a) ⊗ identityoperator(DenseOperator, b2a) ⊗ identityoperator(DenseOperator, b3a) -# Test trace and normalize +# Test tr and normalize op = DenseOperator(GenericBasis(3), [1 3 2;5 2 2;-1 2 5]) -@test 8 == trace(op) +@test 8 == tr(op) op_normalized = normalize(op) -@test 8 == trace(op) -@test 1 == trace(op_normalized) +@test 8 == tr(op) +@test 1 == tr(op_normalized) op_copy = deepcopy(op) normalize!(op_copy) -@test trace(op) != trace(op_copy) -@test 1 ≈ trace(op_copy) +@test tr(op) != tr(op_copy) +@test 1 ≈ tr(op_copy) -# Test partial trace of state vectors +# Test partial tr of state vectors psi1 = 0.1*randstate(b1a) psi2 = 0.3*randstate(b2a) psi3 = 0.7*randstate(b3a) @@ -188,7 +189,7 @@ psi123 = psi1 ⊗ psi2 ⊗ psi3 @test_throws ArgumentError ptrace(psi123, [1, 2, 3]) -# Test partial trace of operators +# Test partial tr of operators b1 = GenericBasis(3) b2 = GenericBasis(5) b3 = GenericBasis(7) @@ -197,13 +198,13 @@ op2 = randoperator(b2) op3 = randoperator(b3) op123 = op1 ⊗ op2 ⊗ op3 -@test 1e-13 > D(op1⊗op2*trace(op3), ptrace(op123, 3)) -@test 1e-13 > D(op1⊗op3*trace(op2), ptrace(op123, 2)) -@test 1e-13 > D(op2⊗op3*trace(op1), ptrace(op123, 1)) +@test 1e-13 > D(op1⊗op2*tr(op3), ptrace(op123, 3)) +@test 1e-13 > D(op1⊗op3*tr(op2), ptrace(op123, 2)) +@test 1e-13 > D(op2⊗op3*tr(op1), ptrace(op123, 1)) -@test 1e-13 > D(op1*trace(op2)*trace(op3), ptrace(op123, [2,3])) -@test 1e-13 > D(op2*trace(op1)*trace(op3), ptrace(op123, [1,3])) -@test 1e-13 > D(op3*trace(op1)*trace(op2), ptrace(op123, [1,2])) +@test 1e-13 > D(op1*tr(op2)*tr(op3), ptrace(op123, [2,3])) +@test 1e-13 > D(op2*tr(op1)*tr(op3), ptrace(op123, [1,3])) +@test 1e-13 > D(op3*tr(op1)*tr(op2), ptrace(op123, [1,2])) @test_throws ArgumentError ptrace(op123, [1,2,3]) x = randoperator(b1, b1⊗b2) @@ -218,7 +219,7 @@ x = randoperator(b1⊗b2, b2⊗b1) op1 = randoperator(b1, b2) op2 = randoperator(b3) -@test 1e-13 > D(op1*trace(op2), ptrace(op1⊗op2, 2)) +@test 1e-13 > 
D(op1*tr(op2), ptrace(op1⊗op2, 2)) # Test expect b1 = GenericBasis(3) @@ -237,7 +238,7 @@ state = randstate(b_l) @test expect(3, op3, state) ≈ expect(op3, ptrace(state, [1, 2])) state = randoperator(b_l) -@test expect(op123, state) ≈ trace(op123*state) +@test expect(op123, state) ≈ tr(op123*state) @test expect(1, op1, state) ≈ expect(op1, ptrace(state, [2, 3])) @test expect(2, op2, state) ≈ expect(op2, ptrace(state, [1, 3])) @test expect(3, op3, state) ≈ expect(op3, ptrace(state, [1, 2])) @@ -265,8 +266,8 @@ op321 = op3⊗op2⊗op1 # Test projector -xket = normalize(Ket(b_l, rand(Complex128, length(b_l)))) -yket = normalize(Ket(b_l, rand(Complex128, length(b_l)))) +xket = normalize(Ket(b_l, rand(ComplexF64, length(b_l)))) +yket = normalize(Ket(b_l, rand(ComplexF64, length(b_l)))) xbra = dagger(xket) ybra = dagger(yket) diff --git a/test/test_operators_lazyproduct.jl b/test/test_operators_lazyproduct.jl index 95a15051..918358cb 100644 --- a/test/test_operators_lazyproduct.jl +++ b/test/test_operators_lazyproduct.jl @@ -1,12 +1,13 @@ -using Base.Test +using Test using QuantumOptics +using LinearAlgebra, Random @testset "operators-lazyproduct" begin srand(0) -D(op1::Operator, op2::Operator) = abs(tracedistance_nh(full(op1), full(op2))) +D(op1::Operator, op2::Operator) = abs(tracedistance_nh(dense(op1), dense(op2))) D(x1::StateVector, x2::StateVector) = norm(x2-x1) b1a = GenericBasis(2) @@ -34,10 +35,10 @@ op2.operators[1].data[1,1] = complex(10.) op2.factor = 3. @test op2.factor != op1.factor -# Test full & sparse +# Test dense & sparse op1 = randoperator(b_l, b_r) op2 = randoperator(b_r, b_l) -@test 0.1*(op1*op2) == full(LazyProduct([sparse(op1), sparse(op2)], 0.1)) +@test 0.1*(op1*op2) == dense(LazyProduct([sparse(op1), sparse(op2)], 0.1)) @test 0.1*(sparse(op1)*sparse(op2)) == sparse(LazyProduct([op1, op2], 0.1)) @@ -55,10 +56,10 @@ op2_ = 0.3*(op2a*op2b) op3 = LazyProduct(op3a) op3_ = op3a -x1 = Ket(b_l, rand(Complex128, length(b_l))) -x2 = Ket(b_l, rand(Complex128, length(b_l))) -xbra1 = Bra(b_l, rand(Complex128, length(b_l))) -xbra2 = Bra(b_l, rand(Complex128, length(b_l))) +x1 = Ket(b_l, rand(ComplexF64, length(b_l))) +x2 = Ket(b_l, rand(ComplexF64, length(b_l))) +xbra1 = Bra(b_l, rand(ComplexF64, length(b_l))) +xbra2 = Bra(b_l, rand(ComplexF64, length(b_l))) # Addition @test_throws ArgumentError op1 + op2 @@ -80,15 +81,15 @@ xbra2 = Bra(b_l, rand(Complex128, length(b_l))) Idense = identityoperator(DenseOperator, b_l) I = identityoperator(LazyProduct, b_l) @test isa(I, LazyProduct) -@test full(I) == Idense +@test dense(I) == Idense @test 1e-11 > D(I*x1, x1) @test 1e-11 > D(xbra1*I, xbra1) -# Test trace and normalize +# Test tr and normalize op1 = randoperator(b_l) op2 = randoperator(b_l) op = LazyProduct(op1, op2) -@test_throws ArgumentError trace(op) +@test_throws ArgumentError tr(op) @test_throws ArgumentError ptrace(op, [1, 2]) @test_throws ArgumentError normalize(op) @test_throws ArgumentError normalize!(op) @@ -99,10 +100,10 @@ op2 = randoperator(b_l) op = 0.3*LazyProduct(op1, sparse(op2)) op_ = 0.3*op1*op2 -state = Ket(b_l, rand(Complex128, length(b_l))) +state = Ket(b_l, rand(ComplexF64, length(b_l))) @test expect(op, state) ≈ expect(op_, state) -state = DenseOperator(b_l, b_l, rand(Complex128, length(b_l), length(b_l))) +state = DenseOperator(b_l, b_l, rand(ComplexF64, length(b_l), length(b_l))) @test expect(op, state) ≈ expect(op_, state) # Permute systems @@ -126,8 +127,8 @@ op3 = randoperator(b_l, b_r) op = LazyProduct([op1, sparse(op2), op3], 0.2) op_ = 0.2*op1*op2*op3 -state = 
Ket(b_r, rand(Complex128, length(b_r))) -result_ = Ket(b_l, rand(Complex128, length(b_l))) +state = Ket(b_r, rand(ComplexF64, length(b_r))) +result_ = Ket(b_l, rand(ComplexF64, length(b_l))) result = copy(result_) operators.gemv!(complex(1.), op, state, complex(0.), result) @test 1e-11 > D(result, op_*state) @@ -138,8 +139,8 @@ beta = complex(2.1) operators.gemv!(alpha, op, state, beta, result) @test 1e-11 > D(result, alpha*op_*state + beta*result_) -state = Bra(b_l, rand(Complex128, length(b_l))) -result_ = Bra(b_r, rand(Complex128, length(b_r))) +state = Bra(b_l, rand(ComplexF64, length(b_l))) +result_ = Bra(b_r, rand(ComplexF64, length(b_r))) result = copy(result_) operators.gemv!(complex(1.), state, op, complex(0.), result) @test 1e-11 > D(result, state*op_) diff --git a/test/test_operators_lazysum.jl b/test/test_operators_lazysum.jl index f648f0fb..c8a928ff 100644 --- a/test/test_operators_lazysum.jl +++ b/test/test_operators_lazysum.jl @@ -1,12 +1,13 @@ -using Base.Test +using Test using QuantumOptics +using LinearAlgebra, Random @testset "operators-lazysum" begin srand(0) -D(op1::Operator, op2::Operator) = abs(tracedistance_nh(full(op1), full(op2))) +D(op1::Operator, op2::Operator) = abs(tracedistance_nh(dense(op1), dense(op2))) D(x1::StateVector, x2::StateVector) = norm(x2-x1) b1a = GenericBasis(2) @@ -35,10 +36,10 @@ op2.operators[1].data[1,1] = complex(10.) op2.factors[1] = 3. @test op2.factors[1] != op1.factors[1] -# Test full & sparse +# Test dense & sparse op1 = randoperator(b_l, b_r) op2 = sparse(randoperator(b_l, b_r)) -@test 0.1*op1 + 0.3*full(op2) == full(LazySum([0.1, 0.3], [op1, op2])) +@test 0.1*op1 + 0.3*dense(op2) == dense(LazySum([0.1, 0.3], [op1, op2])) @test 0.1*sparse(op1) + 0.3*op2 == sparse(LazySum([0.1, 0.3], [op1, op2])) @@ -56,9 +57,9 @@ op2_ = 0.7*op2a + 0.9*op2b op3 = LazySum(op3a) op3_ = op3a -x1 = Ket(b_r, rand(Complex128, length(b_r))) -x2 = Ket(b_r, rand(Complex128, length(b_r))) -xbra1 = Bra(b_l, rand(Complex128, length(b_l))) +x1 = Ket(b_r, rand(ComplexF64, length(b_r))) +x2 = Ket(b_r, rand(ComplexF64, length(b_r))) +xbra1 = Bra(b_l, rand(ComplexF64, length(b_l))) # Addition @test_throws bases.IncompatibleBases op1 + dagger(op2) @@ -86,32 +87,32 @@ xbra1 = Bra(b_l, rand(Complex128, length(b_l))) Idense = identityoperator(DenseOperator, b_r) I = identityoperator(LazySum, b_r) @test isa(I, LazySum) -@test full(I) == Idense +@test dense(I) == Idense @test 1e-11 > D(I*x1, x1) Idense = identityoperator(DenseOperator, b_l) I = identityoperator(LazySum, b_l) @test isa(I, LazySum) -@test full(I) == Idense +@test dense(I) == Idense @test 1e-11 > D(xbra1*I, xbra1) -# Test trace and normalize +# Test tr and normalize op1 = randoperator(b_l) op2 = randoperator(b_l) op3 = randoperator(b_l) op = LazySum([0.1, 0.3, 1.2], [op1, op2, op3]) op_ = 0.1*op1 + 0.3*op2 + 1.2*op3 -@test trace(op_) ≈ trace(op) +@test tr(op_) ≈ tr(op) op_normalized = normalize(op) -@test trace(op_) ≈ trace(op) -@test 1 ≈ trace(op_normalized) +@test tr(op_) ≈ tr(op) +@test 1 ≈ tr(op_normalized) op_copy = deepcopy(op) normalize!(op_copy) -@test trace(op) != trace(op_copy) -@test 1 ≈ trace(op_copy) +@test tr(op) != tr(op_copy) +@test 1 ≈ tr(op_copy) -# Test partial trace +# Test partial tr op1 = randoperator(b_l) op2 = randoperator(b_l) op3 = randoperator(b_l) @@ -129,10 +130,10 @@ op123_ = 0.1*op1 + 0.3*op2 + 1.2*op3 @test_throws ArgumentError ptrace(op123, [1,2,3]) # Test expect -state = Ket(b_l, rand(Complex128, length(b_l))) +state = Ket(b_l, rand(ComplexF64, length(b_l))) @test expect(op123, 
state) ≈ expect(op123_, state) -state = DenseOperator(b_l, b_l, rand(Complex128, length(b_l), length(b_l))) +state = DenseOperator(b_l, b_l, rand(ComplexF64, length(b_l), length(b_l))) @test expect(op123, state) ≈ expect(op123_, state) # Permute systems @@ -165,8 +166,8 @@ op3 = randoperator(b_l, b_r) op = LazySum([0.1, 0.3, 1.2], [op1, op2, op3]) op_ = 0.1*op1 + 0.3*op2 + 1.2*op3 -state = Ket(b_r, rand(Complex128, length(b_r))) -result_ = Ket(b_l, rand(Complex128, length(b_l))) +state = Ket(b_r, rand(ComplexF64, length(b_r))) +result_ = Ket(b_l, rand(ComplexF64, length(b_l))) result = deepcopy(result_) operators.gemv!(complex(1.), op, state, complex(0.), result) @test 1e-13 > D(result, op_*state) @@ -177,8 +178,8 @@ beta = complex(2.1) operators.gemv!(alpha, op, state, beta, result) @test 1e-13 > D(result, alpha*op_*state + beta*result_) -state = Bra(b_l, rand(Complex128, length(b_l))) -result_ = Bra(b_r, rand(Complex128, length(b_r))) +state = Bra(b_l, rand(ComplexF64, length(b_l))) +result_ = Bra(b_r, rand(ComplexF64, length(b_r))) result = deepcopy(result_) operators.gemv!(complex(1.), state, op, complex(0.), result) @test 1e-13 > D(result, state*op_) diff --git a/test/test_operators_lazytensor.jl b/test/test_operators_lazytensor.jl index 465c6ab3..a1106395 100644 --- a/test/test_operators_lazytensor.jl +++ b/test/test_operators_lazytensor.jl @@ -1,10 +1,11 @@ -using Base.Test +using Test using QuantumOptics +using LinearAlgebra, SparseArrays, Random mutable struct test_lazytensor <: Operator basis_l::Basis basis_r::Basis - data::Matrix{Complex128} + data::Matrix{ComplexF64} test_lazytensor(b1::Basis, b2::Basis, data) = length(b1) == size(data, 1) && length(b2) == size(data, 2) ? new(b1, b2, data) : throw(DimensionMismatch()) end @@ -12,7 +13,7 @@ end srand(0) -D(op1::Operator, op2::Operator) = abs(tracedistance_nh(full(op1), full(op2))) +D(op1::Operator, op2::Operator) = abs(tracedistance_nh(dense(op1), dense(op2))) D(x1::StateVector, x2::StateVector) = norm(x2-x1) b1a = GenericBasis(2) @@ -52,10 +53,10 @@ x_.indices[2] = 100 @test x_.indices != x.indices -# Test full & sparse +# Test dense & sparse I2 = identityoperator(b2a, b2b) x = LazyTensor(b_l, b_r, [1, 3], [op1, sparse(op3)], 0.3) -@test 1e-12 > D(0.3*op1⊗full(I2)⊗op3, full(x)) +@test 1e-12 > D(0.3*op1⊗dense(I2)⊗op3, dense(x)) @test 1e-12 > D(0.3*sparse(op1)⊗I2⊗sparse(op3), sparse(x)) # Test suboperators @@ -69,9 +70,9 @@ x = LazyTensor(b_l, b_r, [1, 3], [op1, sparse(op3)], 0.3) subop1 = randoperator(b1a, b1b) subop2 = randoperator(b2a, b2b) subop3 = randoperator(b3a, b3b) -I1 = full(identityoperator(b1a, b1b)) -I2 = full(identityoperator(b2a, b2b)) -I3 = full(identityoperator(b3a, b3b)) +I1 = dense(identityoperator(b1a, b1b)) +I2 = dense(identityoperator(b2a, b2b)) +I3 = dense(identityoperator(b3a, b3b)) op1 = LazyTensor(b_l, b_r, [1, 3], [subop1, sparse(subop3)], 0.1) op1_ = 0.1*subop1 ⊗ I2 ⊗ subop3 op2 = LazyTensor(b_l, b_r, [2, 3], [sparse(subop2), subop3], 0.7) @@ -79,10 +80,10 @@ op2_ = 0.7*I1 ⊗ subop2 ⊗ subop3 op3 = 0.3*LazyTensor(b_l, b_r, 3, subop3) op3_ = 0.3*I1 ⊗ I2 ⊗ subop3 -x1 = Ket(b_r, rand(Complex128, length(b_r))) -x2 = Ket(b_r, rand(Complex128, length(b_r))) -xbra1 = Bra(b_l, rand(Complex128, length(b_l))) -xbra2 = Bra(b_l, rand(Complex128, length(b_l))) +x1 = Ket(b_r, rand(ComplexF64, length(b_r))) +x2 = Ket(b_r, rand(ComplexF64, length(b_r))) +xbra1 = Bra(b_l, rand(ComplexF64, length(b_l))) +xbra2 = Bra(b_l, rand(ComplexF64, length(b_l))) # Addition @test_throws ArgumentError op1 + op2 @@ -107,37 +108,37 @@ 
xbra2 = Bra(b_l, rand(Complex128, length(b_l))) Idense = identityoperator(DenseOperator, b_r) I = identityoperator(LazyTensor, b_r) @test isa(I, LazyTensor) -@test full(I) == Idense +@test dense(I) == Idense @test 1e-11 > D(I*x1, x1) @test I == identityoperator(LazyTensor, b1b) ⊗ identityoperator(LazyTensor, b2b) ⊗ identityoperator(LazyTensor, b3b) Idense = identityoperator(DenseOperator, b_l) I = identityoperator(LazyTensor, b_l) @test isa(I, LazyTensor) -@test full(I) == Idense +@test dense(I) == Idense @test 1e-11 > D(xbra1*I, xbra1) @test I == identityoperator(LazyTensor, b1a) ⊗ identityoperator(LazyTensor, b2a) ⊗ identityoperator(LazyTensor, b3a) -# Test trace and normalize +# Test tr and normalize subop1 = randoperator(b1a) -I2 = full(identityoperator(b2a)) +I2 = dense(identityoperator(b2a)) subop3 = randoperator(b3a) op = LazyTensor(b_l, b_l, [1, 3], [subop1, sparse(subop3)], 0.1) op_ = 0.1*subop1 ⊗ I2 ⊗ subop3 -@test trace(op) ≈ trace(op_) +@test tr(op) ≈ tr(op_) op_normalized = normalize(op) -@test trace(op_) ≈ trace(op) -@test 1 ≈ trace(op_normalized) +@test tr(op_) ≈ tr(op) +@test 1 ≈ tr(op_normalized) op_copy = deepcopy(op) normalize!(op_copy) -@test trace(op) != trace(op_copy) -@test 1 ≈ trace(op_copy) +@test tr(op) != tr(op_copy) +@test 1 ≈ tr(op_copy) -# Test partial trace +# Test partial tr subop1 = randoperator(b1a) -I2 = full(identityoperator(b2a)) +I2 = dense(identityoperator(b2a)) subop3 = randoperator(b3a) op = LazyTensor(b_l, b_l, [1, 3], [subop1, sparse(subop3)], 0.1) op_ = 0.1*subop1 ⊗ I2 ⊗ subop3 @@ -153,17 +154,17 @@ op_ = 0.1*subop1 ⊗ I2 ⊗ subop3 @test_throws ArgumentError ptrace(op, [1,2,3]) # Test expect -state = Ket(b_l, rand(Complex128, length(b_l))) +state = Ket(b_l, rand(ComplexF64, length(b_l))) @test expect(op, state) ≈ expect(op_, state) -state = DenseOperator(b_l, b_l, rand(Complex128, length(b_l), length(b_l))) +state = DenseOperator(b_l, b_l, rand(ComplexF64, length(b_l), length(b_l))) @test expect(op, state) ≈ expect(op_, state) # Permute systems subop1 = randoperator(b1a, b1b) subop2 = randoperator(b2a, b2b) subop3 = randoperator(b3a, b3b) -I2 = full(identityoperator(b2a, b2b)) +I2 = dense(identityoperator(b2a, b2b)) op = LazyTensor(b_l, b_r, [1, 3], [subop1, sparse(subop3)])*0.1 op_ = 0.1*subop1 ⊗ I2 ⊗ subop3 @@ -178,12 +179,12 @@ op_ = 0.1*subop1 ⊗ I2 ⊗ subop3 subop1 = randoperator(b1a, b1b) subop2 = randoperator(b2a, b2b) subop3 = randoperator(b3a, b3b) -I2 = full(identityoperator(b2a, b2b)) +I2 = dense(identityoperator(b2a, b2b)) op = LazyTensor(b_l, b_r, [1, 3], [subop1, sparse(subop3)])*0.1 op_ = 0.1*subop1 ⊗ I2 ⊗ subop3 -state = Ket(b_r, rand(Complex128, length(b_r))) -result_ = Ket(b_l, rand(Complex128, length(b_l))) +state = Ket(b_r, rand(ComplexF64, length(b_r))) +result_ = Ket(b_l, rand(ComplexF64, length(b_l))) result = deepcopy(result_) operators.gemv!(complex(1.), op, state, complex(0.), result) @test 1e-13 > D(result, op_*state) @@ -194,8 +195,8 @@ beta = complex(2.1) operators.gemv!(alpha, op, state, beta, result) @test 1e-13 > D(result, alpha*op_*state + beta*result_) -state = Bra(b_l, rand(Complex128, length(b_l))) -result_ = Bra(b_r, rand(Complex128, length(b_r))) +state = Bra(b_l, rand(ComplexF64, length(b_l))) +result_ = Bra(b_r, rand(ComplexF64, length(b_r))) result = deepcopy(result_) operators.gemv!(complex(1.), state, op, complex(0.), result) @test 1e-13 > D(result, state*op_) @@ -212,7 +213,7 @@ b_r2 = GenericBasis(13) subop1 = randoperator(b1a, b1b) subop2 = randoperator(b2a, b2b) subop3 = randoperator(b3a, b3b) -I2 = 
full(identityoperator(b2a, b2b)) +I2 = dense(identityoperator(b2a, b2b)) op = LazyTensor(b_l, b_r, [1, 3], [subop1, sparse(subop3)])*0.1 op_ = 0.1*subop1 ⊗ I2 ⊗ subop3 @@ -241,14 +242,14 @@ operators.gemm!(alpha, state, op, beta, result) @test 1e-12 > D(result, alpha*state*op_ + beta*result_) # Test calling gemv with non-complex factors -state = Ket(b_r, rand(Complex128, length(b_r))) -result_ = Ket(b_l, rand(Complex128, length(b_l))) +state = Ket(b_r, rand(ComplexF64, length(b_r))) +result_ = Ket(b_l, rand(ComplexF64, length(b_l))) result = deepcopy(result_) operators.gemv!(1, op, state, 0, result) @test 1e-13 > D(result, op_*state) -state = Bra(b_l, rand(Complex128, length(b_l))) -result_ = Bra(b_r, rand(Complex128, length(b_r))) +state = Bra(b_l, rand(ComplexF64, length(b_l))) +result_ = Bra(b_r, rand(ComplexF64, length(b_r))) result = deepcopy(result_) operators.gemv!(1, state, op, 0, result) @test 1e-13 > D(result, state*op_) diff --git a/test/test_operators_sparse.jl b/test/test_operators_sparse.jl index c16fd9b3..32584b4b 100644 --- a/test/test_operators_sparse.jl +++ b/test/test_operators_sparse.jl @@ -1,5 +1,6 @@ -using Base.Test +using Test using QuantumOptics +using Random, SparseArrays, LinearAlgebra # Custom operator type for testing error msg @@ -10,7 +11,7 @@ mutable struct TestOperator <: Operator; end srand(0) -D(op1::Operator, op2::Operator) = abs(tracedistance_nh(full(op1), full(op2))) +D(op1::Operator, op2::Operator) = abs(tracedistance_nh(dense(op1), dense(op2))) D(x1::StateVector, x2::StateVector) = norm(x2-x1) sprandop(b1, b2) = sparse(randoperator(b1, b2)) sprandop(b) = sprandop(b, b) @@ -26,8 +27,8 @@ b_l = b1a⊗b2a⊗b3a b_r = b1b⊗b2b⊗b3b # Test creation -@test_throws DimensionMismatch DenseOperator(b1a, spzeros(Complex128, 3, 2)) -@test_throws DimensionMismatch DenseOperator(b1a, b1b, spzeros(Complex128, 3, 2)) +@test_throws DimensionMismatch DenseOperator(b1a, spzeros(ComplexF64, 3, 2)) +@test_throws DimensionMismatch DenseOperator(b1a, b1b, spzeros(ComplexF64, 3, 2)) op1 = SparseOperator(b1a, b1b, sparse([1 1 1; 1 1 1])) op2 = sparse(DenseOperator(b1b, b1a, [1 1; 1 1; 1 1])) @test op1 == dagger(op2) @@ -45,9 +46,9 @@ op_zero = SparseOperator(b_l, b_r) op1 = sprandop(b_l, b_r) op2 = sprandop(b_l, b_r) op3 = sprandop(b_l, b_r) -op1_ = full(op1) -op2_ = full(op2) -op3_ = full(op3) +op1_ = dense(op1) +op2_ = dense(op2) +op3_ = dense(op3) x1 = randstate(b_r) x2 = randstate(b_r) @@ -96,29 +97,29 @@ conj!(tmp) Idense = identityoperator(DenseOperator, b_r) I = identityoperator(SparseOperator, b_r) @test isa(I, SparseOperator) -@test full(I) == Idense +@test dense(I) == Idense @test 1e-11 > D(I*x1, x1) @test I == identityoperator(SparseOperator, b1b) ⊗ identityoperator(SparseOperator, b2b) ⊗ identityoperator(SparseOperator, b3b) Idense = identityoperator(DenseOperator, b_l) I = identityoperator(SparseOperator, b_l) @test isa(I, SparseOperator) -@test full(I) == Idense +@test dense(I) == Idense @test 1e-11 > D(xbra1*I, xbra1) @test I == identityoperator(SparseOperator, b1a) ⊗ identityoperator(SparseOperator, b2a) ⊗ identityoperator(SparseOperator, b3a) -# Test trace and normalize +# Test tr and normalize op = sparse(DenseOperator(GenericBasis(3), [1 3 2;5 2 2;-1 2 5])) -@test 8 == trace(op) +@test 8 == tr(op) op_normalized = normalize(op) -@test 8 == trace(op) -@test 1 == trace(op_normalized) +@test 8 == tr(op) +@test 1 == tr(op_normalized) # op_ = normalize!(op) # @test op_ === op -# @test 1 == trace(op) +# @test 1 == tr(op) -# Test partial trace +# Test partial tr b1 = 
GenericBasis(3) b2 = GenericBasis(5) b3 = GenericBasis(7) @@ -127,7 +128,7 @@ op1 = sprandop(b1) op2 = sprandop(b2) op3 = sprandop(b3) op123 = op1 ⊗ op2 ⊗ op3 -op123_ = full(op123) +op123_ = dense(op123) @test 1e-14 > D(ptrace(op123_, 3), ptrace(op123, 3)) @test 1e-14 > D(ptrace(op123_, 2), ptrace(op123, 2)) @@ -162,11 +163,11 @@ op1b = sprandop(b1a, b1b) op2a = sprandop(b2a, b2b) op2b = sprandop(b2a, b2b) op3a = sprandop(b3a, b3b) -op1a_ = full(op1a) -op1b_ = full(op1b) -op2a_ = full(op2a) -op2b_ = full(op2b) -op3a_ = full(op3a) +op1a_ = dense(op1a) +op1b_ = dense(op1b) +op2a_ = dense(op2a) +op2b_ = dense(op2b) +op3a_ = dense(op3a) op123 = op1a ⊗ op2a ⊗ op3a op123_ = op1a_ ⊗ op2a_ ⊗ op3a_ @test op123.basis_l == b_l @@ -230,11 +231,11 @@ I = identityoperator(b) @test diagonaloperator(b, [1, 1, 1, 1]) == I @test diagonaloperator(b, [1., 1., 1., 1.]) == I @test diagonaloperator(b, [1im, 1im, 1im, 1im]) == 1im*I -@test diagonaloperator(b, [0:3;]) == sparse(DenseOperator(b, diagm([0:3;]))) +@test diagonaloperator(b, [0:3;]) == sparse(DenseOperator(b, Diagonal([0:3;]))) # Test gemv op = sprandop(b_l, b_r) -op_ = full(op) +op_ = dense(op) xket = randstate(b_l) xbra = dagger(xket) @@ -268,7 +269,7 @@ b2 = GenericBasis(5) b3 = GenericBasis(7) op = sprandop(b1, b2) -op_ = full(op) +op_ = dense(op) state = randoperator(b2, b3) result_ = randoperator(b1, b3) @@ -300,7 +301,7 @@ b2 = GenericBasis(60) b3 = GenericBasis(55) op = sprandop(b1, b2) -op_ = full(op) +op_ = dense(op) state = randoperator(b2, b3) result_ = randoperator(b1, b3) @@ -329,7 +330,7 @@ operators.gemm!(alpha, state, op, beta, result) # Test remaining uncovered code @test_throws DimensionMismatch SparseOperator(b1, b2, zeros(10, 10)) dat = sprandop(b1, b1).data -@test SparseOperator(b1, dat) == SparseOperator(b1, Matrix{Complex128}(dat)) +@test SparseOperator(b1, dat) == SparseOperator(b1, Matrix{ComplexF64}(dat)) @test_throws ArgumentError sparse(TestOperator()) diff --git a/test/test_particle.jl b/test/test_particle.jl index f822dca7..61e9d6a1 100644 --- a/test/test_particle.jl +++ b/test/test_particle.jl @@ -1,11 +1,12 @@ -using Base.Test +using Test using QuantumOptics +using FFTW, LinearAlgebra, Random @testset "particle" begin srand(0) -D(op1::Operator, op2::Operator) = abs(tracedistance_nh(full(op1), full(op2))) +D(op1::Operator, op2::Operator) = abs(tracedistance_nh(dense(op1), dense(op2))) N = 200 xmin = -32.5 @@ -23,7 +24,7 @@ x0 = 5.1 p0 = -3.2 sigma = 1. 
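The `1./(...)` → `1.0/(...)` replacements in `transform` and in this test file address a parser change: on 0.7, spellings like `1./x` are flagged as ambiguous (they could mean `1. / x` or `1 ./ x`), so the scalar literal is written out. Illustrative only, reusing the surrounding parameter names:

    sigma = 1.0
    sigma_x = sigma / sqrt(2)
    sigma_p = 1.0 / (sigma * sqrt(2))   # previously `1./(sigma*sqrt(2))`, which 0.7 flags as ambiguous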
sigma_x = sigma/sqrt(2) -sigma_p = 1./(sigma*sqrt(2)) +sigma_p = 1.0/(sigma*sqrt(2)) psi0_bx = gaussianstate(basis_position, x0, p0, sigma) psi0_bp = gaussianstate(basis_momentum, x0, p0, sigma) @@ -34,7 +35,7 @@ psi0_bp = gaussianstate(basis_momentum, x0, p0, sigma) p_bx = momentum(basis_position) x_bx = position(basis_position) -@test 1e-10 > D(p_bx, transform(basis_position, basis_momentum)*full(momentum(basis_momentum))*transform(basis_momentum, basis_position)) +@test 1e-10 > D(p_bx, transform(basis_position, basis_momentum)*dense(momentum(basis_momentum))*transform(basis_momentum, basis_position)) p_bp = momentum(basis_momentum) x_bp = position(basis_momentum) @@ -65,7 +66,7 @@ function transformation(b1::MomentumBasis, b2::PositionBasis, psi::Ket) throw(IncompatibleBases()) end N = b1.N - psi_shifted = exp.(1im*b2.xmin*(particle.samplepoints(b1)-b1.pmin)).*psi.data + psi_shifted = exp.(1im*b2.xmin*(particle.samplepoints(b1) .- b1.pmin)).*psi.data psi_fft = exp.(1im*b1.pmin*particle.samplepoints(b2)).*ifft(psi_shifted)*sqrt(N) return Ket(b2, psi_fft) end @@ -77,7 +78,7 @@ function transformation(b1::PositionBasis, b2::MomentumBasis, psi::Ket) throw(IncompatibleBases()) end N = b1.N - psi_shifted = exp.(-1im*b2.pmin*(particle.samplepoints(b1)-b1.xmin)).*psi.data + psi_shifted = exp.(-1im*b2.pmin*(particle.samplepoints(b1) .- b1.xmin)).*psi.data psi_fft = exp.(-1im*b1.xmin*particle.samplepoints(b2)).*fft(psi_shifted)/sqrt(N) return Ket(b2, psi_fft) end @@ -126,30 +127,30 @@ operators.gemv!(Complex(1.), Txp, psi0_bp, Complex(1.), psi_) alpha = complex(3.2) beta = complex(-1.2) -randdata1 = rand(Complex128, N) -randdata2 = rand(Complex128, N) +randdata1 = rand(ComplexF64, N) +randdata2 = rand(ComplexF64, N) state = Ket(basis_position, randdata1) result_ = Ket(basis_momentum, copy(randdata2)) -result0 = alpha*full(Tpx)*state + beta*result_ +result0 = alpha*dense(Tpx)*state + beta*result_ operators.gemv!(alpha, Tpx, state, beta, result_) @test 1e-11 > norm(result0 - result_) state = Bra(basis_position, randdata1) result_ = Bra(basis_momentum, copy(randdata2)) -result0 = alpha*state*full(Txp) + beta*result_ +result0 = alpha*state*dense(Txp) + beta*result_ operators.gemv!(alpha, state, Txp, beta, result_) @test 1e-11 > norm(result0 - result_) state = Ket(basis_momentum, randdata1) result_ = Ket(basis_position, copy(randdata2)) -result0 = alpha*full(Txp)*state + beta*result_ +result0 = alpha*dense(Txp)*state + beta*result_ operators.gemv!(alpha, Txp, state, beta, result_) @test 1e-11 > norm(result0 - result_) state = Bra(basis_momentum, randdata1) result_ = Bra(basis_position, copy(randdata2)) -result0 = alpha*state*full(Tpx) + beta*result_ +result0 = alpha*state*dense(Tpx) + beta*result_ operators.gemv!(alpha, state, Tpx, beta, result_) @test 1e-11 > norm(result0 - result_) @@ -183,28 +184,28 @@ operators.gemm!(Complex(1.), rho0_px, Txp, Complex(0.), rho_) alpha = complex(3.2) beta = complex(-1.2) -randdata1 = rand(Complex128, N, N) -randdata2 = rand(Complex128, N, N) +randdata1 = rand(ComplexF64, N, N) +randdata2 = rand(ComplexF64, N, N) op = DenseOperator(basis_position, basis_position, randdata1) result_ = DenseOperator(basis_momentum, basis_position, copy(randdata2)) -result0 = alpha*full(Tpx)*op + beta*result_ +result0 = alpha*dense(Tpx)*op + beta*result_ operators.gemm!(alpha, Tpx, op, beta, result_) @test 1e-11 > D(result0, result_) result_ = DenseOperator(basis_position, basis_momentum, copy(randdata2)) -result0 = alpha*op*full(Txp) + beta*result_ +result0 = alpha*op*dense(Txp) + 
beta*result_ operators.gemm!(alpha, op, Txp, beta, result_) @test 1e-11 > D(result0, result_) op = DenseOperator(basis_momentum, basis_momentum, randdata1) result_ = DenseOperator(basis_position, basis_momentum, copy(randdata2)) -result0 = alpha*full(Txp)*op + beta*result_ +result0 = alpha*dense(Txp)*op + beta*result_ operators.gemm!(alpha, Txp, op, beta, result_) @test 1e-11 > D(result0, result_) result_ = DenseOperator(basis_momentum, basis_position, copy(randdata2)) -result0 = alpha*op*full(Tpx) + beta*result_ +result0 = alpha*op*dense(Tpx) + beta*result_ operators.gemm!(alpha, op, Tpx, beta, result_) @test 1e-11 > D(result0, result_) @@ -217,7 +218,7 @@ operators.gemv!(Complex(1.), LazyProduct(Txp, Tpx), psi0_bx, Complex(0.), psi_) @test 1e-12 > norm(Txp*(Tpx*psi0_bx) - psi0_bx) psi_ = deepcopy(psi0_bx) -I = full(identityoperator(basis_momentum)) +I = dense(identityoperator(basis_momentum)) operators.gemv!(Complex(1.), LazyProduct(Txp, I, Tpx), psi0_bx, Complex(0.), psi_) @test 1e-12 > norm(psi_ - psi0_bx) @test 1e-12 > norm(Txp*I*(Tpx*psi0_bx) - psi0_bx) @@ -241,21 +242,21 @@ x0 = [5.1, -0.2] p0 = [-3.2, 1.33] sigma = [1., 0.9] sigma_x = sigma./sqrt(2) -sigma_p = 1./(sigma.*sqrt(2)) +sigma_p = 1.0 ./ (sigma.*sqrt(2)) Txp = transform(tensor(basis_position...), tensor(basis_momentum...)) Tpx = transform(tensor(basis_momentum...), tensor(basis_position...)) Txp_sub = [transform(basis_position[i], basis_momentum[i]) for i=1:2] Tpx_sub = dagger.(Txp_sub) -Txp_full = full.(Txp_sub) -Txp_comp = tensor(Txp_full...) +Txp_dense = dense.(Txp_sub) +Txp_comp = tensor(Txp_dense...) -difference = (full(Txp) - Txp_comp).data -@test isapprox(difference, zeros(difference); atol=1e-12) +difference = (dense(Txp) - Txp_comp).data +@test isapprox(difference, zero(difference); atol=1e-12) -psi0_x = gaussianstate.(basis_position, x0, p0, sigma_x) -psi0_p = gaussianstate.(basis_momentum, x0, p0, sigma_p) +psi0_x = [gaussianstate(basis_position[i], x0[i], p0[i], sigma_x[i]) for i=1:2] +psi0_p = [gaussianstate(basis_momentum[i], x0[i], p0[i], sigma_p[i]) for i=1:2] psi_p_fft = Tpx*tensor(psi0_x...) psi_p_fft2 = tensor((Tpx_sub.*psi0_x)...) @@ -273,12 +274,12 @@ psi_x_fft = dagger(tensor(psi0_p...))*Tpx psi_x_fft2 = tensor((dagger.(psi0_p).*Tpx_sub)...)
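The two-dimensional block above builds per-axis Gaussian widths and states; on 0.7 the reciprocal width must be an explicit elementwise broadcast, and the per-subsystem states are built with a comprehension. A self-contained sketch of that idiom follows; the grid parameters are made up for illustration and are not the ones used in the test file:

    using QuantumOptics

    basis_position = [PositionBasis(-10., 10., 32) for i=1:2]   # made-up small grids
    basis_momentum = [MomentumBasis(b) for b in basis_position]

    x0 = [5.1, -0.2]; p0 = [-3.2, 1.33]; sigma = [1., 0.9]
    sigma_x = sigma ./ sqrt(2)
    sigma_p = 1.0 ./ (sigma .* sqrt(2))   # elementwise reciprocal; a bare 1.0/vector does not broadcast

    psi0_x = [gaussianstate(basis_position[i], x0[i], p0[i], sigma_x[i]) for i=1:2]
    Tpx = transform(tensor(basis_momentum...), tensor(basis_position...))
    psi_p_fft = Tpx*tensor(psi0_x...)     # momentum-space state via the FFT operator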
@test norm(psi_p_fft - psi_p_fft2) < 1e-15 -difference = (full(Txp) - identityoperator(DenseOperator, Txp.basis_l)*Txp).data -@test isapprox(difference, zeros(difference); atol=1e-12) +difference = (dense(Txp) - identityoperator(DenseOperator, Txp.basis_l)*Txp).data +@test isapprox(difference, zero(difference); atol=1e-12) @test_throws AssertionError transform(tensor(basis_position...), tensor(basis_position...)) @test_throws particle.IncompatibleBases transform(SpinBasis(1//2)^2, SpinBasis(1//2)^2) -@test full(Txp) == full(Txp_sub[1] ⊗ Txp_sub[2]) +@test dense(Txp) == dense(Txp_sub[1] ⊗ Txp_sub[2]) # Test ket only FFTs Txp = transform(tensor(basis_position...), tensor(basis_momentum...); ket_only=true) @@ -328,17 +329,17 @@ psi2_fft2 = tensor(Tpx_sub[1]*psi0_x[1], psi_fock, Tpx_sub[2]*psi0_x[2]) Txp = transform(basis_l, basis_r) Txp_sub = [transform(basis_position[i], basis_momentum[i]) for i=1:2] -difference = (full(Txp) - tensor(full(Txp_sub[1]), full(one(bc)), full(Txp_sub[2]))).data -@test isapprox(difference, zeros(difference); atol=1e-12) +difference = (dense(Txp) - tensor(dense(Txp_sub[1]), dense(one(bc)), dense(Txp_sub[2]))).data +@test isapprox(difference, zero(difference); atol=1e-12) basis_l = tensor(bc, basis_position[1], basis_position[2]) basis_r = tensor(bc, basis_momentum[1], basis_momentum[2]) Txp2 = transform(basis_l, basis_r) Tpx2 = transform(basis_r, basis_l) -difference = (full(Txp) - permutesystems(full(Txp2), [2, 1, 3])).data -@test isapprox(difference, zeros(difference); atol=1e-13) -difference = (full(dagger(Txp)) - permutesystems(full(Tpx2), [2, 1, 3])).data -@test isapprox(difference, zeros(difference); atol=1e-13) +difference = (dense(Txp) - permutesystems(dense(Txp2), [2, 1, 3])).data +@test isapprox(difference, zero(difference); atol=1e-13) +difference = (dense(dagger(Txp)) - permutesystems(dense(Tpx2), [2, 1, 3])).data +@test isapprox(difference, zero(difference); atol=1e-13) # Test potentialoperator in more than 1D N = [21, 18] @@ -361,7 +362,7 @@ bcomp_pos = tensor(basis_position...) 
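Several checks above compare a difference matrix against zero(difference); in 0.6 the same thing was spelled zeros(difference), which no longer exists on 0.7. A tiny stand-alone illustration of the replacement (the array contents are arbitrary):

    using Test

    A = rand(ComplexF64, 3, 3)               # arbitrary data; only shape and eltype matter
    Z = zero(A)                              # 0.6: zeros(A); same shape and eltype, all zeros
    @test isapprox(A - A, Z; atol=1e-12)     # the comparison pattern used for difference above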
Txp = transform(bcomp_pos, bcomp_mom) Tpx = transform(bcomp_mom, bcomp_pos) xsample, ysample = samplepoints.(basis_position) -V_op = Tpx*full(diagonaloperator(bcomp_pos, [V(x, y) for y in ysample for x in xsample]))*Txp +V_op = Tpx*dense(diagonaloperator(bcomp_pos, [V(x, y) for y in ysample for x in xsample]))*Txp V_op2 = potentialoperator(bcomp_mom, V) @test V_op == V_op2 diff --git a/test/test_phasespace.jl b/test/test_phasespace.jl old mode 100755 new mode 100644 index 22b6f8a6..0fc61413 --- a/test/test_phasespace.jl +++ b/test/test_phasespace.jl @@ -1,11 +1,12 @@ -using Base.Test +using Test using QuantumOptics +using Random, LinearAlgebra @testset "phasespace" begin srand(0) -D(op1::Operator, op2::Operator) = abs(tracedistance_nh(full(op1), full(op2))) +D(op1::Operator, op2::Operator) = abs(tracedistance_nh(dense(op1), dense(op2))) # Test quasi-probability functions b = FockBasis(100) @@ -32,7 +33,7 @@ Wrho_fock = wigner(rho_fock, X, Y) laguerre3(x) = (-x^3+9x^2-18x+6)/6 for (i,x)=enumerate(X), (j,y)=enumerate(Y) - beta = 1./sqrt(2)*complex(x, y) + beta = 1.0/sqrt(2)*complex(x, y) betastate = coherentstate(b, beta) q_coherent = 1/pi*exp(-abs2(alpha-beta)) @@ -103,7 +104,7 @@ ssq = sx^2 + sy^2 + sz^2 qsu2sx = qfuncsu2(csssx,theta,phi) qsu2sxdm = qfuncsu2(dmcsssx,theta,phi) res = 250 -costhetam = Array{Float64}(res,2*res) +costhetam = Array{Float64}(undef,res,2*res) for i = 1:res, j = 1:2*res costhetam[i,j] = sin(i*1pi/(res-1)) end diff --git a/test/test_polynomials.jl b/test/test_polynomials.jl index 3ee9f8ef..1ad9d046 100644 --- a/test/test_polynomials.jl +++ b/test/test_polynomials.jl @@ -1,4 +1,4 @@ -using Base.Test +using Test using QuantumOptics using QuantumOptics.polynomials @@ -11,7 +11,7 @@ x0 = 1.3 @test horner(c, x0) == c[1] + c[2]*x0 + c[3]*x0^2 # Test Hermite polynomials -an = Vector{Vector{Int}}(8) +an = Vector{Vector{Int}}(undef, 8) an[1] = [1] an[2] = [0,2] an[3] = [-2, 0, 4] diff --git a/test/test_printing.jl b/test/test_printing.jl index c2bb00d4..fd50f0a0 100644 --- a/test/test_printing.jl +++ b/test/test_printing.jl @@ -1,4 +1,4 @@ -using Base.Test +using Test using QuantumOptics @testset "printing" begin @@ -21,9 +21,9 @@ b_mb = ManyBodyBasis(b_fock, fermionstates(b_fock, 2)) @test sprint(show, b_mb) == "ManyBody(onebodybasis=Fock(cutoff=4), states:10)" state = fockstate(FockBasis(2), 2) -@test sprint(show, state) == "Ket(dim=3)\n basis: Fock(cutoff=2)\n 0.0+0.0im\n 0.0+0.0im\n 1.0+0.0im" +@test sprint(show, state) == "Ket(dim=3)\n basis: Fock(cutoff=2)\n 0.0 + 0.0im\n 0.0 + 0.0im\n 1.0 + 0.0im" state = dagger(state) -@test sprint(show, state) == "Bra(dim=3)\n basis: Fock(cutoff=2)\n 0.0-0.0im\n 0.0-0.0im\n 1.0-0.0im" +@test sprint(show, state) == "Bra(dim=3)\n basis: Fock(cutoff=2)\n 0.0 - 0.0im\n 0.0 - 0.0im\n 1.0 - 0.0im" op = DenseOperator(FockBasis(1)) @test sprint(show, op) == "DenseOperator(dim=2x2) @@ -49,7 +49,7 @@ op = SparseOperator(b_fock, b_fock ⊗ SpinBasis(1//2)) op = SparseOperator(b_fock) op.data[2,2] = 1 -@test replace(sprint(show, op), "\t", " ") == "SparseOperator(dim=5x5) +@test replace(sprint(show, op), "\t" => " ") == "SparseOperator(dim=5x5) basis: Fock(cutoff=4)\n [2, 2] = 1.0+0.0im" op = LazySum(SparseOperator(b_fock), DenseOperator(b_fock)) @@ -82,23 +82,23 @@ Tpx = transform(bp, bx) QuantumOptics.set_printing(standard_order=true) n = fockstate(b_fock, 1) -@test sprint(show, n) == "Ket(dim=3)\n basis: Fock(cutoff=2)\n 0.0+0.0im\n 1.0+0.0im\n 0.0+0.0im" +@test sprint(show, n) == "Ket(dim=3)\n basis: Fock(cutoff=2)\n 0.0 + 0.0im\n 1.0 + 
0.0im\n 0.0 + 0.0im" spin1 = spindown(b_spin) spin2 = spinup(b_spin) state = n ⊗ spin1 ⊗ spin2 state_data = kron(n.data, spin1.data, spin2.data) -type_len = length("Complex{Float64}") -state_data_str = join(split(sprint(show, state_data)[type_len+2:end-1], ','), "\n") +state_data_str = sprint(Base.print_array, state_data) @test sprint(show, state) == "Ket(dim=12) - basis: [Fock(cutoff=2) ⊗ Spin(1/2) ⊗ Spin(1/2)]\n "*state_data_str + basis: [Fock(cutoff=2) ⊗ Spin(1/2) ⊗ Spin(1/2)]\n"*state_data_str -state_data_str = join(split(sprint(show, state_data')[type_len+2:end-1]), "\n ") +state_data_str = sprint(Base.print_array, conj.(state_data)) @test sprint(show, dagger(state)) == "Bra(dim=12) - basis: [Fock(cutoff=2) ⊗ Spin(1/2) ⊗ Spin(1/2)]\n "*state_data_str + basis: [Fock(cutoff=2) ⊗ Spin(1/2) ⊗ Spin(1/2)]\n"*state_data_str op = dm(state) op_data = state_data * state_data' +type_len = length("Complex{Float64}") op_data_str1 = split(sprint(show, op_data)[type_len+2:end-1], ";") for i=1:length(op_data_str1) op_data_str1[i] = join(split(op_data_str1[i]), " ") @@ -117,7 +117,7 @@ paulix, pauliy = sigmax(b_spin), sigmay(b_spin) pauli = paulix ⊗ pauliy @test sprint(show, pauli) == "SparseOperator(dim=4x4)\n basis: [Spin(1/2) ⊗ Spin(1/2)]\n [4, 1] = 0.0+1.0im\n [3, 2] = 0.0-1.0im\n [2, 3] = 0.0+1.0im\n [1, 4] = 0.0-1.0im" @test sprint((io, x) -> show(IOContext(io, :limit=>true, :displaysize=>(20,80)), x), pauli ⊗ pauli) == "SparseOperator(dim=16x16)\n basis: [Spin(1/2) ⊗ Spin(1/2) ⊗ Spin(1/2) ⊗ Spin(1/2)]\n [16, 1] = -1.0+0.0im\n [15, 2] = 1.0+0.0im\n [14, 3] = -1.0+0.0im\n [13, 4] = 1.0+0.0im\n [12, 5] = 1.0+0.0im\n ⋮\n [6 , 11] = -1.0+0.0im\n [5 , 12] = 1.0+0.0im\n [4 , 13] = 1.0+0.0im\n [3 , 14] = -1.0-0.0im\n [2 , 15] = 1.0+0.0im\n [1 , 16] = -1.0-0.0im" -@test sprint(show, full(pauli)) == "DenseOperator(dim=4x4) +@test sprint(show, dense(pauli)) == "DenseOperator(dim=4x4) basis: [Spin(1/2) ⊗ Spin(1/2)] 0.0+0.0im 0.0+0.0im 0.0+0.0im 0.0-1.0im 0.0+0.0im 0.0+0.0im 0.0+1.0im 0.0+0.0im @@ -126,10 +126,10 @@ pauli = paulix ⊗ pauliy hadamard = DenseOperator(b_spin, 1/sqrt(2) * [1 1; 1 -1]) @test sprint(show, sigmax(b_spin) ⊗ hadamard) == "SparseOperator(dim=4x4)\n basis: [Spin(1/2) ⊗ Spin(1/2)]\n [3, 1] = 0.707107+0.0im\n [4, 1] = 0.707107+0.0im\n [3, 2] = 0.707107+0.0im\n [4, 2] = -0.707107+0.0im\n [1, 3] = 0.707107+0.0im\n [2, 3] = 0.707107+0.0im\n [1, 4] = 0.707107+0.0im\n [2, 4] = -0.707107+0.0im" -@test sprint((io, x) -> show(IOContext(io, :limit=>true, :displaysize=>(20,80)), x), full(sigmax(b_spin) ⊗ hadamard ⊗ hadamard ⊗ hadamard)) == +@test sprint((io, x) -> show(IOContext(io, :limit=>true, :displaysize=>(20,80)), x), dense(sigmax(b_spin) ⊗ hadamard ⊗ hadamard ⊗ hadamard)) == "DenseOperator(dim=16x16)\n basis: [Spin(1/2) ⊗ Spin(1/2) ⊗ Spin(1/2) ⊗ Spin(1/2)]\n 0.0+0.0im 0.0+0.0im … 0.353553+0.0im 0.353553+0.0im\n 0.0+0.0im 0.0+0.0im 0.353553+0.0im -0.353553+0.0im\n 0.0+0.0im 0.0+0.0im -0.353553+0.0im -0.353553+0.0im\n 0.0+0.0im 0.0+0.0im -0.353553+0.0im 0.353553-0.0im\n 0.0+0.0im 0.0+0.0im -0.353553+0.0im -0.353553+0.0im\n 0.0+0.0im 0.0+0.0im … -0.353553+0.0im 0.353553-0.0im\n 0.0+0.0im 0.0+0.0im 0.353553+0.0im 0.353553+0.0im\n 0.0+0.0im 0.0+0.0im 0.353553+0.0im -0.353553+0.0im\n 0.353553+0.0im 0.353553+0.0im 0.0+0.0im 0.0+0.0im\n 0.353553+0.0im -0.353553+0.0im 0.0+0.0im 0.0+0.0im\n 0.353553+0.0im 0.353553+0.0im … 0.0+0.0im 0.0+0.0im\n 0.353553+0.0im -0.353553+0.0im 0.0+0.0im 0.0+0.0im\n 0.353553+0.0im 0.353553+0.0im 0.0+0.0im 0.0+0.0im\n 0.353553+0.0im -0.353553+0.0im 0.0+0.0im 0.0+0.0im\n 
0.353553+0.0im 0.353553+0.0im 0.0+0.0im 0.0+0.0im\n 0.353553+0.0im -0.353553+0.0im … 0.0+0.0im 0.0+0.0im" -@test sprint((io, x) -> show(IOContext(io, :limit=>true, :displaysize=>(16,80)), x), full(sigmax(b_spin) ⊗ hadamard ⊗ hadamard ⊗ hadamard)) == +@test sprint((io, x) -> show(IOContext(io, :limit=>true, :displaysize=>(16,80)), x), dense(sigmax(b_spin) ⊗ hadamard ⊗ hadamard ⊗ hadamard)) == "DenseOperator(dim=16x16)\n basis: [Spin(1/2) ⊗ Spin(1/2) ⊗ Spin(1/2) ⊗ Spin(1/2)]\n 0.0+0.0im 0.0+0.0im … 0.353553+0.0im 0.353553+0.0im\n 0.0+0.0im 0.0+0.0im 0.353553+0.0im -0.353553+0.0im\n 0.0+0.0im 0.0+0.0im -0.353553+0.0im -0.353553+0.0im\n 0.0+0.0im 0.0+0.0im -0.353553+0.0im 0.353553-0.0im\n 0.0+0.0im 0.0+0.0im -0.353553+0.0im -0.353553+0.0im\n 0.0+0.0im 0.0+0.0im … -0.353553+0.0im 0.353553-0.0im\n ⋮ ⋱ ⋮ \n 0.353553+0.0im -0.353553+0.0im 0.0+0.0im 0.0+0.0im\n 0.353553+0.0im 0.353553+0.0im 0.0+0.0im 0.0+0.0im\n 0.353553+0.0im -0.353553+0.0im 0.0+0.0im 0.0+0.0im\n 0.353553+0.0im 0.353553+0.0im 0.0+0.0im 0.0+0.0im\n 0.353553+0.0im -0.353553+0.0im … 0.0+0.0im 0.0+0.0im" nlevel1 = basisstate(NLevelBasis(4), 1) @@ -141,9 +141,9 @@ nlevel3 = basisstate(NLevelBasis(3), 3) # Test switching back QuantumOptics.set_printing(standard_order=false) state_data = kron(spin2.data, spin1.data, n.data) -state_data_str = join(split(sprint(show, state_data)[type_len+2:end-1], ','), "\n") +state_data_str = sprint(Base.print_array, state_data) @test sprint(show, state) == "Ket(dim=12) - basis: [Fock(cutoff=2) ⊗ Spin(1/2) ⊗ Spin(1/2)]\n "*state_data_str + basis: [Fock(cutoff=2) ⊗ Spin(1/2) ⊗ Spin(1/2)]\n"*state_data_str end # testset diff --git a/test/test_semiclassical.jl b/test/test_semiclassical.jl index 95236818..678f4d59 100644 --- a/test/test_semiclassical.jl +++ b/test/test_semiclassical.jl @@ -1,5 +1,6 @@ -using Base.Test +using Test using QuantumOptics +using LinearAlgebra @testset "semiclassical" begin @@ -41,7 +42,7 @@ x_rho = semiclassical.State(rho, [complex(1., 0.)]) @test variance(n, x_rho) ≈ abs2(alpha) @test variance(n, [x_ket, x_rho]) ≈ [abs2(alpha), abs2(alpha)] -# Test partial trace +# Test partial tr b1 = GenericBasis(3) b2 = GenericBasis(5) b = b1 ⊗ b2 @@ -58,7 +59,7 @@ x = semiclassical.State(rho, [0.4, -0.3im]) # Test dm function b = GenericBasis(4) psi = randstate(b) -u = Complex128[complex(2., 3.)] +u = ComplexF64[complex(2., 3.)] state = semiclassical.State(psi, u) @test dm(state) == semiclassical.State(dm(psi), u) @@ -66,10 +67,10 @@ state = semiclassical.State(psi, u) # Test casting between and semiclassical states and complex vectors b = GenericBasis(4) rho = randoperator(b) -u = rand(Complex128, 3) +u = rand(ComplexF64, 3) state = semiclassical.State(rho, u) -state_ = semiclassical.State(randoperator(b), rand(Complex128, 3)) -x = Vector{Complex128}(16+3) +state_ = semiclassical.State(randoperator(b), rand(ComplexF64, 3)) +x = Vector{ComplexF64}(undef, 19) semiclassical.recast!(state, x) semiclassical.recast!(x, state_) @test state_ == state @@ -107,7 +108,7 @@ function fclassical(t, quantumstate, u, du) du[1] = -1*u[1] end -state0 = semiclassical.State(psi0, Complex128[complex(2., 3.)]) +state0 = semiclassical.State(psi0, ComplexF64[complex(2., 3.)]) function f(t, state::semiclassical.State{Ket}) @test 1e-5 > norm(state.quantum - U(t)*psi0) @test 1e-5 > abs(state.classical[1] - state0.classical[1]*exp(-t)) diff --git a/test/test_sortedindices.jl b/test/test_sortedindices.jl index f748f472..79d3ed6a 100644 --- a/test/test_sortedindices.jl +++ b/test/test_sortedindices.jl @@ -1,4 +1,4 @@ 
-using Base.Test +using Test using QuantumOptics diff --git a/test/test_sparsematrix.jl b/test/test_sparsematrix.jl index 476c7366..c6bb6aba 100644 --- a/test/test_sparsematrix.jl +++ b/test/test_sparsematrix.jl @@ -1,21 +1,22 @@ -using Base.Test +using Test using QuantumOptics.sparsematrix +using SparseArrays, LinearAlgebra # SparseMatrix = quantumoptics.sparsematrix.SparseMatrix -const SparseMatrix = SparseMatrixCSC{Complex128, Int} +const SparseMatrix = SparseMatrixCSC{ComplexF64, Int} @testset "sparsematrix" begin # Set up test matrices -A = rand(Complex128, 5, 5) +A = rand(ComplexF64, 5, 5) A_sp = sparse(A) -B = eye(Complex128, 5) -B_sp = speye(Complex128, 5) +B = Matrix{ComplexF64}(I, 5, 5) +B_sp = sparse(ComplexF64(1)*I, 5, 5) -C = rand(Complex128, 3, 3) -C[2,:] = 0 +C = rand(ComplexF64, 3, 3) +C[2,:] .= 0 C_sp = sparse(C) R_sp = A_sp + B_sp @@ -23,13 +24,13 @@ R = A + B # Test arithmetic -@test 0 ≈ norm(full(R_sp) - R) -@test 0 ≈ norm(full(Complex128(0.5,0)*A_sp) - 0.5*A) -@test 0 ≈ norm(full(A_sp/2) - A/2) -@test 0 ≈ norm(full(A_sp*B_sp) - A*B) +@test 0 ≈ norm(Matrix(R_sp) - R) +@test 0 ≈ norm(Matrix(ComplexF64(0.5,0)*A_sp) - 0.5*A) +@test 0 ≈ norm(Matrix(A_sp/2) - A/2) +@test 0 ≈ norm(Matrix(A_sp*B_sp) - A*B) # Test kronecker product -@test 0 ≈ norm(full(kron(A_sp, C_sp)) - kron(A, C)) -@test 0 ≈ norm(full(kron(A_sp, B_sp)) - kron(A, B)) +@test 0 ≈ norm(Matrix(kron(A_sp, C_sp)) - kron(A, C)) +@test 0 ≈ norm(Matrix(kron(A_sp, B_sp)) - kron(A, B)) end # testset diff --git a/test/test_spectralanalysis.jl b/test/test_spectralanalysis.jl index ebff6831..14d4e895 100644 --- a/test/test_spectralanalysis.jl +++ b/test/test_spectralanalysis.jl @@ -1,5 +1,6 @@ -using Base.Test +using Test using QuantumOptics +using LinearAlgebra, SparseArrays, Random mutable struct SpectralanalysisTestOperator <: Operator end @@ -7,7 +8,7 @@ mutable struct SpectralanalysisTestOperator <: Operator end srand(0) -sprandop(b) = sparse(DenseOperator(b, rand(Complex128, length(b), length(b)))) +sprandop(b) = sparse(DenseOperator(b, rand(ComplexF64, length(b), length(b)))) # Test diagonalization @test_throws ArgumentError eigenstates(SpectralanalysisTestOperator()) @@ -20,9 +21,9 @@ sprandop(b) = sparse(DenseOperator(b, rand(Complex128, length(b), length(b)))) b = GenericBasis(5) a = randoperator(b) H = (a+dagger(a))/2 -U = expm(1im*H) +U = exp(1im*H) d = [-3, -2.6, -0.1, 0.0, 0.6] -D = DenseOperator(b, diagm(d)) +D = DenseOperator(b, diagm(0 => d)) Dsp = sparse(D) @test eigenenergies(D) ≈ d @test eigenenergies(Dsp, 3, info=false) ≈ d[1:3] @@ -48,9 +49,9 @@ end b = GenericBasis(5) a = randoperator(b) H = (a+dagger(a))/2 -U = expm(1im*H) +U = exp(1im*H) d = [-3+0.2im, -2.6-0.1im, -0.1+0.5im, 0.0, 0.6+0.3im] -D = DenseOperator(b, diagm(d)) +D = DenseOperator(b, diagm(0 => d)) Dsp = sparse(D) @test eigenenergies(D; warning=false) ≈ d @test eigenenergies(Dsp, 3; warning=false, info=false) ≈ d[1:3] @@ -78,9 +79,9 @@ sx = sigmax(spinbasis) sy = sigmay(spinbasis) sz = sigmaz(spinbasis) twospinbasis = spinbasis ⊗ spinbasis -Sx = full(sum([embed(twospinbasis, i, sx) for i=1:2]))/2. -Sy = full(sum([embed(twospinbasis, i, sy) for i=1:2]))/2. -Sz = full(sum([embed(twospinbasis, i, sz) for i=1:2]))/2. +Sx = dense(sum([embed(twospinbasis, i, sx) for i=1:2]))/2. +Sy = dense(sum([embed(twospinbasis, i, sy) for i=1:2]))/2. +Sz = dense(sum([embed(twospinbasis, i, sz) for i=1:2]))/2. 
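The sparse-matrix and spectral-analysis hunks above collect most of the LinearAlgebra/SparseArrays replacements in one place. A hedged stand-alone sketch of those idioms, with arbitrary sizes and values that are not taken from the tests:

    using Test, LinearAlgebra, SparseArrays

    B    = Matrix{ComplexF64}(I, 5, 5)        # 0.6: eye(Complex128, 5)
    B_sp = sparse(ComplexF64(1)*I, 5, 5)      # 0.6: speye(Complex128, 5)
    @test norm(Matrix(B_sp) - B) == 0         # 0.6: full(B_sp)

    d  = [-3.0, -2.6, -0.1, 0.0, 0.6]
    Dm = diagm(0 => d)                        # 0.6: diagm(d)
    U  = exp(1im*Dm)                          # 0.6: expm(1im*Dm)

    C = rand(ComplexF64, 3, 3)
    C[2, :] .= 0                              # broadcast assignment now requires the dot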
Ssq = Sx^2 + Sy^2 + Sz^2 d, v = simdiag([Sz, Ssq]) @test d[1] == [-1.0, 0, 0, 1.0] @@ -88,16 +89,16 @@ d, v = simdiag([Sz, Ssq]) @test_throws ErrorException simdiag([Sx, Sy]) threespinbasis = spinbasis ⊗ spinbasis ⊗ spinbasis -Sx3 = full(sum([embed(threespinbasis, i, sx) for i=1:3])/2.) -Sy3 = full(sum([embed(threespinbasis, i, sy) for i=1:3])/2.) -Sz3 = full(sum([embed(threespinbasis, i, sz) for i=1:3])/2.) +Sx3 = dense(sum([embed(threespinbasis, i, sx) for i=1:3])/2.) +Sy3 = dense(sum([embed(threespinbasis, i, sy) for i=1:3])/2.) +Sz3 = dense(sum([embed(threespinbasis, i, sz) for i=1:3])/2.) Ssq3 = Sx3^2 + Sy3^2 + Sz3^2 d3, v3 = simdiag([Ssq3, Sz3]) -dsq3_std = eigenenergies(full(Ssq3)) -@test diagm(dsq3_std) ≈ v3'*Ssq3.data*v3 +dsq3_std = eigenenergies(dense(Ssq3)) +@test diagm(0 => dsq3_std) ≈ v3'*Ssq3.data*v3 fockbasis = FockBasis(4) @test_throws ErrorException simdiag([Sy3, Sz3]) -@test_throws ErrorException simdiag([full(destroy(fockbasis)), full(create(fockbasis))]) +@test_throws ErrorException simdiag([dense(destroy(fockbasis)), dense(create(fockbasis))]) end # testset diff --git a/test/test_spin.jl b/test/test_spin.jl index 84d9a385..53becbaa 100644 --- a/test/test_spin.jl +++ b/test/test_spin.jl @@ -1,9 +1,10 @@ -using Base.Test +using Test using QuantumOptics +using LinearAlgebra @testset "spin" begin -D(op1::Operator, op2::Operator) = abs(tracedistance_nh(full(op1), full(op2))) +D(op1::Operator, op2::Operator) = abs(tracedistance_nh(dense(op1), dense(op2))) # Test creation @test_throws AssertionError SpinBasis(1//3) @@ -23,9 +24,9 @@ for spinnumber=[1//2, 1, 3//2, 4//2] # Test traces - @test 0 == trace(sx) - @test 0 == trace(sy) - @test 0 == trace(sz) + @test 0 == tr(sx) + @test 0 == tr(sy) + @test 0 == tr(sz) # Test kommutation relations diff --git a/test/test_state_definitions.jl b/test/test_state_definitions.jl index 58a173d9..fde9987a 100644 --- a/test/test_state_definitions.jl +++ b/test/test_state_definitions.jl @@ -1,4 +1,4 @@ -using Base.Test +using Test using QuantumOptics @testset "state_definitions" begin diff --git a/test/test_states.jl b/test/test_states.jl index 12d83478..c45e2351 100644 --- a/test/test_states.jl +++ b/test/test_states.jl @@ -1,5 +1,6 @@ -using Base.Test +using Test using QuantumOptics +using LinearAlgebra, Random @testset "states" begin @@ -74,13 +75,13 @@ ket_b3 = randstate(b3) @test 1e-14 > D((bra_b1 ⊗ bra_b2) ⊗ bra_b3, bra_b1 ⊗ (bra_b2 ⊗ bra_b3)) ket_b1b2 = ket_b1 ⊗ ket_b2 -shape = (ket_b1b2.basis.shape...) -idx = sub2ind(shape, 2, 3) +shape = (ket_b1b2.basis.shape...,) +idx = LinearIndices(shape)[2, 3] @test ket_b1b2.data[idx] == ket_b1.data[2]*ket_b2.data[3] ket_b1b2b3 = ket_b1 ⊗ ket_b2 ⊗ ket_b3 @test ket_b1b2b3 == tensor(ket_b1, ket_b2, ket_b3) -shape = (ket_b1b2b3.basis.shape...) 
-idx = sub2ind(shape, 1, 4, 3) +shape = (ket_b1b2b3.basis.shape...,) +idx = LinearIndices(shape)[1, 4, 3] @test ket_b1b2b3.data[idx] == ket_b1.data[1]*ket_b2.data[4]*ket_b3.data[3] diff --git a/test/test_steadystate.jl b/test/test_steadystate.jl index 98b73aa5..4797b70d 100644 --- a/test/test_steadystate.jl +++ b/test/test_steadystate.jl @@ -1,5 +1,6 @@ -using Base.Test +using Test using QuantumOptics +using LinearAlgebra @testset "steadystate" begin @@ -27,13 +28,13 @@ Ha = embed(basis, 1, 0.5*ωa*sz) Hc = embed(basis, 2, ωc*number(fockbasis)) Hint = sm ⊗ create(fockbasis) + sp ⊗ destroy(fockbasis) H = Ha + Hc + Hint -Hdense = full(H) +Hdense = dense(H) Ja = embed(basis, 1, sqrt(γ)*sm) Ja2 = embed(basis, 1, sqrt(0.5*γ)*sp) Jc = embed(basis, 2, sqrt(κ)*destroy(fockbasis)) J = [Ja, Jc] -Jdense = map(full, J) +Jdense = map(dense, J) Ψ₀ = spinup(spinbasis) ⊗ fockstate(fockbasis, 2) ρ₀ = dm(Ψ₀) @@ -67,7 +68,7 @@ ev, ops = steadystate.liouvillianspectrum(Hdense, Jdense) @test ev[sortperm(abs.(ev))] == ev ev, ops = steadystate.liouvillianspectrum(H, sqrt(2).*J; rates=0.5.*ones(length(J)), nev = 1) -@test tracedistance(ρss, ops[1]/trace(ops[1])) < 1e-12 +@test tracedistance(ρss, ops[1]/tr(ops[1])) < 1e-12 @test ev[sortperm(abs.(ev))] == ev # Compute steady-state photon number of a driven cavity (analytically: η^2/κ^2) @@ -83,7 +84,7 @@ nss = expect(create(fockbasis)*destroy(fockbasis), ρss[end]) nss = expect(create(fockbasis)*destroy(fockbasis), ρss) @test n_an - real(nss) < 1e-3 -ρss = steadystate.eigenvector(full(Hp), map(full, Jp)) +ρss = steadystate.eigenvector(dense(Hp), map(dense, Jp)) nss = expect(create(fockbasis)*destroy(fockbasis), ρss) @test n_an - real(nss) < 1e-3 diff --git a/test/test_stochastic_definitions.jl b/test/test_stochastic_definitions.jl index 48990d34..9b8f9ca6 100644 --- a/test/test_stochastic_definitions.jl +++ b/test/test_stochastic_definitions.jl @@ -1,4 +1,4 @@ -using Base.Test +using Test using QuantumOptics @testset "stochastic_definitions" begin diff --git a/test/test_stochastic_master.jl b/test/test_stochastic_master.jl index b47338e1..f11c5334 100644 --- a/test/test_stochastic_master.jl +++ b/test/test_stochastic_master.jl @@ -1,5 +1,6 @@ -using Base.Test +using Test using QuantumOptics +using LinearAlgebra @testset "stochastic_master" begin diff --git a/test/test_stochastic_schroedinger.jl b/test/test_stochastic_schroedinger.jl index 85422da6..9390a59e 100644 --- a/test/test_stochastic_schroedinger.jl +++ b/test/test_stochastic_schroedinger.jl @@ -1,5 +1,7 @@ -using Base.Test +using Test using QuantumOptics +using LinearAlgebra +import StochasticDiffEq @testset "stochastic_schroedinger" begin diff --git a/test/test_stochastic_semiclassical.jl b/test/test_stochastic_semiclassical.jl index 7ea20b4f..a1a82a2d 100644 --- a/test/test_stochastic_semiclassical.jl +++ b/test/test_stochastic_semiclassical.jl @@ -1,5 +1,6 @@ -using Base.Test +using Test using QuantumOptics +using LinearAlgebra @testset "stochastic_semiclassical" begin @@ -23,7 +24,7 @@ Jdagger = dagger.(J) C .*= rates Cdagger = dagger.(C) -u0 = Complex128[0.1, 0.5] +u0 = ComplexF64[0.1, 0.5] ψ_sc = semiclassical.State(ψ0, u0) ρ_sc = dm(ψ_sc) @@ -77,10 +78,10 @@ tout, ψt_sc = stochastic.schroedinger_semiclassical(T_short, ψ_sc, fquantum, f tout, ψt_sc = stochastic.schroedinger_semiclassical(T_short, ψ_sc, fquantum, fclassical; fstoch_quantum=fquantum_stoch, fstoch_classical=fclassical_stoch2, noise_processes=1, - noise_prototype_classical=zeros(Complex128, 2,2), dt=dt) + 
noise_prototype_classical=zeros(ComplexF64, 2,2), dt=dt) tout, ψt_sc = stochastic.schroedinger_semiclassical(T_short, ψ_sc, fquantum, fclassical; fstoch_classical=fclassical_stoch_ndiag, - noise_prototype_classical=zeros(Complex128, 2, 3), dt=dt) + noise_prototype_classical=zeros(ComplexF64, 2, 3), dt=dt) # Semiclassical master tout, ρt = stochastic.master_semiclassical(T, ρ_sc, fquantum_master, fclassical; @@ -89,7 +90,7 @@ tout, ρt = stochastic.master_semiclassical(T, ρ_sc, fquantum_master, fclassica fstoch_classical=fclassical_stoch, dt=dt) tout, ρt = stochastic.master_semiclassical(T, ψ_sc, fquantum_master, fclassical; fstoch_quantum=fstoch_q_master, fstoch_classical=fclassical_stoch2, - noise_prototype_classical=zeros(Complex128, 2, 2), dt=dt) + noise_prototype_classical=zeros(ComplexF64, 2, 2), dt=dt) # Test error messages @test_throws ArgumentError stochastic.schroedinger_semiclassical(T, ψ_sc, fquantum, fclassical) diff --git a/test/test_subspace.jl b/test/test_subspace.jl index 6d8370ce..606becee 100644 --- a/test/test_subspace.jl +++ b/test/test_subspace.jl @@ -1,4 +1,4 @@ -using Base.Test +using Test using QuantumOptics @testset "subspace" begin diff --git a/test/test_superoperators.jl b/test/test_superoperators.jl index 170bde3d..58d756d0 100644 --- a/test/test_superoperators.jl +++ b/test/test_superoperators.jl @@ -1,21 +1,22 @@ -using Base.Test +using Test using QuantumOptics +using SparseArrays @testset "superoperators" begin # Test creation b = GenericBasis(3) -@test_throws DimensionMismatch DenseSuperOperator((b, b), (b, b), zeros(Complex128, 3, 3)) -@test_throws DimensionMismatch SparseSuperOperator((b, b), (b, b), spzeros(Complex128, 3, 3)) +@test_throws DimensionMismatch DenseSuperOperator((b, b), (b, b), zeros(ComplexF64, 3, 3)) +@test_throws DimensionMismatch SparseSuperOperator((b, b), (b, b), spzeros(ComplexF64, 3, 3)) -# Test copy, sparse and full +# Test copy, sparse and dense b1 = GenericBasis(2) b2 = GenericBasis(7) b3 = GenericBasis(5) b4 = GenericBasis(3) s = DenseSuperOperator((b1, b2), (b3, b4)) -s_ = full(s) +s_ = dense(s) s_.data[1,1] = 1 @test s.data[1,1] == 0 s_sparse = sparse(s_) @@ -26,9 +27,9 @@ s = SparseSuperOperator((b1, b2), (b3, b4)) s_ = sparse(s) s_.data[1,1] = 1 @test s.data[1,1] == 0 -s_full = full(s_) -@test isa(s_full, DenseSuperOperator) -@test s_full.data[1,1] == 1 +s_dense = dense(s_) +@test isa(s_dense, DenseSuperOperator) +@test s_dense.data[1,1] == 1 # Test length b1 = GenericBasis(3) @@ -167,9 +168,9 @@ end @test tracedistance(L*ρ₀, ρ) < 1e-10 tout, ρt = timeevolution.master([0.,1.], ρ₀, H, J; reltol=1e-7) -@test tracedistance(expm(full(L))*ρ₀, ρt[end]) < 1e-6 +@test tracedistance(exp(dense(L))*ρ₀, ρt[end]) < 1e-6 -@test full(spre(op1)) == spre(op1) +@test dense(spre(op1)) == spre(op1) @test_throws bases.IncompatibleBases L*op1 @test_throws bases.IncompatibleBases L*spre(sm) @@ -179,7 +180,7 @@ tout, ρt = timeevolution.master([0.,1.], ρ₀, H, J; reltol=1e-7) @test_throws AssertionError liouvillian(H, J; rates=zeros(4, 4)) -rates = diagm([1.0, 1.0]) +rates = diagm(0 => [1.0, 1.0]) @test liouvillian(H, J; rates=rates) == L end # testset diff --git a/test/test_timecorrelations.jl b/test/test_timecorrelations.jl index 83ef343c..566c5039 100644 --- a/test/test_timecorrelations.jl +++ b/test/test_timecorrelations.jl @@ -1,5 +1,6 @@ -using Base.Test +using Test using QuantumOptics +using LinearAlgebra @testset "timecorrelations" begin diff --git a/test/test_timeevolution_master.jl b/test/test_timeevolution_master.jl index 96cae7cd..371223c5 
100644 --- a/test/test_timeevolution_master.jl +++ b/test/test_timeevolution_master.jl @@ -1,5 +1,6 @@ -using Base.Test +using Test using QuantumOptics +using LinearAlgebra @testset "master" begin @@ -38,11 +39,11 @@ Jlazy = [LazyTensor(basis, 1, sqrt(γ)*sm), Jc] Hnh = H - 0.5im*sum([dagger(J[i])*J[i] for i=1:length(J)]) -Hdense = full(H) +Hdense = dense(H) Hlazy = LazySum(Ha, Hc, Hint) -Hnh_dense = full(Hnh) -Junscaled_dense = map(full, Junscaled) -Jdense = map(full, J) +Hnh_dense = dense(Hnh) +Junscaled_dense = map(dense, Junscaled) +Jdense = map(dense, J) Ψ₀ = spinup(spinbasis) ⊗ fockstate(fockbasis, 5) ρ₀ = dm(Ψ₀) @@ -136,7 +137,7 @@ R = [cos(alpha) -sin(alpha); sin(alpha) cos(alpha)] Rt = transpose(R) Jrotated_dense = [R[1,1]*Junscaled_dense[1] + R[1,2]*Junscaled_dense[2], R[2,1]*Junscaled_dense[1] + R[2,2]*Junscaled_dense[2]] Jrotated = [SparseOperator(j) for j=Jrotated_dense] -rates_matrix = diagm(rates_vector) +rates_matrix = diagm(0 => rates_vector) rates_matrix_rotated = R * rates_matrix * Rt tout, ρt = timeevolution.master(T, ρ₀, Hdense, Jrotated_dense; rates=rates_matrix_rotated, reltol=1e-7) diff --git a/test/test_timeevolution_mcwf.jl b/test/test_timeevolution_mcwf.jl index 8e58d4b6..c1cbabe0 100644 --- a/test/test_timeevolution_mcwf.jl +++ b/test/test_timeevolution_mcwf.jl @@ -1,5 +1,6 @@ -using Base.Test +using Test using QuantumOptics +using Random, LinearAlgebra @testset "mcwf" begin @@ -29,14 +30,14 @@ Ha = embed(basis, 1, 0.5*ωa*sz) Hc = embed(basis, 2, ωc*number(fockbasis)) Hint = sm ⊗ create(fockbasis) + sp ⊗ destroy(fockbasis) H = Ha + Hc + Hint -Hdense = full(H) +Hdense = dense(H) Hlazy = LazySum(Ha, Hc, Hint) # Jump operators Ja = embed(basis, 1, sqrt(γ)*sm) Jc = embed(basis, 2, sqrt(κ)*destroy(fockbasis)) J = [Ja, Jc] -Jdense = map(full, J) +Jdense = map(dense, J) Jlazy = [LazyTensor(basis, 1, sqrt(γ)*sm), LazyTensor(basis, 2, sqrt(κ)*destroy(fockbasis))] # Initial conditions @@ -93,7 +94,7 @@ tout, Ψt = timeevolution.mcwf_h(T, Ψ₀, H, J; seed=UInt(2), reltol=1e-6) # Test mcwf nh Hnh = H - 0.5im*sum([dagger(J[i])*J[i] for i=1:length(J)]) -Hnh_dense = full(Hnh) +Hnh_dense = dense(Hnh) tout, Ψt = timeevolution.mcwf_nh(T, Ψ₀, Hnh, J; seed=UInt(1), reltol=1e-6) @test norm(Ψt[end]-Ψ) < 1e-5 @@ -127,9 +128,9 @@ end # Test single jump operator J1 = [Ja] -J1_dense = map(full, J1) +J1_dense = map(dense, J1) J2 = [Ja, 0 * Jc] -J2_dense = map(full, J2) +J2_dense = map(dense, J2) tout_master, ρt_master = timeevolution.master(T, ρ₀, Hdense, J1_dense) @@ -197,7 +198,7 @@ end J3_lazy = [LazyTensor(threespinbasis, i, sm) for i=1:3] d, diagJ_lazy = diagonaljumps(rates, J3_lazy) for i=1:3 - @test full(diagJ_lazy[i]) == full(diagJ[i]) + @test dense(diagJ_lazy[i]) == dense(diagJ[i]) end # Test dynamic diff --git a/test/test_timeevolution_pumpedcavity.jl b/test/test_timeevolution_pumpedcavity.jl index ecaf140e..d74b1cb0 100644 --- a/test/test_timeevolution_pumpedcavity.jl +++ b/test/test_timeevolution_pumpedcavity.jl @@ -1,4 +1,4 @@ -using Base.Test +using Test using QuantumOptics @testset "timeevolution_pumpedcavity" begin @@ -57,7 +57,7 @@ timeevolution.master_dynamic(T, psi0, f_HJ; fout=f_test_td) # Decay Hint_nh = Hint - 0.5im*κ*n -Γ = Matrix{Float64}(1,1) +Γ = Matrix{Float64}(undef, 1,1) Γ[1,1] = κ J = [a] Jdagger = [at] diff --git a/test/test_timeevolution_schroedinger.jl b/test/test_timeevolution_schroedinger.jl index 893bddc6..fe5d2474 100644 --- a/test/test_timeevolution_schroedinger.jl +++ b/test/test_timeevolution_schroedinger.jl @@ -1,4 +1,4 @@ -using Base.Test +using Test 
using QuantumOptics @testset "schroedinger" begin @@ -51,14 +51,15 @@ end tout, psi_rot_t = timeevolution.schroedinger(T, psi0, Hrot) tout, psi_t = timeevolution.schroedinger_dynamic(T, psi0, f) +n_op = dense(at*a) for (i, t) in enumerate(tout) - R = prod([embed(basis, i, expm(1im*ω[i]*t*full(at*a))) for i=1:N]) + R = prod([embed(basis, i, exp(1im*ω[i]*t*n_op)) for i=1:N]) psi_rot = psi_rot_t[i] psi = psi_t[i] # @test abs(dagger(psi_rot)*R*psi) < 1e-5 rho = dm(psi) rho_rot = dm(psi_rot) - @test tracedistance(rho_rot, full(R)*rho*dagger(full(R))) < 1e-5 + @test tracedistance(rho_rot, dense(R)*rho*dagger(dense(R))) < 1e-5 end function fout(t, psi) diff --git a/test/test_timeevolution_twolevel.jl b/test/test_timeevolution_twolevel.jl index b59e3477..23a4d377 100644 --- a/test/test_timeevolution_twolevel.jl +++ b/test/test_timeevolution_twolevel.jl @@ -1,5 +1,6 @@ -using Base.Test +using Test using QuantumOptics +using LinearAlgebra @testset "timeevolution_twolevel" begin diff --git a/test/test_transformations.jl b/test/test_transformations.jl index f32d2766..1cb92439 100644 --- a/test/test_transformations.jl +++ b/test/test_transformations.jl @@ -1,11 +1,12 @@ -using Base.Test +using Test using QuantumOptics +using Random, LinearAlgebra -@testset "fock" begin +@testset "transformation" begin srand(0) -D(op1::Operator, op2::Operator) = abs(tracedistance_nh(full(op1), full(op2))) +D(op1::Operator, op2::Operator) = abs(tracedistance_nh(dense(op1), dense(op2))) D(x::Ket, y::Ket) = norm(x-y) # Test transformations @@ -39,4 +40,4 @@ psi_x = gaussianstate(b_position, x0/σ0, p0/σ0, σ0) -end # testset \ No newline at end of file +end # testset
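The rotating-frame check above hoists n_op = dense(at*a) out of the loop and calls exp directly on a dense operator, replacing the old expm(full(...)) pair. A brief illustrative sketch of that idiom; the cutoff, frequency and time values are made up and not taken from the test:

    using QuantumOptics

    b  = FockBasis(4)
    a  = destroy(b); at = create(b)
    n_op = dense(at*a)               # hoisted once, as in the loop above
    ω, t = 0.5, 1.3                  # made-up rotating-frame parameters

    R = exp(1im*ω*t*n_op)            # 0.6: expm(1im*ω*t*full(at*a))
    psi     = coherentstate(b, 0.4)
    psi_rot = R*psi                  # state expressed in the rotating frame
    rho_rot = R*dm(psi)*dagger(R)    # density-matrix version of the same transformation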