From c0336cf05907187707cec29683cb1aaff63b7d9c Mon Sep 17 00:00:00 2001 From: Ashley Milsted Date: Thu, 4 May 2023 09:34:19 -0700 Subject: [PATCH] Lazy identities via FillArrays (#94) * Have identityoperator return an Eye * Handle Eyes in LazyTensor mul!() * Fix identityoperator semantics. Add a test. * Simplify _tpops_tuple * Add compat entry for FillArrays * Filter out all square eyes. Simplify code. * Try to make Aqua happier * Explicit eye tests * Only turn off ambiguity checking for reshape * Test left action. * Restore more efficient handling of isometries * Remember why we need to distinguish beta==0 * More specific Aqua exclusion * Fix test dependencies. * Handle alpha=0 and beta=0 better. * Managed to munge tests in merge. --------- Co-authored-by: Ashley Milsted --- Project.toml | 5 +- src/QuantumOpticsBase.jl | 4 +- src/operators.jl | 1 - src/operators_lazyproduct.jl | 4 + src/operators_lazysum.jl | 25 ++-- src/operators_lazytensor.jl | 213 +++++++++++++++-------------- src/operators_sparse.jl | 17 ++- test/test_aqua.jl | 6 +- test/test_operators_lazyproduct.jl | 16 +++ test/test_operators_lazysum.jl | 4 +- test/test_operators_lazytensor.jl | 56 +++++++- test/test_operators_sparse.jl | 5 + 12 files changed, 233 insertions(+), 123 deletions(-) diff --git a/Project.toml b/Project.toml index e672e9bb..26c7d156 100644 --- a/Project.toml +++ b/Project.toml @@ -5,6 +5,7 @@ version = "0.3.12" [deps] Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" +FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" LRUCache = "8ac3fa9e-de4c-5943-b1dc-09c6b5f20637" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" QuantumInterface = "5717a53b-5d69-4fa3-b976-0bf2f97ca1e5" @@ -16,6 +17,7 @@ UnsafeArrays = "c4a57d5a-5b31-53a6-b365-19f8c011fbd6" [compat] Adapt = "1, 2, 3.3" FFTW = "1.2" +FillArrays = "0.13, 1" LRUCache = "1" QuantumInterface = "0.1.0" Strided = "1, 2" @@ -27,6 +29,7 @@ Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" +FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b" LRUCache = "8ac3fa9e-de4c-5943-b1dc-09c6b5f20637" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" @@ -37,4 +40,4 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" UnsafeArrays = "c4a57d5a-5b31-53a6-b365-19f8c011fbd6" [targets] -test = ["LinearAlgebra", "SparseArrays", "Random", "Test", "Aqua", "JET", "Adapt", "Dates", "FFTW", "LRUCache", "Strided", "UnsafeArrays"] +test = ["LinearAlgebra", "SparseArrays", "Random", "Test", "Aqua", "JET", "Adapt", "Dates", "FFTW", "LRUCache", "Strided", "UnsafeArrays", "FillArrays"] diff --git a/src/QuantumOpticsBase.jl b/src/QuantumOpticsBase.jl index 70d7bc5b..6e06a898 100644 --- a/src/QuantumOpticsBase.jl +++ b/src/QuantumOpticsBase.jl @@ -1,6 +1,6 @@ module QuantumOpticsBase -using SparseArrays, LinearAlgebra, LRUCache, Strided, UnsafeArrays +using SparseArrays, LinearAlgebra, LRUCache, Strided, UnsafeArrays, FillArrays import LinearAlgebra: mul!, rmul!
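For context: the new FillArrays dependency provides `Eye`, a lazy identity matrix with O(1) storage, which is what `identityoperator` returns further down in this patch. A minimal sketch of the relevant behavior (editorial example using only the public FillArrays API, not code from this patch):

    using FillArrays

    Id = Eye{ComplexF64}(1000)           # lazy 1000×1000 identity, no data allocated
    @assert Base.summarysize(Id) < 100   # O(1) storage, independent of the dimension
    v = rand(ComplexF64, 1000)
    @assert Id * v ≈ v                   # behaves as the identity
    V = Eye{ComplexF64}(4, 2)            # rectangular Eye: an isometry, not an identity
    @assert V * ComplexF64[1, 2] == ComplexF64[1, 2, 0, 0]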
import QuantumInterface: dagger, directsum, ⊕, dm, embed, expect, permutesystems, @@ -17,7 +17,7 @@ export Basis, GenericBasis, CompositeBasis, basis, #operators_dense Operator, DenseOperator, DenseOpType, projector, dm, #operators_sparse - SparseOperator, diagonaloperator, SparseOpType, + SparseOperator, diagonaloperator, SparseOpType, EyeOpType, #operators_lazysum LazySum, #operators_lazyproduct diff --git a/src/operators.jl b/src/operators.jl index 0258e540..e3c54a5d 100644 --- a/src/operators.jl +++ b/src/operators.jl @@ -350,7 +350,6 @@ to be used in the identity matrix. identityoperator(::Type{T}, ::Type{S}, b1::Basis, b2::Basis) where {T<:AbstractOperator,S} = throw(ArgumentError("Identity operator not defined for operator type $T.")) identityoperator(::Type{T}, ::Type{S}, b::Basis) where {T<:AbstractOperator,S} = identityoperator(T,S,b,b) identityoperator(::Type{T}, bases::Basis...) where T<:AbstractOperator = identityoperator(T,eltype(T),bases...) -identityoperator(b::Basis) = identityoperator(b,b) identityoperator(op::T) where {T<:AbstractOperator} = identityoperator(T, op.basis_l, op.basis_r) # Catch case where eltype cannot be inferred from type; this is a bit hacky diff --git a/src/operators_lazyproduct.jl b/src/operators_lazyproduct.jl index 3027f761..fbbbecfa 100644 --- a/src/operators_lazyproduct.jl +++ b/src/operators_lazyproduct.jl @@ -85,6 +85,7 @@ function tensor(a::LazyProduct{B1, B2, F, T, KTL, BTR},b::Operator{B3,B4}) where end function mul!(result::Ket{B1},a::LazyProduct{B1,B2},b::Ket{B2},alpha,beta) where {B1,B2} + iszero(alpha) && (_zero_op_mul!(result.data, beta); return result) if length(a.operators)==1 mul!(result,a.operators[1],b,a.factor*alpha,beta) else @@ -98,6 +99,7 @@ function mul!(result::Ket{B1},a::LazyProduct{B1,B2},b::Ket{B2},alpha,beta) where end function mul!(result::Bra{B2},a::Bra{B1},b::LazyProduct{B1,B2},alpha,beta) where {B1,B2} + iszero(alpha) && (_zero_op_mul!(result.data, beta); return result) if length(b.operators)==1 mul!(result, a, b.operators[1],b.factor*alpha,beta) else @@ -111,6 +113,7 @@ function mul!(result::Bra{B2},a::Bra{B1},b::LazyProduct{B1,B2},alpha,beta) where end function mul!(result::Operator{B1,B3,T},a::LazyProduct{B1,B2},b::Operator{B2,B3},alpha,beta) where {B1,B2,B3,T} + iszero(alpha) && (_zero_op_mul!(result.data, beta); return result) if length(a.operators) == 1 mul!(result,a.operators[1],b,a.factor*alpha,beta) else @@ -127,6 +130,7 @@ function mul!(result::Operator{B1,B3,T},a::LazyProduct{B1,B2},b::Operator{B2,B3} end function mul!(result::Operator{B1,B3,T},a::Operator{B1,B2},b::LazyProduct{B2,B3},alpha,beta) where {B1,B2,B3,T} + iszero(alpha) && (_zero_op_mul!(result.data, beta); return result) if length(b.operators) == 1 mul!(result, a, b.operators[1],b.factor*alpha,beta) else diff --git a/src/operators_lazysum.jl b/src/operators_lazysum.jl index ef4fc6c8..c61d4eeb 100644 --- a/src/operators_lazysum.jl +++ b/src/operators_lazysum.jl @@ -159,11 +159,20 @@ function embed(basis_l::CompositeBasis, basis_r::CompositeBasis, index::Integer, LazySum(basis_l, basis_r, op.factors, ((embed(basis_l, basis_r, index, o) for o in op.operators)...,)) # dispatch to fast-path single-index `embed` end +function _zero_op_mul!(data, beta) + if iszero(beta) + fill!(data, zero(eltype(data))) + elseif !isone(beta) + data .*= beta + end + return data +end + # Fast in-place multiplication function mul!(result::Ket{B1},a::LazySum{B1,B2},b::Ket{B2},alpha,beta) where {B1,B2} - if length(a.operators) == 0 + if length(a.operators) == 0 || 
iszero(alpha) _check_mul!_dim_compatibility(size(result), size(a), size(b)) - result.data .*= beta + _zero_op_mul!(result.data, beta) else mul!(result,a.operators[1],b,alpha*a.factors[1],beta) for i=2:length(a.operators) @@ -174,9 +183,9 @@ function mul!(result::Ket{B1},a::LazySum{B1,B2},b::Ket{B2},alpha,beta) where {B1 end function mul!(result::Bra{B2},a::Bra{B1},b::LazySum{B1,B2},alpha,beta) where {B1,B2} - if length(b.operators) == 0 + if length(b.operators) == 0 || iszero(alpha) _check_mul!_dim_compatibility(size(result), reverse(size(b)), size(a)) - result.data .*= beta + _zero_op_mul!(result.data, beta) else mul!(result,a,b.operators[1],alpha*b.factors[1],beta) for i=2:length(b.operators) @@ -187,9 +196,9 @@ function mul!(result::Bra{B2},a::Bra{B1},b::LazySum{B1,B2},alpha,beta) where {B1 end function mul!(result::Operator{B1,B3},a::LazySum{B1,B2},b::Operator{B2,B3},alpha,beta) where {B1,B2,B3} - if length(a.operators) == 0 + if length(a.operators) == 0 || iszero(alpha) _check_mul!_dim_compatibility(size(result), size(a), size(b)) - result.data .*= beta + _zero_op_mul!(result.data, beta) else mul!(result,a.operators[1],b,alpha*a.factors[1],beta) for i=2:length(a.operators) @@ -199,9 +208,9 @@ function mul!(result::Operator{B1,B3},a::LazySum{B1,B2},b::Operator{B2,B3},alpha return result end function mul!(result::Operator{B1,B3},a::Operator{B1,B2},b::LazySum{B2,B3},alpha,beta) where {B1,B2,B3} - if length(b.operators) == 0 + if length(b.operators) == 0 || iszero(alpha) _check_mul!_dim_compatibility(size(result), size(a), size(b)) - result.data .*= beta + _zero_op_mul!(result.data, beta) else mul!(result,a,b.operators[1],alpha*b.factors[1],beta) for i=2:length(b.operators) diff --git a/src/operators_lazytensor.jl b/src/operators_lazytensor.jl index 4de51f59..c6713adb 100644 --- a/src/operators_lazytensor.jl +++ b/src/operators_lazytensor.jl @@ -342,6 +342,23 @@ function _tp_matmul_mid!(result, a::AbstractMatrix, loc::Integer, b, α::Number, br = Base.ReshapedArray(b, (sz_b_1, size(b, loc), sz_b_3), ()) result_r = Base.ReshapedArray(result, (sz_b_1, size(a, 1), sz_b_3), ()) + if a isa FillArrays.Eye + # Square Eyes are skipped higher up. This handles the non-square case. + size(b, loc) == size(a, 2) && size(result, loc) == size(a, 1) || throw(DimensionMismatch("Dimensions of Eye matrix do not match subspace dimensions.")) + d = min(size(a)...) + + if iszero(β) + # Need to handle this separately, as the values in `result` may not be valid numbers + fill!(result, zero(eltype(result))) + @strided result_r[:, 1:d, :] .= α .* br[:, 1:d, :] + else + rmul!(result, β) + @strided result_r[:, 1:d, :] .+= α .* br[:, 1:d, :] + end + + return result + end + # Try to "minimize" the transpose for efficiency. move_left = sz_b_1 < sz_b_3 perm = move_left ? (2,1,3) : (1,3,2) @@ -351,7 +368,7 @@ function _tp_matmul_mid!(result, a::AbstractMatrix, loc::Integer, b, α::Number, #permutedims!(br_p, br, perm) result_r_p = _tp_matmul_get_tmp(eltype(result_r), ((size(result_r, i) for i in perm)...,), :_tp_matmul_mid_out, result_r) - β == 0.0 || @strided permutedims!(result_r_p, result_r, perm) + iszero(β) || @strided permutedims!(result_r_p, result_r, perm) #β == 0.0 || permutedims!(result_r_p, result_r, perm) if move_left @@ -395,103 +412,63 @@ function _tp_sum_get_tmp(op::AbstractMatrix{T}, loc::Integer, arr::AbstractArray _tp_matmul_get_tmp(promote_type(T,S), shp, sym, arr) end +# Eyes need not be identities, but square Eyes are. 
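Concretely (editorial example of standard FillArrays behavior): a square Eye is a genuine identity and can safely be dropped from a tensor product, while a rectangular Eye pads or truncates along its axis and must actually be applied:

    using FillArrays

    x = [1.0, 2.0, 3.0]
    @assert Eye(3) * x == x                    # square: a true identity, safe to skip
    @assert Eye(5, 3) * x == [1, 2, 3, 0, 0]   # rectangular: embeds by zero-padding
    @assert Eye(2, 3) * x == [1, 2]            # rectangular: truncates

The `_is_square_eye` methods that follow detect the square case: at the type level for `SquareEye`, and via a runtime size check for an `Eye` whose squareness is not encoded in its type.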
+_is_square_eye(::AbstractArray) = false +_is_square_eye(::FillArrays.SquareEye) = true +_is_square_eye(x::FillArrays.Eye) = size(x, 1) == size(x, 2) +_is_square_eye(x::LinearAlgebra.Adjoint) = _is_square_eye(parent(x)) +_is_square_eye(x::LinearAlgebra.Transpose) = _is_square_eye(parent(x)) + #Apply a tensor product of operators to a vector. function _tp_sum_matmul!(result_data, tp_ops, iso_ops, b_data, alpha, beta) - iszero(alpha) && return rmul!(result_data, beta) - if iso_ops === nothing - n_ops = length(tp_ops[1]) - ops = zip(tp_ops...) + ops = tp_ops else - n_ops = length(tp_ops[1]) + length(iso_ops[1]) - ops = Iterators.flatten((zip(tp_ops...), zip(iso_ops...))) + ops = (tp_ops..., iso_ops...) end + n_ops = length(ops) + # TODO: Perhaps replace with a single for loop and branch inside? if n_ops == 0 - result_data .= alpha .* b_data + beta .* result_data + if iszero(beta) + @. result_data = alpha * b_data + else + @. result_data = alpha * b_data + beta * result_data + end elseif n_ops == 1 # Can add directly to the output array. - _tp_matmul!(result_data, first(ops)..., b_data, alpha, beta) + _tp_matmul!(result_data, ops[1][1], ops[1][2], b_data, alpha, beta) elseif n_ops == 2 # One temporary vector needed. - op1, istate = iterate(ops)::Tuple # "not-nothing" assertion to help type inference - tmp = _tp_sum_get_tmp(op1..., b_data, :_tp_sum_matmul_tmp1) - _tp_matmul!(tmp, op1..., b_data, alpha, zero(beta)) + tmp = _tp_sum_get_tmp(ops[1][1], ops[1][2], b_data, :_tp_sum_matmul_tmp1) + _tp_matmul!(tmp, ops[1][1], ops[1][2], b_data, alpha, zero(beta)) - op2, istate = iterate(ops, istate)::Tuple # "not-nothing" assertion to help type inference - _tp_matmul!(result_data, op2..., tmp, one(alpha), beta) + _tp_matmul!(result_data, ops[2][1], ops[2][2], tmp, one(alpha), beta) else # At least two temporary vectors needed. # Symbols identify computation stages in the cache. sym1 = :_tp_sum_matmul_tmp1 sym2 = :_tp_sum_matmul_tmp2 - op1, istate = iterate(ops)::Tuple # "not-nothing" assertion to help type inference - tmp1 = _tp_sum_get_tmp(op1..., b_data, sym1) - _tp_matmul!(tmp1, op1..., b_data, alpha, zero(beta)) + tmp1 = _tp_sum_get_tmp(ops[1][1], ops[1][2], b_data, sym1) + _tp_matmul!(tmp1, ops[1][1], ops[1][2], b_data, alpha, zero(beta)) - next = iterate(ops, istate)::Tuple # "not-nothing" assertion to help type inference - for _ in 2:n_ops-1 - op, istate = next - tmp2 = _tp_sum_get_tmp(op..., tmp1, sym2) - _tp_matmul!(tmp2, op..., tmp1, one(alpha), zero(beta)) + for i in 2:n_ops-1 + tmp2 = _tp_sum_get_tmp(ops[i][1], ops[i][2], tmp1, sym2) + _tp_matmul!(tmp2, ops[i][1], ops[i][2], tmp1, one(alpha), zero(beta)) tmp1, tmp2 = tmp2, tmp1 sym1, sym2 = sym2, sym1 - next = iterate(ops, istate)::Tuple # "not-nothing" assertion to help type inference end - op, istate = next - _tp_matmul!(result_data, op..., tmp1, one(alpha), beta) + _tp_matmul!(result_data, ops[n_ops][1], ops[n_ops][2], tmp1, one(alpha), beta) end result_data end -# Represents a rectangular "identity" matrix of ones along the diagonal. -struct _SimpleIsometry{I<:Integer} # We have no need to subtype AbstractMatrix - shape::Tuple{I,I} - function _SimpleIsometry(d1::I, d2::I) where {I<:Integer} - new{I}((d1, d2)) - end -end -Base.size(A::_SimpleIsometry) = A.shape -Base.size(A::_SimpleIsometry, i) = A.shape[i] - -function _tp_sum_get_tmp(op::_SimpleIsometry, loc::Integer, arr::AbstractArray{S,N}, sym) where {S,N} - shp = ntuple(i -> i == loc ? 
size(op,1) : size(arr,i), N) - _tp_matmul_get_tmp(S, shp, sym, arr) -end - -function _tp_matmul!(result, a::_SimpleIsometry, loc::Integer, b, α::Number, β::Number) - shp_b_1 = 1 - for i in 1:loc-1 - shp_b_1 *= size(b,i) - end - shp_b_3 = 1 - for i in loc+1:ndims(b) - shp_b_3 *= size(b,i) - end - - @assert size(b, loc) == size(a, 2) - - br = Base.ReshapedArray(b, (shp_b_1, size(b, loc), shp_b_3), ()) - result_r = Base.ReshapedArray(result, (shp_b_1, size(a, 1), shp_b_3), ()) - - d = min(size(a)...) - - if β != 0 - rmul!(result, β) - @strided result_r[:, 1:d, :] .+= α .* br[:, 1:d, :] - else - @strided result_r[:, 1:d, :] .= α .* br[:, 1:d, :] - @strided result_r[:, d+1:end, :] .= zero(eltype(result)) - end - - result -end - -function _explicit_isometries(used_indices, bl::Basis, br::Basis, shift=0) +# Insert explicit Eye operators where left and right bases have different sizes. +function _explicit_isometries(::Type{T}, used_indices, bl::Basis, br::Basis, shift=0) where T shp_l = _comp_size(bl) shp_r = _comp_size(br) shp_l != shp_r || return nothing @@ -501,10 +478,10 @@ function _explicit_isometries(used_indices, bl::Basis, br::Basis, shift=0) for (i, (sl, sr)) in enumerate(zip(shp_l, shp_r)) if (sl != sr) && !(i + shift in used_indices) if isos === nothing - isos = [_SimpleIsometry(sl, sr)] + isos = [Eye{T}(sl, sr)] iso_inds = [i + shift] else - push!(isos, _SimpleIsometry(sl, sr)) + push!(isos, Eye{T}(sl, sr)) push!(iso_inds, i + shift) end end @@ -512,15 +489,37 @@ function _explicit_isometries(used_indices, bl::Basis, br::Basis, shift=0) if isos === nothing return nothing end - isos, iso_inds + res = tuple(zip(isos, iso_inds)...) + return res end # To get the shape of a CompositeBasis with number of dims inferrable at compile-time _comp_size(b::CompositeBasis) = tuple((length(b_) for b_ in b.bases)...) _comp_size(b::Basis) = (length(b),) +_is_pure_sparse(operators) = all(o isa Union{SparseOpPureType,EyeOpType} for o in operators) + +# Prepare the tensor-product operator and indices tuple +function _tpops_tuple(operators, indices; shift=0, op_transform=identity) + length(operators) == 0 == length(indices) && return () + + op_pairs = tuple(((op_transform(op.data), i + shift) for (op, i) in zip(operators, indices))...) + + # Filter out identities: + # This induces a non-trivial cost only if _is_square_eye is not inferrable. + # This happens if we have Eyes that are not SquareEyes. + # This can happen if the user constructs LazyTensor operators including + # explicit identityoperator(b,b). 
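    # Hypothetical example of the filtering below: for a LazyTensor with
    # operators (identityoperator(b1, b1), sigmax(b2)) at indices (1, 2),
    # op_pairs is ((Eye{ComplexF64}(N1, N1), 1), (sigmax(b2).data, 2));
    # the square Eye is dropped, leaving ((sigmax(b2).data, 2),).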
+ filtered = filter(p->!_is_square_eye(p[1]), op_pairs) + return filtered +end + +_tpops_tuple(a::LazyTensor; shift=0, op_transform=identity) = _tpops_tuple((a.operators...,), (a.indices...,); shift, op_transform) + function mul!(result::Ket{B1}, a::LazyTensor{B1,B2,F,I,T}, b::Ket{B2}, alpha, beta) where {B1<:Basis,B2<:Basis, F,I,T<:Tuple{Vararg{DataOperator}}} - if length(a.operators) > 0 && all(o isa SparseOpPureType for o in a.operators) + iszero(alpha) && (_zero_op_mul!(result.data, beta); return result) + + if length(a.operators) > 0 && _is_pure_sparse(a.operators) return _mul_puresparse!(result, a, b, alpha, beta) end @@ -530,45 +529,51 @@ function mul!(result::Ket{B1}, a::LazyTensor{B1,B2,F,I,T}, b::Ket{B2}, alpha, be b_data = Base.ReshapedArray(b.data, _comp_size(basis(b)), ()) result_data = Base.ReshapedArray(result.data, _comp_size(basis(result)), ()) - tp_ops = (tuple((op.data for op in a.operators)...), a.indices) - iso_ops = _explicit_isometries(a.indices, a.basis_l, a.basis_r) + tp_ops = _tpops_tuple(a) + iso_ops = _explicit_isometries(eltype(a), a.indices, a.basis_l, a.basis_r) _tp_sum_matmul!(result_data, tp_ops, iso_ops, b_data, alpha * a.factor, beta) result end function mul!(result::Bra{B2}, a::Bra{B1}, b::LazyTensor{B1,B2,F,I,T}, alpha, beta) where {B1<:Basis,B2<:Basis, F,I,T<:Tuple{Vararg{DataOperator}}} - if length(b.operators) > 0 && all(o isa SparseOpPureType for o in b.operators) + iszero(alpha) && (_zero_op_mul!(result.data, beta); return result) + + if length(b.operators) > 0 && _is_pure_sparse(b.operators) return _mul_puresparse!(result, a, b, alpha, beta) end a_data = Base.ReshapedArray(a.data, _comp_size(basis(a)), ()) result_data = Base.ReshapedArray(result.data, _comp_size(basis(result)), ()) - tp_ops = (tuple((transpose(op.data) for op in b.operators)...), b.indices) - iso_ops = _explicit_isometries(b.indices, b.basis_r, b.basis_l) + tp_ops = _tpops_tuple(b; op_transform=transpose) + iso_ops = _explicit_isometries(eltype(b), b.indices, b.basis_r, b.basis_l) _tp_sum_matmul!(result_data, tp_ops, iso_ops, a_data, alpha * b.factor, beta) result end function mul!(result::DenseOpType{B1,B3}, a::LazyTensor{B1,B2,F,I,T}, b::DenseOpType{B2,B3}, alpha, beta) where {B1<:Basis,B2<:Basis,B3<:Basis, F,I,T<:Tuple{Vararg{DataOperator}}} - if length(a.operators) > 0 && all(o isa SparseOpPureType for o in a.operators) && (b isa DenseOpPureType) + iszero(alpha) && (_zero_op_mul!(result.data, beta); return result) + + if length(a.operators) > 0 && _is_pure_sparse(a.operators) && (b isa DenseOpPureType) return _mul_puresparse!(result, a, b, alpha, beta) end b_data = Base.ReshapedArray(b.data, (_comp_size(b.basis_l)..., _comp_size(b.basis_r)...), ()) result_data = Base.ReshapedArray(result.data, (_comp_size(result.basis_l)..., _comp_size(result.basis_r)...), ()) - tp_ops = (tuple((op.data for op in a.operators)...), a.indices) - iso_ops = _explicit_isometries(a.indices, a.basis_l, a.basis_r) + tp_ops = _tpops_tuple(a) + iso_ops = _explicit_isometries(eltype(a), a.indices, a.basis_l, a.basis_r) _tp_sum_matmul!(result_data, tp_ops, iso_ops, b_data, alpha * a.factor, beta) result end function mul!(result::DenseOpType{B1,B3}, a::DenseOpType{B1,B2}, b::LazyTensor{B2,B3,F,I,T}, alpha, beta) where {B1<:Basis,B2<:Basis,B3<:Basis, F,I,T<:Tuple{Vararg{DataOperator}}} - if length(b.operators) > 0 && all(o isa SparseOpPureType for o in b.operators) && (a isa DenseOpPureType) + iszero(alpha) && (_zero_op_mul!(result.data, beta); return result) + + if length(b.operators) > 0 && 
_is_pure_sparse(b.operators) && (a isa DenseOpPureType) return _mul_puresparse!(result, a, b, alpha, beta) end @@ -576,8 +581,8 @@ function mul!(result::DenseOpType{B1,B3}, a::DenseOpType{B1,B2}, b::LazyTensor{B result_data = Base.ReshapedArray(result.data, (_comp_size(result.basis_l)..., _comp_size(result.basis_r)...), ()) shft = length(a.basis_l.shape) # b must be applied to the "B2" side of a - tp_ops = (tuple((transpose(op.data) for op in b.operators)...), tuple((i + shft for i in b.indices)...)) - iso_ops = _explicit_isometries(tp_ops[2], b.basis_r, b.basis_l, shft) + tp_ops = _tpops_tuple(b; shift=shft, op_transform=transpose) + iso_ops = _explicit_isometries(eltype(b), ((i + shft for i in b.indices)...,), b.basis_r, b.basis_l, shft) _tp_sum_matmul!(result_data, tp_ops, iso_ops, a_data, alpha * b.factor, beta) result @@ -610,15 +615,15 @@ function _gemm_recursive_dense_lazy(i_k, N_k, K, J, val, _gemm_recursive_dense_lazy(i_k+1, N_k, K_, J_, val_, shape, strides_k, strides_j, indices, h, op, result) end end - else + return nothing + elseif !isa(h_i, EyeOpType) throw(ArgumentError("gemm! of LazyTensor is not implemented for $(typeof(h_i))")) end - else - @inbounds for k=1:shape[i_k] - K_ = K + strides_k[i_k]*(k-1) - J_ = J + strides_j[i_k]*(k-1) - _gemm_recursive_dense_lazy(i_k + 1, N_k, K_, J_, val, shape, strides_k, strides_j, indices, h, op, result) - end + end + @inbounds for k=1:shape[i_k] + K_ = K + strides_k[i_k]*(k-1) + J_ = J + strides_j[i_k]*(k-1) + _gemm_recursive_dense_lazy(i_k + 1, N_k, K_, J_, val, shape, strides_k, strides_j, indices, h, op, result) + end end end @@ -647,15 +652,15 @@ function _gemm_recursive_lazy_dense(i_k, N_k, K, J, val, _gemm_recursive_lazy_dense(i_k+1, N_k, K_, J_, val_, shape, strides_k, strides_j, indices, h, op, result) end end - else + return nothing + elseif !isa(h_i, EyeOpType) # identity operators get handled below throw(ArgumentError("gemm! 
of LazyTensor is not implemented for $(typeof(h_i))")) end - else - @inbounds for k=1:shape[i_k] - K_ = K + strides_k[i_k]*(k-1) - J_ = J + strides_j[i_k]*(k-1) - _gemm_recursive_lazy_dense(i_k + 1, N_k, K_, J_, val, shape, strides_k, strides_j, indices, h, op, result) - end + end + @inbounds for k=1:shape[i_k] + K_ = K + strides_k[i_k]*(k-1) + J_ = J + strides_j[i_k]*(k-1) + _gemm_recursive_lazy_dense(i_k + 1, N_k, K_, J_, val, shape, strides_k, strides_j, indices, h, op, result) end end @@ -683,7 +688,7 @@ function _check_mul!_aliasing_compatibility(R, A, B) end -function _gemm_puresparse(alpha, op::AbstractArray, h::LazyTensor{B1,B2,F,I,T}, beta, result::AbstractArray) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}} +function _gemm_puresparse(alpha, op::AbstractArray, h::LazyTensor{B1,B2,F,I,T}, beta, result::AbstractArray) where {B1,B2,F,I,T} if op isa AbstractVector # _gemm_recursive_dense_lazy will treat `op` as a `Bra` _check_mul!_aliasing_compatibility(result, op, h) @@ -701,7 +706,7 @@ function _gemm_puresparse(alpha, op::AbstractArray, h::LazyTensor{B1,B2,F,I,T}, _gemm_recursive_dense_lazy(1, N_k, 1, 1, alpha*h.factor, shape, strides_k, strides_j, h.indices, h, op, result) end -function _gemm_puresparse(alpha, h::LazyTensor{B1,B2,F,I,T}, op::AbstractArray, beta, result::AbstractArray) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}} +function _gemm_puresparse(alpha, h::LazyTensor{B1,B2,F,I,T}, op::AbstractArray, beta, result::AbstractArray) where {B1,B2,F,I,T} check_mul!_compatibility(result, h, op) if iszero(beta) fill!(result, beta) @@ -720,8 +725,8 @@ function _get_shape_and_strides(h) return shape, strides_j, strides_k end -_mul_puresparse!(result::DenseOpType{B1,B3},h::LazyTensor{B1,B2,F,I,T},op::DenseOpType{B2,B3},alpha,beta) where {B1,B2,B3,F,I,T<:Tuple{Vararg{SparseOpPureType}}} = (_gemm_puresparse(alpha, h, op.data, beta, result.data); result) -_mul_puresparse!(result::DenseOpType{B1,B3},op::DenseOpType{B1,B2},h::LazyTensor{B2,B3,F,I,T},alpha,beta) where {B1,B2,B3,F,I,T<:Tuple{Vararg{SparseOpPureType}}} = (_gemm_puresparse(alpha, op.data, h, beta, result.data); result) -_mul_puresparse!(result::Ket{B1},a::LazyTensor{B1,B2,F,I,T},b::Ket{B2},alpha,beta) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}} = (_gemm_puresparse(alpha, a, b.data, beta, result.data); result) -_mul_puresparse!(result::Bra{B2},a::Bra{B1},b::LazyTensor{B1,B2,F,I,T},alpha,beta) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}} = (_gemm_puresparse(alpha, a.data, b, beta, result.data); result) +_mul_puresparse!(result::DenseOpType{B1,B3},h::LazyTensor{B1,B2,F,I,T},op::DenseOpType{B2,B3},alpha,beta) where {B1,B2,B3,F,I,T} = (_gemm_puresparse(alpha, h, op.data, beta, result.data); result) +_mul_puresparse!(result::DenseOpType{B1,B3},op::DenseOpType{B1,B2},h::LazyTensor{B2,B3,F,I,T},alpha,beta) where {B1,B2,B3,F,I,T} = (_gemm_puresparse(alpha, op.data, h, beta, result.data); result) +_mul_puresparse!(result::Ket{B1},a::LazyTensor{B1,B2,F,I,T},b::Ket{B2},alpha,beta) where {B1,B2,F,I,T} = (_gemm_puresparse(alpha, a, b.data, beta, result.data); result) +_mul_puresparse!(result::Bra{B2},a::Bra{B1},b::LazyTensor{B1,B2,F,I,T},alpha,beta) where {B1,B2,F,I,T} = (_gemm_puresparse(alpha, a.data, b, beta, result.data); result) diff --git a/src/operators_sparse.jl b/src/operators_sparse.jl index f73edb85..168488f0 100644 --- a/src/operators_sparse.jl +++ b/src/operators_sparse.jl @@ -60,11 +60,24 @@ function permutesystems(rho::SparseOpPureType{B1,B2}, perm) where {B1<:Composite 
SparseOperator(permutesystems(rho.basis_l, perm), permutesystems(rho.basis_r, perm), data) end -identityoperator(::Type{T}, ::Type{S}, b1::Basis, b2::Basis) where {T<:DataOperator,S<:Number} = +identityoperator(::Type{T}, ::Type{S}, b1::Basis, b2::Basis) where {T<:SparseOpType,S<:Number} = SparseOperator(b1, b2, sparse(one(S)*I, length(b1), length(b2))) +identityoperator(::Type{T}, ::Type{S}, b::Basis) where {T<:SparseOpType,S<:Number} = + SparseOperator(b, b, sparse(one(S)*I, length(b), length(b))) + +const EyeOpPureType{BL,BR} = Operator{BL,BR,<:Eye} +const EyeOpAdjType{BL,BR} = Operator{BL,BR,<:Adjoint{<:Number,<:Eye}} +const EyeOpType{BL,BR} = Union{EyeOpPureType{BL,BR},EyeOpAdjType{BL,BR}} + +identityoperator(::Type{T}, ::Type{S}, b1::Basis, b2::Basis) where {T<:DataOperator,S<:Number} = + Operator(b1, b2, Eye{S}(length(b1), length(b2))) +identityoperator(::Type{T}, ::Type{S}, b::Basis) where {T<:DataOperator,S<:Number} = + Operator(b, b, Eye{S}(length(b))) + identityoperator(::Type{T}, b1::Basis, b2::Basis) where T<:Number = identityoperator(DataOperator, T, b1, b2) -identityoperator(::Type{T}, b::Basis) where T<:Number = identityoperator(T, b, b) +identityoperator(::Type{T}, b::Basis) where T<:Number = identityoperator(DataOperator, T, b) identityoperator(b1::Basis, b2::Basis) = identityoperator(ComplexF64, b1, b2) +identityoperator(b::Basis) = identityoperator(ComplexF64, b) """ diagonaloperator(b::Basis) diff --git a/test/test_aqua.jl b/test/test_aqua.jl index 40b4f660..1e1985f7 100644 --- a/test/test_aqua.jl +++ b/test/test_aqua.jl @@ -1,9 +1,11 @@ using Test using QuantumOpticsBase using Aqua +using FillArrays @testset "aqua" begin - Aqua.test_all(QuantumOpticsBase, - piracy=false # TODO: Due to Base methods in QuantumOpticsBase, for types defined in QuantumInterface + Aqua.test_all(QuantumOpticsBase; + piracy=false, # TODO: Due to Base methods in QuantumOpticsBase, for types defined in QuantumInterface + ambiguities=(exclude=[FillArrays.reshape],) # Due to https://github.com/JuliaArrays/FillArrays.jl/issues/105#issuecomment-1518406018 ) end # testset diff --git a/test/test_operators_lazyproduct.jl b/test/test_operators_lazyproduct.jl index 2b26f11c..82e3890c 100644 --- a/test/test_operators_lazyproduct.jl +++ b/test/test_operators_lazyproduct.jl @@ -170,6 +170,10 @@ for N=1:3 QuantumOpticsBase.mul!(result,op,state,alpha,beta) @test 1e-11 > D(result, alpha*op_*state + beta*result_) + result = copy(result_) + QuantumOpticsBase.mul!(result,op,state,0,beta) + @test 1e-11 > D(result, beta*result_) + state = Bra(b_l, rand(ComplexF64, length(b_l))) result_ = randstate(iseven(N) ? b_l : b_r)' result = copy(result_) @@ -182,6 +186,10 @@ for N=1:3 QuantumOpticsBase.mul!(result,state,op,alpha,beta) @test 1e-11 > D(result, alpha*state*op_ + beta*result_) + result = copy(result_) + QuantumOpticsBase.mul!(result,state,op,0,beta) + @test 1e-11 > D(result, beta*result_) + # Test gemm state = randoperator(iseven(N) ? b_l : b_r, b_r) result_ = randoperator(b_l, b_r) @@ -195,6 +203,10 @@ for N=1:3 QuantumOpticsBase.mul!(result,op,state,alpha,beta) @test 1e-11 > D(result, alpha*op_*state + beta*result_) + result = copy(result_) + QuantumOpticsBase.mul!(result,op,state,0,beta) + @test 1e-11 > D(result, beta*result_) + state = randoperator(b_l, b_l) result_ = randoperator(b_l, iseven(N) ? 
b_l : b_r) result = copy(result_) @@ -206,6 +218,10 @@ for N=1:3 beta = complex(2.1) QuantumOpticsBase.mul!(result,state,op,alpha,beta) @test 1e-11 > D(result, alpha*state*op_ + beta*result_) + + result = copy(result_) + QuantumOpticsBase.mul!(result,state,op,0,beta) + @test 1e-11 > D(result, beta*result_) end end # testset diff --git a/test/test_operators_lazysum.jl b/test/test_operators_lazysum.jl index 99be53d8..887a2547 100644 --- a/test/test_operators_lazysum.jl +++ b/test/test_operators_lazysum.jl @@ -300,7 +300,7 @@ op_ = 0.1*op1 + 0.3*op2 + 1.2*op3 state = randoperator(b_r, b_r) result_ = randoperator(b_l, b_r) -result = deepcopy(result_) +result = NaN * deepcopy(result_) # with beta=0, NaNs should be killed QuantumOpticsBase.mul!(result,op,state,complex(1.),complex(0.)) @test 1e-12 > D(result, op_*state) @@ -324,7 +324,7 @@ result = deepcopy(result_) QuantumOpticsBase.mul!(result,state,op,complex(1.),complex(0.)) @test 1e-12 > D(result, state*op_) -result = deepcopy(result_) +result = NaN * deepcopy(result_) # with beta=0, NaNs should be killed QuantumOpticsBase.mul!(result,state,zero_op,complex(1.),complex(0.)) @test 1e-12 > D(result, state*zero_op_) diff --git a/test/test_operators_lazytensor.jl b/test/test_operators_lazytensor.jl index c7cef8b9..56e92e7f 100644 --- a/test/test_operators_lazytensor.jl +++ b/test/test_operators_lazytensor.jl @@ -10,6 +10,7 @@ mutable struct test_lazytensor{BL<:Basis,BR<:Basis} <: AbstractOperator{BL,BR} end Base.eltype(::test_lazytensor) = ComplexF64 +@testset "operators-lazytensor" begin Random.seed!(0) @@ -407,7 +408,6 @@ result = deepcopy(result_) QuantumOpticsBase.mul!(result,op,state,alpha,beta) @test 1e-12 > D(result, alpha*op_*state + beta*result_) - # Test gemm errors test_op = test_lazytensor(b1a, b1a, rand(2, 2)) test_lazy = LazyTensor(tensor(b1a, b1a), [1, 2], (test_op, test_op)) @@ -441,3 +441,57 @@ Lop1 = LazyTensor(b1^2, b2^2, 2, sparse(randoperator(b1, b2))) @test_throws DimensionMismatch sparse(Lop1)*Lop1 @test_throws DimensionMismatch Lop1*dense(Lop1) @test_throws DimensionMismatch Lop1*sparse(Lop1) + +end # testset + +@testset "LazyTensor: explicit isometries" begin + +D(op1::AbstractOperator, op2::AbstractOperator) = abs(tracedistance_nh(dense(op1), dense(op2))) +D(x1::StateVector, x2::StateVector) = norm(x2-x1) + +# Test explicit identities and isometries +bl = FockBasis(2) ⊗ GenericBasis(2) ⊗ SpinBasis(1//2) ⊗ GenericBasis(1) ⊗ GenericBasis(2) +br = FockBasis(2) ⊗ GenericBasis(2) ⊗ SpinBasis(1//2) ⊗ GenericBasis(2) ⊗ GenericBasis(1) + +iso = identityoperator(bl.bases[5], br.bases[5]) + +n1 = LazyTensor(bl, br, (1,3), (number(bl.bases[1]), sigmax(bl.bases[3]))) +n1_sp = LazyTensor(bl, br, (1,2,3,5), (number(bl.bases[1]), identityoperator(bl.bases[2]), sigmax(bl.bases[3]), iso)) +n1_de = LazyTensor(bl, br, (1,2,3,5), (dense(number(bl.bases[1])), identityoperator(bl.bases[2]), sigmax(bl.bases[3]), iso)) + +@test dense(n1) == dense(n1_sp) +@test dense(n1) == dense(n1_de) + +state = randoperator(br,br) + +@test 1e-12 > D(n1 * state, n1_sp * state) +@test 1e-12 > D(n1 * state, n1_de * state) + +out = randoperator(bl,br) +alpha = randn() +beta = randn() +out_ref = mul!(copy(out), n1, state, alpha, beta) +@test 1e-12 > D(out_ref, mul!(copy(out), n1_sp, state, alpha, beta)) +@test 1e-12 > D(out_ref, mul!(copy(out), n1_de, state, alpha, beta)) + +out_NaN = NaN * out +out_ref = mul!(copy(out_NaN), n1, state, alpha, 0) +@test 1e-12 > D(out_ref, mul!(copy(out_NaN), n1_sp, state, alpha, 0)) +@test 1e-12 > D(out_ref, mul!(copy(out_NaN), 
n1_de, state, alpha, 0)) + +out_ref = mul!(copy(out), n1, state, 0, beta) +@test 1e-12 > D(out_ref, mul!(copy(out), n1_sp, state, 0, beta)) +@test 1e-12 > D(out_ref, mul!(copy(out), n1_de, state, 0, beta)) + +out_NaN = NaN * out +out_ref = mul!(copy(out_NaN), n1, state, 0, 0) +@test 1e-12 > D(out_ref, mul!(copy(out_NaN), n1_sp, state, 0, 0)) +@test 1e-12 > D(out_ref, mul!(copy(out_NaN), n1_de, state, 0, 0)) + +state = randoperator(bl,bl) +out_ref = mul!(copy(out), state, n1, alpha, beta) +@test 1e-12 > D(out_ref, mul!(copy(out), state, n1_sp, alpha, beta)) +@test 1e-12 > D(out_ref, mul!(copy(out), state, n1_de, alpha, beta)) + +end + diff --git a/test/test_operators_sparse.jl b/test/test_operators_sparse.jl index d483e5ad..5e1c50cc 100644 --- a/test/test_operators_sparse.jl +++ b/test/test_operators_sparse.jl @@ -112,6 +112,11 @@ I = identityoperator(SparseOpType, b_l) @test 1e-11 > D(xbra1*I, xbra1) @test I == identityoperator(SparseOpType, b1a) ⊗ identityoperator(SparseOpType, b2a) ⊗ identityoperator(SparseOpType, b3a) +IEye = identityoperator(b_l) +@test isa(IEye, EyeOpType) +@test sparse(IEye) == I +Icomp = identityoperator(b1a) ⊗ identityoperator(b2a) ⊗ identityoperator(b3a) +@test IEye == Icomp # Test tr and normalize op = sparse(DenseOperator(GenericBasis(3), [1 3 2;5 2 2;-1 2 5]))
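To summarize the `identityoperator` semantics these tests pin down (editorial sketch mirroring the assertions above; `FockBasis(3)` is an arbitrary choice):

    using QuantumOpticsBase, FillArrays, SparseArrays

    b = FockBasis(3)                         # 4-dimensional basis
    Id = identityoperator(b)                 # default is now a lazy Eye-backed operator
    @assert Id isa EyeOpType
    @assert Id.data isa FillArrays.Eye       # no dense or sparse storage allocated
    Isp = identityoperator(SparseOpType, b)  # explicit sparse identity, as before
    @assert sparse(Id) == Isp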