Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

CUDA-compatibility #24

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 8 additions & 1 deletion Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,12 @@ FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd"

[weakdeps]
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"

[extensions]
AbstractOperatorsCudaExt = "CUDA"

[compat]
AbstractFFTs = "1"
DSP = "0.7"
Expand All @@ -21,6 +27,7 @@ Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"

[targets]
test = ["Printf", "Random", "SparseArrays", "Test"]
test = ["Printf", "Random", "SparseArrays", "Test", "CUDA"]
8 changes: 8 additions & 0 deletions ext/AbstractOperatorsCudaExt/AbstractOperatorsCudaExt.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Package extension: loaded automatically when both AbstractOperators and CUDA
# are available (declared under [weakdeps]/[extensions] in Project.toml).
module AbstractOperatorsCudaExt

using CUDA
import AbstractOperators: storageTypeDisplayString

# Superscript "cu" tag appended to an operator's display symbol (e.g. "★ᶜᵘ")
# when its storage type is a GPU array; overrides the empty-string default
# defined for generic AbstractArrays in src/properties.jl.
storageTypeDisplayString(::Type{T}) where {T<:CuArray} = "ᶜᵘ"

end # module AbstractOperatorsCudaExt
2 changes: 2 additions & 0 deletions src/calculus/AdjointOperator.jl
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,8 @@ size(L::AdjointOperator) = size(L.A,2), size(L.A,1)

domainType(L::AdjointOperator) = codomainType(L.A)
codomainType(L::AdjointOperator) = domainType(L.A)
# Taking the adjoint swaps domain and codomain, including their storage types.
domainStorageType(L::AdjointOperator) = codomainStorageType(L.A)
codomainStorageType(L::AdjointOperator) = domainStorageType(L.A)
nantonel marked this conversation as resolved.
Show resolved Hide resolved

fun_name(L::AdjointOperator) = fun_name(L.A)*"ᵃ"

Expand Down
117 changes: 56 additions & 61 deletions src/linearoperators/Conv.jl
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
export Conv

abstract type AbstractConv{T,N,H<:AbstractArray} <: LinearOperator end

"""
`Conv([domainType=Float64::Type,] dim_in::Tuple, h::AbstractVector)`

Expand All @@ -8,91 +10,84 @@ export Conv
Creates a `LinearOperator` which, when multiplied with an array `x::AbstractVector`, returns the convolution between `x` and `h`. Uses `conv` and hence FFT algorithm.

"""
struct Conv{T,
H <: AbstractVector{T},
Hc <: AbstractVector,
} <: LinearOperator
dim_in::Tuple{Int}
h::H
buf::H
buf_c1::Hc
buf_c2::Hc
R::AbstractFFTs.Plan
I::AbstractFFTs.Plan
# N-dimensional FFT-based linear convolution operator.
# Type parameters: T = element type, N = dimensionality, H = storage type of
# the kernel and real scratch buffer (e.g. Array or CuArray), Hc = storage
# type of the frequency-domain buffers (complex when T is real).
struct Conv{T,N,H<:AbstractArray{T,N},Hc} <: AbstractConv{T,N,H}
    dim_in::NTuple{N,Int}  # size of the input (domain) array
    h::H                   # convolution kernel
    buf::H                 # zero-padded scratch buffer, size dim_in .+ size(h) .- 1
    buf_c1::Hc             # frequency-domain buffer for the transformed kernel
    buf_c2::Hc             # frequency-domain buffer for the transformed input / product
    R::AbstractFFTs.Plan   # forward (r)fft plan; NOTE(review): abstract field type, not concretely parametrized
    I::AbstractFFTs.Plan   # inverse plan
end

# Constructors

isTypeReal(::Type{T}) where {T} = T <: Real

###standard constructor
function Conv(DomainType::Type, dim_in::Tuple{Int}, h::H) where {H<:AbstractVector}
eltype(h) != DomainType && error("eltype(h) is $(eltype(h)), should be $(DomainType)")
function Conv(domainType::Type, dim_in::NTuple{N,Int}, h::H) where {N,H<:AbstractArray}
eltype(h) != domainType && error("eltype(h) is $(eltype(h)), should be $(domainType)")

if isTypeReal(DomainType)
buf = zeros(DomainType,dim_in[1]+length(h)-1)
buf = similar(h, domainType, dim_in .+ size(h) .- 1)
if isTypeReal(domainType)
R = plan_rfft(buf)
buf_c1 = zeros(Complex{DomainType}, div(dim_in[1]+length(h)-1,2)+1)
buf_c2 = zeros(Complex{DomainType}, div(dim_in[1]+length(h)-1,2)+1)
I = plan_irfft(buf_c1,dim_in[1]+length(h)-1)
complex_type = Complex{domainType}
buf_size = ntuple(d -> d == 1 ? size(buf, d) >> 1 + 1 : size(buf, d), Val(N))
buf_c1 = similar(h, Complex{domainType}, buf_size)
I = plan_irfft(buf_c1, size(buf, 1))
else
buf = zeros(DomainType,dim_in[1]+length(h)-1)
R = plan_fft(buf)
buf_c1 = zeros(DomainType, div(dim_in[1]+length(h)-1,2)+1)
buf_c2 = zeros(DomainType, div(dim_in[1]+length(h)-1,2)+1)
I = plan_ifft(buf_c1,dim_in[1]+length(h)-1)
buf_c1 = similar(buf)
complex_type = domainType
I = FFTW.plan_inv(R)
end
Conv{DomainType, H, typeof(buf_c1)}(dim_in,h,buf,buf_c1,buf_c2,R,I)
buf_c2 = similar(buf_c1)
return Conv{domainType,N,H,typeof(buf_c1)}(dim_in, h, buf, buf_c1, buf_c2, R, I)
end

Conv(dim_in::NTuple{N,Int}, h::H) where {H<:AbstractVector, N} = Conv(eltype(h), dim_in, h)
Conv(x::H, h::H) where {H} = Conv(eltype(x), size(x), h)
# Convenience constructors: infer the domain element type from the kernel `h`,
# or infer eltype and input size from a prototype input array `x`.
Conv(dim_in::NTuple{N,Int}, h::H) where {H<:AbstractArray,N} = Conv(eltype(h), dim_in, h)
Conv(x::H, h::H) where {H<:AbstractArray} = Conv(eltype(x), size(x), h)

# Mappings

function mul!(y::H, A::Conv{T,H}, b::H) where {T, H}
#y .= conv(A.h,b) #naive implementation
for i in eachindex(A.buf)
A.buf[i] = i <= length(A.h) ? A.h[i] : zero(T)
end
mul!(A.buf_c1, A.R, A.buf)
for i in eachindex(A.buf)
A.buf[i] = i <= length(b) ? b[i] : zero(T)
end
mul!(A.buf_c2, A.R, A.buf)
A.buf_c2 .*= A.buf_c1
mul!(y,A.I,A.buf_c2)

# Forward map: y = full linear convolution of the kernel `A.h` with `b`,
# computed through the pre-allocated FFT buffers (no allocation at call time
# apart from what the broadcasts below require).
function mul!(y::AbstractArray{T,N}, A::AbstractConv{T,N}, b::AbstractArray{T,N}) where {T,N}
    # naive equivalent: y .= conv(A.h, b)
    fill!(A.buf, zero(T))
    A.buf[CartesianIndices(A.h)] .= A.h  # zero-pad kernel to the full output size
    mul!(A.buf_c1, A.R, A.buf)           # buf_c1 = transform of padded kernel
    fill!(A.buf, zero(T))
    A.buf[CartesianIndices(b)] .= b      # zero-pad input
    mul!(A.buf_c2, A.R, A.buf)           # buf_c2 = transform of padded input
    A.buf_c2 .*= A.buf_c1                # pointwise product = convolution in frequency domain
    return mul!(y, A.I, A.buf_c2)        # inverse transform written directly into y
end

function mul!(y::H, L::AdjointOperator{C}, b::H) where {T, H, C <: Conv{T,H}}
#y .= xcorr(b,L.A.h)[size(L.A,1)[1]:end-length(L.A.h)+1] #naive implementation
for i in eachindex(L.A.buf)
ii = length(L.A.buf)-i+1
L.A.buf[ii] = i <= length(L.A.h) ? L.A.h[i] : zero(T)
end
mul!(L.A.buf_c1, L.A.R, L.A.buf)
for i in eachindex(L.A.buf)
L.A.buf[i] = b[i]
end
mul!(L.A.buf_c2, L.A.R, L.A.buf)
L.A.buf_c2 .*= L.A.buf_c1
mul!(L.A.buf,L.A.I,L.A.buf_c2)
y[1] = L.A.buf[end]
for i = 2:length(y)
y[i] = L.A.buf[i-1]
end
# Adjoint map: y = Aᴴ b, i.e. the cross-correlation of `b` with the kernel,
# computed as inverse-FFT(conj(FFT(h)) .* FFT(b)) and cropped to the domain size.
function mul!(
    y::AbstractArray{T,N}, L::AdjointOperator{C}, b::AbstractArray{T,N}
) where {T,N,C<:AbstractConv{T,N}}
    # naive 1-D equivalent: y .= xcorr(b, L.A.h) cropped to the valid range
    fill!(L.A.buf, zero(T))
    L.A.buf[CartesianIndices(L.A.h)] .= L.A.h  # zero-pad kernel to buffer size
    mul!(L.A.buf_c1, L.A.R, L.A.buf)           # transform of padded kernel
    fill!(L.A.buf, zero(T))
    L.A.buf[CartesianIndices(b)] .= b          # b has the codomain (buffer) size; copy in
    mul!(L.A.buf_c2, L.A.R, L.A.buf)           # transform of b
    # conjugating the kernel spectrum turns convolution into correlation;
    # NOTE(review): conj.(...) allocates a temporary each call
    L.A.buf_c2 .*= conj.(L.A.buf_c1)
    mul!(L.A.buf, L.A.I, L.A.buf_c2)
    return y .= L.A.buf[CartesianIndices(y)]   # crop leading block to domain size
end

# Properties

domainType(L::Conv{T}) where {T} = T
codomainType(L::Conv{T}) where {T} = T
# Element type of domain/codomain, and the concrete array storage type
# (e.g. Array vs CuArray) used for dispatch and display.
domainType(::AbstractConv{T}) where {T} = T
codomainType(::AbstractConv{T}) where {T} = T
domainStorageType(::AbstractConv{T,N,H}) where {T,N,H} = H
codomainStorageType(::AbstractConv{T,N,H}) where {T,N,H} = H

#TODO find out a way to verify this,
is_full_row_rank(L::Conv) = true
is_full_column_rank(L::Conv) = true
# TODO(from upstream comment): no verification yet — full rank is assumed
# for any kernel, which is not guaranteed (e.g. an all-zero kernel).
is_full_row_rank(::AbstractConv) = true
is_full_column_rank(::AbstractConv) = true

size(L::Conv) = (L.dim_in[1]+length(L.h)-1,), L.dim_in
size(L::AbstractConv) = (L.dim_in[1] + length(L.h) - 1,), L.dim_in

fun_name(A::Conv) = "★"
fun_name(::AbstractConv) = "★"
66 changes: 38 additions & 28 deletions src/linearoperators/Eye.jl
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
export Eye

abstract type AbstractEye{T,N,S<:AbstractArray} <: LinearOperator end

"""
`Eye([domainType=Float64::Type,] dim_in::Tuple)`

Expand All @@ -20,42 +22,50 @@ true
```

"""
struct Eye{T, N} <: LinearOperator
dim::NTuple{N, Integer}
# Identity operator. T = element type, N = dimensionality, S = storage type
# of the arrays it acts on (e.g. Array or CuArray); only the size is stored.
struct Eye{T,N,S<:AbstractArray{T,N}} <: AbstractEye{T,N,S}
    dim::NTuple{N,Integer}  # NOTE(review): abstract Integer eltype; NTuple{N,Int} would be concrete
end

# Constructors
###standard constructor Operator{N}(DomainType::Type, DomainDim::NTuple{N,Int})
Eye(DomainType::Type, DomainDim::NTuple{N,Int}) where {N} = Eye{DomainType,N}(DomainDim)
# Standard constructor: Eye(domainType, domainDim[, storageType]).
# `storageType` defaults to a plain CPU Array of matching eltype and rank,
# so existing CPU callers are unaffected by the new parameter.
function Eye(
    domainType::Type{T}, domainDim::NTuple{N,Int}, storageType::Type{S}=Array{T,N}
) where {N,T,S<:AbstractArray{T,N}}
    return Eye{domainType,N,storageType}(domainDim)
end
###

Eye(t::Type, dims::Vararg{Integer}) = Eye(t,dims)
Eye(dims::NTuple{N, Integer}) where {N} = Eye(Float64,dims)
Eye(dims::Vararg{Integer}) = Eye(Float64,dims)
Eye(x::A) where {A <: AbstractArray} = Eye(eltype(x), size(x))
# Convenience constructors: element type defaults to Float64;
# Eye(x) infers eltype, size, and storage type from the prototype array `x`.
Eye(t::Type, dims::Vararg{Integer}) = Eye(t, dims)
Eye(dims::NTuple{N,Integer}) where {N} = Eye(Float64, dims)
Eye(dims::Vararg{Integer}) = Eye(Float64, dims)
Eye(x::A) where {A<:AbstractArray} = Eye(eltype(x), size(x), typeof(x))

# Mappings

mul!(y::AbstractArray{T, N}, L::Eye{T, N}, b::AbstractArray{T, N}) where {T, N} = y .= b
mul!(y::AbstractArray{T, N}, L::AdjointOperator{Eye{T, N}}, b::AbstractArray{T, N}) where {T, N} = mul!(y, L.A, b)
# Identity mappings: forward and adjoint both copy `b` into `y` in place.
mul!(y::AbstractArray{T,N}, ::AbstractEye{T,N}, b::AbstractArray{T,N}) where {T,N} = y .= b
mul!(
    y::AbstractArray{T,N}, ::AdjointOperator{E}, b::AbstractArray{T,N}
) where {T,N,E<:AbstractEye{T,N}} = y .= b

# Properties
diag(L::Eye) = 1.
diag_AcA(L::Eye) = 1.
diag_AAc(L::Eye) = 1.

domainType(L::Eye{T, N}) where {T, N} = T
codomainType(L::Eye{T, N}) where {T, N} = T

size(L::Eye) = (L.dim, L.dim)

fun_name(L::Eye) = "I"

is_eye(L::Eye) = true
is_diagonal(L::Eye) = true
is_AcA_diagonal(L::Eye) = true
is_AAc_diagonal(L::Eye) = true
is_orthogonal(L::Eye) = true
is_invertible(L::Eye) = true
is_full_row_rank(L::Eye) = true
is_full_column_rank(L::Eye) = true
# The diagonal of I, and of AᴴA / AAᴴ, are all ones.
diag(::AbstractEye) = 1.0
diag_AcA(::AbstractEye) = 1.0
diag_AAc(::AbstractEye) = 1.0

# Element type and storage type (e.g. Array vs CuArray) of domain/codomain.
domainType(::AbstractEye{T,N}) where {T,N} = T
codomainType(::AbstractEye{T,N}) where {T,N} = T
domainStorageType(::AbstractEye{T,N,S}) where {T,N,S} = S
codomainStorageType(::AbstractEye{T,N,S}) where {T,N,S} = S

size(L::AbstractEye) = (L.dim, L.dim)

fun_name(::AbstractEye) = "I"

# Structural properties of the identity operator.
is_eye(::AbstractEye) = true
is_diagonal(::AbstractEye) = true
is_AcA_diagonal(::AbstractEye) = true
is_AAc_diagonal(::AbstractEye) = true
is_orthogonal(::AbstractEye) = true
is_invertible(::AbstractEye) = true
is_full_row_rank(::AbstractEye) = true
is_full_column_rank(::AbstractEye) = true
5 changes: 4 additions & 1 deletion src/properties.jl
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,9 @@ allocate(::Type{T}, dims...) where {T <: AbstractArray} = T(undef, dims...)
allocate(::Type{ArrayPartition{T,S}}, dims...) where {T,S} =
ArrayPartition([allocate(s, d...) for (s,d) in zip(S.parameters, dims)]...)

# Superscript tag appended to an operator's display symbol to indicate its
# storage backend; empty by default, overridden by package extensions
# (e.g. "ᶜᵘ" for CuArray in AbstractOperatorsCudaExt).
storageTypeDisplayString(::Type{T}) where {T <: AbstractArray} = ""
storageDisplayString(L::AbstractOperator) = storageTypeDisplayString(codomainStorageType(L))

"""
`size(A::AbstractOperator, [dom,])`

Expand Down Expand Up @@ -370,7 +373,7 @@ end

#printing
function Base.show(io::IO, L::AbstractOperator)
print(io, fun_name(L)*" "*fun_space(L))
print(io, fun_name(L)*storageDisplayString(L)*" "*fun_space(L))
end

function fun_space(L::AbstractOperator)
Expand Down
2 changes: 1 addition & 1 deletion test/runtests.jl
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
using AbstractOperators
using LinearAlgebra, FFTW, DSP, SparseArrays, RecursiveArrayTools
using LinearAlgebra, FFTW, DSP, SparseArrays, RecursiveArrayTools, CUDA
using Printf
using Random
using Test
Expand Down
15 changes: 15 additions & 0 deletions test/test_linear_operators.jl
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,24 @@ y2 = conv(x1,h)

@test all(norm.(y1 .- y2) .<= 1e-12)

# Adjoint: compare op' against DSP.xcorr cropped to the valid range.
z1 = op' * y1;
# NOTE(review): `size(op.h,1)[1]` indexes an Int (valid in Julia, numbers are
# 1-element indexable) — equivalent to just `size(op.h,1)`.
z2 = xcorr(y1, h)[size(op.h,1)[1]:end-length(op.h)+1];
@test all(norm.(z1 .- z2) .<= 1e-12)

# other constructors
op = Conv(x1,h)

# CUDA
# CUDA: repeat the forward/adjoint checks on the GPU when one is available;
# results are copied back with Array(...) and compared against the CPU values.
if CUDA.functional()
    cu_h = CuArray(h)
    cu_op = Conv(Float64,(n,),cu_h)
    cu_x1 = CuArray(x1)
    cu_y1 = cu_op * cu_x1
    @test all(norm.(y1 .- Array(cu_y1)) .<= 1e-12)
    cu_z1 = cu_op' * cu_y1
    @test all(norm.(z1 .- Array(cu_z1)) .<= 1e-12)
end

#properties
@test is_linear(op) == true
@test is_null(op) == false
Expand Down
Loading