Skip to content

Commit

Permalink
Add implementation notes to host functionality (#401)
Browse files Browse the repository at this point in the history
  • Loading branch information
vchuravy authored Jun 20, 2023
1 parent 606b2c5 commit 5427d33
Show file tree
Hide file tree
Showing 3 changed files with 99 additions and 17 deletions.
87 changes: 71 additions & 16 deletions src/KernelAbstractions.jl
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,6 @@ KernelAbstractions primitives can be used in non-kernel functions.
!!! warn
This is an experimental feature.
"""
macro kernel(config, expr)
if config isa Expr && config.head == :(=) &&
Expand Down Expand Up @@ -97,13 +96,19 @@ macro Const end
copyto!(::Backend, dest::AbstractArray, src::AbstractArray)
Perform a `copyto!` operation that execution ordered with respect to the backend.
!!! note
Backend implementations **must** implement this function.
"""
function copyto! end

"""
    synchronize(::Backend)

Synchronize the current backend.

!!! note
    Backend implementations **must** implement this function.
"""
function synchronize end

Expand All @@ -114,12 +119,13 @@ Release the memory of an array for reuse by future allocations
and reduce pressure on the allocator.
After releasing the memory of an array, it should no longer be accessed.
This function is optional both to implement and call.

!!! note
    On CPU backend this is always a no-op.

!!! note
    Backend implementations **may** implement this function.
    If not implemented for a particular backend, default action is a no-op.
    Otherwise, it should be defined for the backend's array type.
"""
function unsafe_free! end

Expand Down Expand Up @@ -393,9 +399,17 @@ constify(arg) = adapt(ConstAdaptor(), arg)
###

"""
Abstract type for all KernelAbstractions backends.
"""
abstract type Backend end

"""
Abstract type for all GPU based KernelAbstractions backends.

!!! note
    New backend implementations **must** sub-type this abstract type.
"""
abstract type GPU <: Backend end

"""
Expand All @@ -412,6 +426,11 @@ struct CPU <: Backend
CPU(;static::Bool=false) = new(static)
end

"""
    isgpu(::Backend)::Bool

Returns true for all [`GPU`](@ref) backends.
"""
isgpu(::GPU) = true
isgpu(::CPU) = false

Expand All @@ -420,6 +439,10 @@ isgpu(::CPU) = false
get_backend(A::AbstractArray)::Backend
Get a [`Backend`](@ref) instance suitable for array `A`.
!!! note
Backend implementations **must** provide `get_backend` for their custom array type.
    It should be the same as the return type of [`allocate`](@ref).
"""
function get_backend end

Expand All @@ -438,39 +461,61 @@ get_backend(::Array) = CPU()
Adapt.adapt_storage(::CPU, a::Array) = a

"""
    allocate(::Backend, Type, dims...)::AbstractArray

Allocate a storage array appropriate for the computational backend.

!!! note
    Backend implementations **must** implement `allocate(::NewBackend, T, dims::Tuple)`
"""
allocate(backend::Backend, T, dims...) = allocate(backend, T, dims)
allocate(backend::Backend, T, dims::Tuple) = throw(MethodError(allocate, (backend, T, dims)))

"""
    zeros(::Backend, Type, dims...)::AbstractArray

Allocate a storage array appropriate for the computational backend filled with zeros.
"""
zeros(backend::Backend, T, dims...) = zeros(backend, T, dims)
function zeros(backend::Backend, ::Type{T}, dims::Tuple) where T
    data = allocate(backend, T, dims...)
    fill!(data, zero(T))
    return data
end

ones(backend, T, dims...) = ones(backend, T, dims)
function ones(backend, ::Type{T}, dims::Tuple) where T
"""
    ones(::Backend, Type, dims...)::AbstractArray

Allocate a storage array appropriate for the computational backend filled with ones.
"""
ones(backend::Backend, T, dims...) = ones(backend, T, dims)
function ones(backend::Backend, ::Type{T}, dims::Tuple) where T
    data = allocate(backend, T, dims)
    fill!(data, one(T))
    return data
end

"""
    supports_atomics(::Backend)::Bool

Returns whether `@atomic` operations are supported by the backend.

!!! note
    Backend implementations **must** implement this function,
    only if they **do not** support atomic operations with Atomix.
"""
supports_atomics(::Backend) = true

"""
    supports_float64(::Backend)::Bool

Returns whether `Float64` values are supported by the backend.

!!! note
    Backend implementations **must** implement this function,
    only if they **do not** support `Float64`.
"""
supports_float64(::Backend) = true

"""
priority!(::Backend, prio::Symbol)
Expand All @@ -479,6 +524,9 @@ Set the priority for the backend stream/queue. This is an optional
feature that backends may or may not implement. If a backend shall
support priorities it must accept `:high`, `:normal`, `:low`.
Where `:normal` is the default.
!!! note
Backend implementations **may** implement this function.
"""
function priority!(::Backend, prio::Symbol)
if !(prio in (:high, :normal, :low))
Expand All @@ -501,6 +549,13 @@ import .NDIteration: get
Kernel closure struct that is used to represent the backend
kernel on the host. `WorkgroupSize` is the number of workitems
in a workgroup.
!!! note
Backend implementations **must** implement:
```
(kernel::Kernel{<:NewBackend})(args...; ndrange=nothing, workgroupsize=nothing)
```
As well as the on-device functionality.
"""
struct Kernel{Backend, WorkgroupSize<:_Size, NDRange<:_Size, Fun}
backend::Backend
Expand Down
2 changes: 1 addition & 1 deletion test/convert.jl
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ using KernelAbstractions, Test
end

function convert_testsuite(backend, ArrayT)
ET = KernelAbstractions.supports_float64(backend) ? Float64 : Float32
ET = KernelAbstractions.supports_float64(backend()) ? Float64 : Float32

N = 32
d_A = ArrayT([rand(ET)*3 for i = 1:N])
Expand Down
27 changes: 27 additions & 0 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -20,3 +20,30 @@ kern_static(CPU(static=true), (1,))(A, ndrange=length(A))
@kernel cpu=false function my_no_cpu_kernel(a)
end
@test_throws ErrorException("This kernel is unavailable for backend CPU") my_no_cpu_kernel(CPU())

struct NewBackend <: KernelAbstractions.GPU end
# Check the defaults for a backend that implements nothing: mandatory host
# functions throw MethodError, optional queries return their documented
# defaults, and `priority!` still validates its argument.
@testset "Default host implementation" begin
    backend = NewBackend()
    @test KernelAbstractions.isgpu(backend) == true

    # Mandatory: backends must implement `synchronize`.
    @test_throws MethodError KernelAbstractions.synchronize(backend)

    # Mandatory: backends must implement `allocate(::NewBackend, T, dims::Tuple)`.
    @test_throws MethodError KernelAbstractions.allocate(backend, Float32, 1)
    @test_throws MethodError KernelAbstractions.allocate(backend, Float32, (1,))
    @test_throws MethodError KernelAbstractions.allocate(backend, Float32, 1, 2)

    # `zeros`/`ones` are built on `allocate`, so they inherit the MethodError.
    @test_throws MethodError KernelAbstractions.zeros(backend, Float32, 1)
    @test_throws MethodError KernelAbstractions.ones(backend, Float32, 1)

    # Optional capability queries default to `true`.
    @test KernelAbstractions.supports_atomics(backend) == true
    @test KernelAbstractions.supports_float64(backend) == true

    # `priority!` defaults to a no-op that accepts only :high/:normal/:low.
    @test KernelAbstractions.priority!(backend, :high) === nothing
    @test KernelAbstractions.priority!(backend, :normal) === nothing
    @test KernelAbstractions.priority!(backend, :low) === nothing

    @test_throws ErrorException KernelAbstractions.priority!(backend, :middle)

    # Host-side kernel invocation is also mandatory for backends.
    kernel = my_no_cpu_kernel(backend)
    @test_throws MethodError kernel()
end

0 comments on commit 5427d33

Please sign in to comment.