Add more tests to cover different use cases
commit 0168438 · 1 parent a0e32c0
Showing 2 changed files with 249 additions and 0 deletions.
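
The new tests exercise the DLPack interchange protocol (__dlpack__,
__dlpack_device__, from_dlpack) for dpnp arrays. As a minimal sketch of the
round-trip under test (assuming a dpnp build with a default SYCL device
available):

    import dpnp

    x = dpnp.arange(5)
    y = dpnp.from_dlpack(x)  # re-import x through the DLPack protocol
    assert (x == y).all()    # same values; the tests below also check that
                             # producer and consumer share a data pointer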
@@ -0,0 +1,127 @@
import sys
import pytest

import numpy
import dpnp
from numpy.testing import assert_array_equal
from .helper import (
    get_all_dtypes,
)

device_oneAPI = 14  # DLDeviceType.kDLOneAPI
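
# A DLPack device is a (device_type, device_id) pair; 14 is the
# DLDeviceType.kDLOneAPI enumerator, so every dpnp array below is expected to
# report it, e.g. dpnp.arange(5).__dlpack_device__() -> (14, 0)
# (illustrative; the device id depends on the default SYCL device).
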
class TestDLPack:
    # @pytest.mark.parametrize("max_version", [(0, 0), None, (1, 0), (100, 3)])
    # def test_dunder_dlpack_refcount(self, max_version):
    #     x = dpnp.arange(5)
    #     y = x.__dlpack__(max_version=max_version)
    #     print()
    #     print(sys.getrefcount(x), sys.getrefcount(y))
    #     assert sys.getrefcount(x) == 2
    #     del y
    #     print(sys.getrefcount(x))
    #     assert sys.getrefcount(x) == 2

    @pytest.mark.parametrize("stream", [None, 1])
    def test_stream(self, stream):
        x = dpnp.arange(5)
        x.__dlpack__(stream=stream)

    @pytest.mark.parametrize("copy", [True, None, False])
    def test_copy(self, copy):
        x = dpnp.arange(5)
        x.__dlpack__(copy=copy)

    def test_wrong_copy(self):
        x = dpnp.arange(5)
        # an array is not a valid value for the boolean ``copy`` flag and
        # should be rejected; the exact exception type is an assumption
        # (ValueError) here
        with pytest.raises(ValueError):
            x.__dlpack__(copy=dpnp.array([1, 2, 3]))
@pytest.mark.parametrize("xp", [dpnp, numpy]) | ||
@pytest.mark.parametrize("dt", get_all_dtypes(no_none=True)) | ||
def test_dtype_passthrough(self, xp, dt): | ||
x = xp.arange(5).astype(dt) | ||
y = xp.from_dlpack(x) | ||
|
||
assert y.dtype == x.dtype | ||
assert_array_equal(x, y) | ||
|
||
@pytest.mark.parametrize("xp", [dpnp, numpy]) | ||
def test_non_contiguous(self, xp): | ||
x = xp.arange(25).reshape((5, 5)) | ||
|
||
y1 = x[0] | ||
assert_array_equal(y1, xp.from_dlpack(y1)) | ||
|
||
y2 = x[:, 0] | ||
assert_array_equal(y2, xp.from_dlpack(y2)) | ||
|
||
y3 = x[1, :] | ||
assert_array_equal(y3, xp.from_dlpack(y3)) | ||
|
||
y4 = x[1] | ||
assert_array_equal(y4, xp.from_dlpack(y4)) | ||
|
||
y5 = xp.diagonal(x).copy() | ||
assert_array_equal(y5, xp.from_dlpack(y5)) | ||
|
||
def test_device(self): | ||
x = dpnp.arange(5) | ||
assert x.__dlpack_device__()[0] == device_oneAPI | ||
y = dpnp.from_dlpack(x) | ||
assert y.__dlpack_device__()[0] == device_oneAPI | ||
z = y[::2] | ||
assert z.__dlpack_device__()[0] == device_oneAPI | ||
|
||
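
    # The remaining tests are carried over from NumPy's test_dlpack.py (note
    # the np namespace and the max_version/dl_device/device keywords of
    # DLPack 1.0); they are kept disabled, presumably until dpnp gains
    # support for these features.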
    # def dlpack_deleter_exception(self, max_version):
    #     x = np.arange(5)
    #     _ = x.__dlpack__(max_version=max_version)
    #     raise RuntimeError

    # @pytest.mark.parametrize("max_version", [None, (1, 0)])
    # def test_dlpack_destructor_exception(self, max_version):
    #     with pytest.raises(RuntimeError):
    #         self.dlpack_deleter_exception(max_version=max_version)

    # def test_readonly(self):
    #     x = np.arange(5)
    #     x.flags.writeable = False
    #     # Raises without max_version
    #     with pytest.raises(BufferError):
    #         x.__dlpack__()

    #     # But works fine if we try with version
    #     y = np.from_dlpack(x)
    #     assert not y.flags.writeable

    # def test_ndim0(self):
    #     x = np.array(1.0)
    #     y = np.from_dlpack(x)
    #     assert_array_equal(x, y)

    # def test_size1dims_arrays(self):
    #     x = np.ndarray(dtype='f8', shape=(10, 5, 1), strides=(8, 80, 4),
    #                    buffer=np.ones(1000, dtype=np.uint8), order='F')
    #     y = np.from_dlpack(x)
    #     assert_array_equal(x, y)

    # def test_copy(self):
    #     x = np.arange(5)

    #     y = np.from_dlpack(x)
    #     assert np.may_share_memory(x, y)
    #     y = np.from_dlpack(x, copy=False)
    #     assert np.may_share_memory(x, y)
    #     y = np.from_dlpack(x, copy=True)
    #     assert not np.may_share_memory(x, y)

    # def test_device(self):
    #     x = np.arange(5)
    #     # requesting (1, 0), i.e. CPU device works in both calls:
    #     x.__dlpack__(dl_device=(1, 0))
    #     np.from_dlpack(x, device="cpu")
    #     np.from_dlpack(x, device=None)

    #     with pytest.raises(ValueError):
    #         x.__dlpack__(dl_device=(10, 0))
    #     with pytest.raises(ValueError):
    #         np.from_dlpack(x, device="gpu")
@@ -0,0 +1,122 @@
import unittest

import pytest

import numpy
import dpnp as cupy  # this file is adapted from CuPy's DLPack tests,
                     # with dpnp standing in under the ``cupy`` name
import dpctl.tensor._dlpack as dlp
from tests.third_party.cupy import testing


def _gen_array(dtype):
    if cupy.issubdtype(dtype, numpy.unsignedinteger):
        array = cupy.random.randint(0, 10, size=(2, 3)).astype(dtype)
    elif cupy.issubdtype(dtype, cupy.integer):
        array = cupy.random.randint(-10, 10, size=(2, 3)).astype(dtype)
    elif cupy.issubdtype(dtype, cupy.floating):
        array = cupy.random.rand(2, 3).astype(dtype)
    elif cupy.issubdtype(dtype, cupy.complexfloating):
        array = cupy.random.random((2, 3)).astype(dtype)
    elif dtype == cupy.bool_:
        array = cupy.random.randint(0, 2, size=(2, 3)).astype(cupy.bool_)
    else:
        assert False, f'unrecognized dtype: {dtype}'
    return array
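
# __dlpack__() returns a PyCapsule named "dltensor"; dlp.from_dlpack_capsule
# consumes it without copying the data, which is why the tests below can
# assert that producer and consumer report the same data pointer.
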
class TestDLPackConversion(unittest.TestCase):
    @testing.for_all_dtypes(no_bool=False)
    def test_conversion(self, dtype):
        orig_array = _gen_array(dtype)
        tensor = orig_array.__dlpack__()
        out_array = dlp.from_dlpack_capsule(tensor)
        testing.assert_array_equal(orig_array, out_array)
        assert orig_array._pointer == out_array._pointer


@testing.parameterize(*testing.product({
    'memory': ('device', 'managed')
}))
class TestNewDLPackConversion(unittest.TestCase):
    @testing.for_all_dtypes(no_bool=False)
    def test_conversion(self, dtype):
        orig_array = _gen_array(dtype)
        out_array = cupy.from_dlpack(orig_array)
        testing.assert_array_equal(orig_array, out_array)
        assert orig_array._pointer == out_array._pointer

    @pytest.mark.skip(reason="no stream support")
    def test_stream(self):
        # Kept verbatim from CuPy: ``cuda`` and ``self._get_stream`` are not
        # defined in this module, which is harmless only while the test
        # stays skipped.
        allowed_streams = ['null', True]
        if not cuda.runtime.is_hip:
            allowed_streams.append('ptds')

        # stream order is automatically established via DLPack protocol
        for src_s in [self._get_stream(s) for s in allowed_streams]:
            for dst_s in [self._get_stream(s) for s in allowed_streams]:
                with src_s:
                    orig_array = _gen_array(cupy.float32)
                    # If src_s != dst_s, dst_s waits until src_s complete.
                    # Null stream (0) must be passed as streamLegacy (1)
                    # on CUDA.
                    if not cuda.runtime.is_hip and dst_s.ptr == 0:
                        s_ptr = 1
                    else:
                        s_ptr = dst_s.ptr
                    dltensor = orig_array.__dlpack__(stream=s_ptr)

                with dst_s:
                    out_array = cupy.from_dlpack(dltensor)
                    testing.assert_array_equal(orig_array, out_array)
                    testing.assert_array_equal(
                        orig_array.data.ptr, out_array.data.ptr)
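
# The memory-pool checks in TestDLTensorMemory below are commented out: they
# rely on CuPy's cupy.cuda.MemoryPool allocator, which dpnp does not provide,
# so only the capsule-lifetime assertions remain active.
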
class TestDLTensorMemory(unittest.TestCase):
    # def setUp(self):
    #     self.old_pool = cupy.get_default_memory_pool()
    #     self.pool = cupy.cuda.MemoryPool()
    #     cupy.cuda.set_allocator(self.pool.malloc)

    # def tearDown(self):
    #     self.pool.free_all_blocks()
    #     cupy.cuda.set_allocator(self.old_pool.malloc)

    def test_deleter(self):
        # memory is freed when tensor is deleted, as it's not consumed
        array = cupy.empty(10)
        tensor = array.__dlpack__()
        # str(tensor): <capsule object "dltensor" at 0x7f7c4c835330>
        assert "\"dltensor\"" in str(tensor)
        # assert self.pool.n_free_blocks() == 0
        # del array
        # assert self.pool.n_free_blocks() == 0
        # del tensor
        # assert self.pool.n_free_blocks() == 1
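
    # Per the DLPack protocol, a consumer renames the capsule from "dltensor"
    # to "used_dltensor" once it takes ownership; test_deleter2 relies on
    # that rename to distinguish a consumed capsule from a fresh one.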

    def test_deleter2(self):
        # memory is freed when array2 is deleted, as tensor is consumed
        array = cupy.empty(10)
        tensor = array.__dlpack__()
        assert "\"dltensor\"" in str(tensor)
        array2 = dlp.from_dlpack_capsule(tensor)
        assert "\"used_dltensor\"" in str(tensor)
        # assert self.pool.n_free_blocks() == 0
        # del array
        # assert self.pool.n_free_blocks() == 0
        # del array2
        # assert self.pool.n_free_blocks() == 1
        # del tensor
        # assert self.pool.n_free_blocks() == 1

    def test_multiple_consumption_error(self):
        # Prevent segfault, see #3611
        array = cupy.empty(10)
        tensor = array.__dlpack__()
        array2 = dlp.from_dlpack_capsule(tensor)
        with pytest.raises(ValueError) as e:
            array3 = dlp.from_dlpack_capsule(tensor)
        assert 'consumed multiple times' in str(e.value)