Skip to content

Commit

Permalink
* add PCI id to device description
Browse files Browse the repository at this point in the history
* when testing each gpu, add separator in log output
* test only one dimension when testing multiple GPUs

git-svn-id: https://xpra.org/svn/Xpra/trunk@4349 3bb7dfac-3a0b-4e04-842a-767bc560f471
  • Loading branch information
totaam committed Sep 18, 2013
1 parent a9a93d5 commit 4f6151b
Show file tree
Hide file tree
Showing 3 changed files with 15 additions and 9 deletions.
4 changes: 2 additions & 2 deletions src/tests/xpra/codecs/test_encoder.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,10 +8,10 @@
from tests.xpra.codecs.test_codec import make_planar_input, make_rgb_input
from xpra.codecs.image_wrapper import ImageWrapper

# Default (width, height) pairs exercised by test_encoder when the caller
# does not supply its own dimensions list.
# NOTE: the flattened diff carried both the old tuple form and the new list
# form of this assignment; only the post-commit list form is kept.
DEFAULT_TEST_DIMENSIONS = [(32, 32), (1920, 1080), (512, 512)]


def test_encoder(encoder_module, dimensions=DEFAULT_TEST_DIMENSIONS, options={}):
def test_encoder(encoder_module, options={}, dimensions=DEFAULT_TEST_DIMENSIONS):
print("test_encoder(%s, %s)" % (encoder_module, dimensions))
print("colorspaces=%s" % encoder_module.get_colorspaces())
for c in encoder_module.get_colorspaces():
Expand Down
18 changes: 12 additions & 6 deletions src/tests/xpra/codecs/test_nvenc.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,29 +5,35 @@
# later version. See the file COPYING for details.

from tests.xpra.codecs.test_encoder import test_encoder
from xpra.codecs.nvenc import encoder, get_cuda_devices #@UnresolvedImport

TEST_DIMENSIONS = ((32, 32), (1920, 1080), (512, 512))

def test_encode():
    """Run the generic encoder test against the NVENC encoder module."""
    # Import lazily so merely loading this test module does not require nvenc.
    from xpra.codecs.nvenc import encoder  #@UnresolvedImport
    print("test_nvenc()")
    test_encoder(encoder)

def test_parallel_encode():
    """Test one NVENC encoder per CUDA device, sequentially.

    For every CUDA device reported by the encoder module, run the generic
    encoder test pinned to that device via the "cuda_device" option.
    Only a single small dimension is tested per device to keep the run short.
    """
    # NOTE: the flattened diff interleaved the pre-commit lines
    # (module-level get_cuda_devices(), old print/test calls) with the
    # post-commit ones; this is the reconstructed post-commit function.
    from xpra.codecs.nvenc import encoder  #@UnresolvedImport
    cuda_devices = encoder.get_cuda_devices()
    print("test_parallel_encode() will test one encoder on each of %s sequentially" % cuda_devices)
    # Only one (small) dimension when iterating over multiple GPUs
    TEST_DIMENSIONS = [(32, 32)]
    for device_id, info in cuda_devices.items():
        options = {"cuda_device" : device_id}
        # Visual separator in the log output between per-GPU runs
        print("")
        print("**********************************")
        print("**********************************")
        print("testing on %s : %s" % (device_id, info))
        test_encoder(encoder, options, TEST_DIMENSIONS)


def main():
    """Entry point: configure INFO logging to stdout and run the GPU tests."""
    # NOTE: the flattened diff carried both the removed call (test_encode())
    # and the added lines; this is the reconstructed post-commit body.
    import logging
    import sys
    logging.root.setLevel(logging.INFO)
    logging.root.addHandler(logging.StreamHandler(sys.stdout))
    print("main()")
    test_parallel_encode()
    #test_encode()


if __name__ == "__main__":
Expand Down
2 changes: 1 addition & 1 deletion src/xpra/codecs/nvenc/encoder.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -1085,7 +1085,7 @@ cdef cuda_init_devices():
raiseCuda(cuDeviceTotalMem(&totalMem, cuDevice), "cuDeviceTotalMem")
debug("device[%s]=%s (%sMB) - PCI: %s / %s - compute %s.%s (nvenc=%s)",
i, gpu_name, int(totalMem/1024/1024), pciBusID, pciDeviceID, SMmajor, SMminor, has_nvenc)
devices[i] = str(gpu_name)
devices[i] = "%s - PCI: %s / %s" % (gpu_name, pciBusID, pciDeviceID)
cuDeviceGetAttribute(&multiProcessorCount, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, cuDevice)
#log.info("multiProcessorCount=%s", multiProcessorCount)
#printf(" (%2d) Multiprocessors x (%3d) CUDA Cores/MP: %d CUDA Cores\n",
Expand Down

0 comments on commit 4f6151b

Please sign in to comment.