support mask_rcnn for kunlun (PaddlePaddle#1890)
* support mask_rcnn for kunlun

* minor
QingshuChen authored Dec 15, 2020
1 parent 2486a94 commit 6541208
Showing 3 changed files with 61 additions and 5 deletions.
20 changes: 20 additions & 0 deletions ppdet/utils/check.py
@@ -28,12 +28,32 @@
 
 __all__ = [
     'check_gpu',
+    'check_xpu',
     'check_version',
     'check_config',
     'check_py_func',
 ]
 
 
+def check_xpu(use_xpu):
+    """
+    Log error and exit when set use_xpu=true in paddlepaddle
+    cpu/gpu version.
+    """
+    err = "Config use_xpu cannot be set as true while you are " \
+          "using paddlepaddle cpu/gpu version ! \nPlease try: \n" \
+          "\t1. Install paddlepaddle-xpu to run model on XPU \n" \
+          "\t2. Set use_xpu as false in config file to run " \
+          "model on CPU/GPU"
+
+    try:
+        if use_xpu and not fluid.is_compiled_with_xpu():
+            logger.error(err)
+            sys.exit(1)
+    except Exception as e:
+        pass
+
+
 def check_gpu(use_gpu):
     """
     Log error and exit when set use_gpu=true in paddlepaddle
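
For orientation, a minimal usage sketch of the new helper (a hedged illustration, assuming the fluid 1.x API used throughout this diff and a PaddlePaddle build with XPU support):

    import paddle.fluid as fluid
    from ppdet.utils.check import check_xpu

    use_xpu = True
    check_xpu(use_xpu)  # logs the error above and exits if this build lacks XPU support
    place = fluid.XPUPlace(0) if use_xpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
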
18 changes: 16 additions & 2 deletions tools/eval.py
@@ -27,7 +27,7 @@
 
 from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results, json_eval_results
 import ppdet.utils.checkpoint as checkpoint
-from ppdet.utils.check import check_gpu, check_version, check_config, enable_static_mode
+from ppdet.utils.check import check_gpu, check_xpu, check_version, check_config, enable_static_mode
 
 from ppdet.data.reader import create_reader
 
@@ -49,15 +49,27 @@ def main():
     check_config(cfg)
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
+    use_xpu = False
+    if hasattr(cfg, 'use_xpu'):
+        check_xpu(cfg.use_xpu)
+        use_xpu = cfg.use_xpu
     # check if paddlepaddle version is satisfied
     check_version()
 
+    assert not (use_xpu and cfg.use_gpu), \
+        'Can not run on both XPU and GPU'
+
     main_arch = cfg.architecture
 
     multi_scale_test = getattr(cfg, 'MultiScaleTEST', None)
 
     # define executor
-    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
+    if cfg.use_gpu:
+        place = fluid.CUDAPlace(0)
+    elif use_xpu:
+        place = fluid.XPUPlace(0)
+    else:
+        place = fluid.CPUPlace()
     exe = fluid.Executor(place)
 
     # build program
@@ -91,6 +103,8 @@ def main():
         return
 
     compile_program = fluid.CompiledProgram(eval_prog).with_data_parallel()
+    if use_xpu:
+        compile_program = eval_prog
 
     assert cfg.metric != 'OID', "eval process of OID dataset \
                         is not supported."
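
On XPU the data-parallel CompiledProgram is skipped and the raw eval program is handed to the executor. A hedged sketch of the resulting run call (eval_prog, exe, and use_xpu come from the diff above; feed and fetch_list stand in for the reader's own feed data and fetch variables):

    program_to_run = eval_prog if use_xpu else \
        fluid.CompiledProgram(eval_prog).with_data_parallel()
    outs = exe.run(program_to_run,
                   feed=feed,
                   fetch_list=fetch_list,
                   return_numpy=False)
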
28 changes: 25 additions & 3 deletions tools/train.py
@@ -43,7 +43,7 @@
 from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results
 from ppdet.utils.stats import TrainingStats
 from ppdet.utils.cli import ArgsParser
-from ppdet.utils.check import check_gpu, check_version, check_config, enable_static_mode
+from ppdet.utils.check import check_gpu, check_xpu, check_version, check_config, enable_static_mode
 import ppdet.utils.checkpoint as checkpoint
 
 import logging
@@ -73,9 +73,16 @@ def main():
     check_config(cfg)
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
+    use_xpu = False
+    if hasattr(cfg, 'use_xpu'):
+        check_xpu(cfg.use_xpu)
+        use_xpu = cfg.use_xpu
     # check if paddlepaddle version is satisfied
     check_version()
 
+    assert not (use_xpu and cfg.use_gpu), \
+        'Can not run on both XPU and GPU'
+
     save_only = getattr(cfg, 'save_prediction_only', False)
     if save_only:
         raise NotImplementedError('The config file only support prediction,'
@@ -84,14 +91,25 @@
 
     if cfg.use_gpu:
         devices_num = fluid.core.get_cuda_device_count()
+    elif use_xpu:
+        # ToDo(qingshu): XPU only support single card now
+        devices_num = 1
     else:
         devices_num = int(os.environ.get('CPU_NUM', 1))
 
-    if 'FLAGS_selected_gpus' in env:
+    if cfg.use_gpu and 'FLAGS_selected_gpus' in env:
         device_id = int(env['FLAGS_selected_gpus'])
+    elif use_xpu and 'FLAGS_selected_xpus' in env:
+        device_id = int(env['FLAGS_selected_xpus'])
     else:
         device_id = 0
-    place = fluid.CUDAPlace(device_id) if cfg.use_gpu else fluid.CPUPlace()
+
+    if cfg.use_gpu:
+        place = fluid.CUDAPlace(device_id)
+    elif use_xpu:
+        place = fluid.XPUPlace(device_id)
+    else:
+        place = fluid.CPUPlace()
     exe = fluid.Executor(place)
 
     lr_builder = create('LearningRate')
@@ -184,9 +202,13 @@ def main():
            loss_name=loss.name,
            build_strategy=build_strategy,
            exec_strategy=exec_strategy)
+    if use_xpu:
+        compiled_train_prog = train_prog
 
     if FLAGS.eval:
         compiled_eval_prog = fluid.CompiledProgram(eval_prog)
+        if use_xpu:
+            compiled_eval_prog = eval_prog
 
     fuse_bn = getattr(model.backbone, 'norm_type', None) == 'affine_channel'
 
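As the ToDo above notes, XPU training currently uses a single card, so device selection mirrors the GPU path but reads FLAGS_selected_xpus. A hedged sketch of that selection in isolation (assumes the fluid 1.x API; a missing flag falls back to card 0):

    import os
    import paddle.fluid as fluid

    device_id = int(os.environ.get('FLAGS_selected_xpus', 0))  # single XPU card for now
    place = fluid.XPUPlace(device_id)
    exe = fluid.Executor(place)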
