Merge pull request #30 from PaddlePaddle/develop
update
zhengya01 authored Apr 25, 2019
2 parents 17364eb + ad9d219 commit 19aa404
Showing 598 changed files with 80,754 additions and 55,452 deletions.
6 changes: 6 additions & 0 deletions .gitmodules
@@ -13,3 +13,9 @@
[submodule "PaddleNLP/knowledge-driven-dialogue"]
path = PaddleNLP/knowledge-driven-dialogue
url = https://github.com/baidu/knowledge-driven-dialogue
[submodule "PaddleNLP/language_representations_kit"]
path = PaddleNLP/language_representations_kit
url = https://github.com/PaddlePaddle/LARK
[submodule "PaddleNLP/knowledge_driven_dialogue"]
path = PaddleNLP/knowledge_driven_dialogue
url = https://github.com/baidu/knowledge-driven-dialogue/
7 changes: 4 additions & 3 deletions PaddleCV/gan/cycle_gan/README.md
@@ -1,4 +1,3 @@


Running the program examples in this directory requires the latest PaddlePaddle develop version. If your installed PaddlePaddle is older than this, please update it following the instructions in the [installation documentation](http://www.paddlepaddle.org/docs/develop/documentation/zh/build_and_install/pip_install_cn.html).

@@ -73,8 +72,8 @@ env CUDA_VISIBLE_DEVICES=0 python train.py
Run the following command to read multiple images and run inference:

```
- env CUDA_VISIBLE_DEVICE=0 python infer.py \
- --init_model="checkpoints/1" --input="./data/inputA/*" \
+ env CUDA_VISIBLE_DEVICES=0 python infer.py \
+ --init_model="output/checkpoints/1" --input="./data/horse2zebra/trainA/*" \
--input_style A --output="./output"
```

@@ -89,3 +88,5 @@ env CUDA_VISIBLE_DEVICE=0 python infer.py \
<img src="images/B2A.jpg" width="620" hspace='10'/> <br/>
<strong>Figure 3</strong>
</p>

> In the examples in this document, the GPU card to use can be changed by modifying `CUDA_VISIBLE_DEVICES`.
4 changes: 1 addition & 3 deletions PaddleCV/gan/cycle_gan/infer.py
@@ -2,7 +2,6 @@
import functools
import os
from PIL import Image
- from paddle.fluid import core
import paddle.fluid as fluid
import paddle
import numpy as np
@@ -44,15 +43,14 @@ def infer(args):
if not os.path.exists(args.output):
os.makedirs(args.output)
for file in glob.glob(args.input):
print "read %s" % file
image_name = os.path.basename(file)
image = Image.open(file)
image = image.resize((256, 256))
image = np.array(image) / 127.5 - 1
if len(image.shape) != 3:
continue
data = image.transpose([2, 0, 1])[np.newaxis, :].astype("float32")
- tensor = core.LoDTensor()
+ tensor = fluid.LoDTensor()
tensor.set(data, place)

fake_temp = exe.run(fetch_list=[fake.name], feed={"input": tensor})
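The functional change in `infer.py` (and in `train.py` below) is swapping the removed `paddle.fluid.core.LoDTensor` for the public `fluid.LoDTensor` alias, alongside dropping the Python 2 `print` statement. Below is a minimal sketch of that feed pattern in isolation; the image path and the commented-out fetch are illustrative assumptions, not part of the commit:

```python
import numpy as np
from PIL import Image
import paddle.fluid as fluid

place = fluid.CUDAPlace(0)  # or fluid.CPUPlace() on a CPU-only install
exe = fluid.Executor(place)

# Preprocess one image the way infer.py does: resize, scale to [-1, 1], NCHW layout.
image = Image.open("./data/horse2zebra/trainA/example.jpg").resize((256, 256))
data = (np.array(image) / 127.5 - 1).transpose([2, 0, 1])[np.newaxis, :].astype("float32")

# fluid.LoDTensor is the public alias that replaces paddle.fluid.core.LoDTensor here.
tensor = fluid.LoDTensor()
tensor.set(data, place)

# With the generator graph built as in infer.py, the tensor is fed directly:
# fake_temp = exe.run(fetch_list=[fake.name], feed={"input": tensor})
```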
11 changes: 5 additions & 6 deletions PaddleCV/gan/cycle_gan/train.py
@@ -12,7 +12,6 @@
from scipy.misc import imsave
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
- from paddle.fluid import core
import data_reader
from utility import add_arguments, print_arguments, ImagePool
from trainer import *
@@ -22,7 +21,7 @@
# yapf: disable
add_arg('batch_size', int, 1, "Minibatch size.")
add_arg('epoch', int, 2, "The number of epoched to be trained.")
- add_arg('output', str, "./output_0", "The directory the model and the test result to be saved to.")
+ add_arg('output', str, "./output", "The directory the model and the test result to be saved to.")
add_arg('init_model', str, None, "The init model file of directory.")
add_arg('save_checkpoints', bool, True, "Whether to save checkpoints.")
add_arg('run_test', bool, True, "Whether to run test.")
@@ -82,8 +81,8 @@ def test(epoch):
for data_A, data_B in zip(A_test_reader(), B_test_reader()):
A_name = data_A[1]
B_name = data_B[1]
- tensor_A = core.LoDTensor()
- tensor_B = core.LoDTensor()
+ tensor_A = fluid.LoDTensor()
+ tensor_B = fluid.LoDTensor()
tensor_A.set(data_A[0], place)
tensor_B.set(data_B[0], place)
fake_A_temp, fake_B_temp, cyc_A_temp, cyc_B_temp = exe.run(
@@ -168,8 +167,8 @@ def init_model():
for i in range(max_images_num):
data_A = next(A_reader)
data_B = next(B_reader)
- tensor_A = core.LoDTensor()
- tensor_B = core.LoDTensor()
+ tensor_A = fluid.LoDTensor()
+ tensor_B = fluid.LoDTensor()
tensor_A.set(data_A, place)
tensor_B.set(data_B, place)
s_time = time.time()
12 changes: 7 additions & 5 deletions PaddleCV/human_pose_estimation/README.md
@@ -9,13 +9,13 @@ This is a simple demonstration of re-implementation in [PaddlePaddle.Fluid](http
## Requirements

- - Python == 2.7
- - PaddlePaddle >= 1.1.0
+ - Python == 2.7 or 3.6
+ - PaddlePaddle >= 1.1.0 (<= 1.3.0)
- opencv-python >= 3.3

## Environment

- The code is developed and tested under 4 Tesla K40/P40 GPUS cards on CentOS with installed CUDA-9.2/8.0 and cuDNN-7.1.
+ The code is developed and tested under 4 Tesla K40/P40 GPUS cards on CentOS with installed CUDA-9.0/8.0 and cuDNN-7.0.

## Results on MPII Val
| Arch | Head | Shoulder | Elbow | Wrist | Hip | Knee | Ankle | Mean | Mean@0.1 | Models |
@@ -85,19 +85,21 @@ python2 setup.py install --user
Downloading the checkpoints of Pose-ResNet-50 trained on MPII dataset from [here](https://paddlemodels.bj.bcebos.com/pose/pose-resnet50-mpii-384x384.tar.gz). Extract it into the folder `checkpoints` under the directory root of this repo. Then run

```bash
- python val.py --dataset 'mpii' --checkpoint 'checkpoints/pose-resnet50-mpii-384x384'
+ python val.py --dataset 'mpii' --checkpoint 'checkpoints/pose-resnet50-mpii-384x384' --data_root 'data/mpii'
```

### Perform Training

```bash
- python train.py --dataset 'mpii' # or coco
+ python train.py --dataset 'mpii' --data_root 'data/mpii'
```

**Note**: Configurations for training are aggregated in the `lib/mpii_reader.py` and `lib/coco_reader.py`.

### Perform Test on Images

+ We also support to apply pre-trained models on customized images.

Put the images into the folder `test` under the directory root of this repo. Then run

```bash
14 changes: 8 additions & 6 deletions PaddleCV/human_pose_estimation/README_cn.md
@@ -9,10 +9,10 @@
## Environment Dependencies

- The code in this directory has been tested and runs correctly on 4 Tesla K40/P40 GPU cards, CentOS, with CUDA-9.2/8.0 and cuDNN-7.1
+ The code in this directory has been tested and runs correctly on 4 Tesla K40/P40 GPU cards, CentOS, with CUDA-9.0/8.0 and cuDNN-7.0

- - Python == 2.7
- - PaddlePaddle >= 1.1.0
+ - Python == 2.7 / 3.6
+ - PaddlePaddle >= 1.1.0 (<= 1.3.0)
- opencv-python >= 3.3

## Results on MPII Val
@@ -83,19 +83,21 @@ python2 setup.py install --user
Download the COCO/MPII pretrained models (see the links in the last column of the table above), save them into the 'checkpoints' folder under the repo root, and run:

```bash
- python val.py --dataset 'mpii' --checkpoint 'checkpoints/pose-resnet50-mpii-384x384'
+ python val.py --dataset 'mpii' --checkpoint 'checkpoints/pose-resnet50-mpii-384x384' --data_root 'data/mpii'
```

### Model Training

```bash
- python train.py --dataset 'mpii' # or coco
+ python train.py --dataset 'mpii'
```

**Note**: Detailed parameter configurations are kept in `lib/mpii_reader.py` and `lib/coco_reader.py`; set `dataset` to select which configuration is used.

### Model Testing (arbitrary images, using the COCO or MPII pretrained models above)

+ We also support using the pretrained keypoint detection models to predict arbitrary images.

Put the test images into the 'test' folder under the repo root, then run:

```bash
Expand All @@ -104,4 +106,4 @@ python test.py --checkpoint 'checkpoints/pose-resnet-50-384x384-mpii'

## References

- - Simple Baselines for Human Pose Estimation and Tracking in PyTorch [`code`](https://github.com/Microsoft/human-pose-estimation.pytorch#data-preparation)
+ - Simple Baselines for Human Pose Estimation and Tracking in PyTorch [`code`](https://github.com/Microsoft/human-pose-estimation.pytorch#data-preparation)
11 changes: 9 additions & 2 deletions PaddleCV/human_pose_estimation/test.py
@@ -15,7 +15,7 @@

"""Functions for inference."""

- import os
+ import sys
import argparse
import functools
import paddle
@@ -34,13 +34,18 @@
add_arg('dataset', str, 'mpii', "Dataset")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('kp_dim', int, 16, "Class number.")
- add_arg('model_save_dir', str, "output", "Model save directory")
add_arg('with_mem_opt', bool, True, "Whether to use memory optimization or not.")
add_arg('checkpoint', str, None, "Whether to resume checkpoint.")
add_arg('flip_test', bool, True, "Flip test")
add_arg('shift_heatmap', bool, True, "Shift heatmap")
# yapf: enable


+ def print_immediately(s):
+     print(s)
+     sys.stdout.flush()


def test(args):
import lib.mpii_reader as reader
if args.dataset == 'coco':
@@ -89,6 +94,7 @@ def test(args):
fetch_list = [image.name, output.name]

for batch_id, data in enumerate(test_reader()):
print_immediately("Processing batch #%d" % batch_id)
num_images = len(data)

file_ids = []
@@ -124,6 +130,7 @@ def test(args):
out_heatmaps = (out_heatmaps + output_flipped) * 0.5
save_predict_results(input_image, out_heatmaps, file_ids, fold_name='results')


if __name__ == '__main__':
args = parser.parse_args()
test(args)
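The `print_immediately` helper added here (and, identically, to `train.py` and `val.py` below) pairs `print` with an explicit flush so progress lines reach the log as soon as they are produced instead of sitting in the stdout buffer when output is piped to a file. A self-contained sketch follows; the loop and message are made up for illustration:

```python
import sys
import time


def print_immediately(s):
    # Same helper as in the diff: print, then force the line out of the
    # stdout buffer so progress is visible even when stdout is redirected.
    print(s)
    sys.stdout.flush()


for batch_id in range(3):
    time.sleep(0.5)  # stand-in for processing one batch
    print_immediately("Processing batch #%d" % batch_id)
```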
9 changes: 8 additions & 1 deletion PaddleCV/human_pose_estimation/train.py
@@ -16,6 +16,7 @@
"""Functions for training."""

import os
+ import sys
import numpy as np
import cv2
import paddle
@@ -75,6 +76,12 @@ def optimizer_setting(args, params):

return optimizer


+ def print_immediately(s):
+     print(s)
+     sys.stdout.flush()


def train(args):
if args.dataset == 'coco':
import lib.coco_reader as reader
@@ -152,7 +159,7 @@ def if_exist(var):

loss = np.mean(np.array(loss))

- print('Epoch [{:4d}/{:3d}] LR: {:.10f} '
+ print_immediately('Epoch [{:4d}/{:3d}] LR: {:.10f} '
'Loss = {:.5f}'.format(
batch_id, pass_id, current_lr[0], loss))

2 changes: 1 addition & 1 deletion PaddleCV/human_pose_estimation/utils/coco_evaluator.py
@@ -24,9 +24,9 @@
import pickle

from utils.base_evaluator import BaseEvaluator
+ from utils.nms_utils import oks_nms
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
- from nms.nms import oks_nms


class COCOEvaluator(BaseEvaluator):
71 changes: 71 additions & 0 deletions PaddleCV/human_pose_estimation/utils/nms_utils.py
@@ -0,0 +1,71 @@
# Copyright (c) 2019-present, Baidu, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np


def oks_iou(g, d, a_g, a_d, sigmas=None, in_vis_thre=None):
    if not isinstance(sigmas, np.ndarray):
        sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89]) / 10.0
    vars = (sigmas * 2) ** 2
    xg = g[0::3]
    yg = g[1::3]
    vg = g[2::3]
    ious = np.zeros((d.shape[0]))
    for n_d in range(0, d.shape[0]):
        xd = d[n_d, 0::3]
        yd = d[n_d, 1::3]
        vd = d[n_d, 2::3]
        dx = xd - xg
        dy = yd - yg
        e = (dx ** 2 + dy ** 2) / vars / ((a_g + a_d[n_d]) / 2 + np.spacing(1)) / 2
        if in_vis_thre is not None:
            ind = list(vg > in_vis_thre) and list(vd > in_vis_thre)
            e = e[ind]
        ious[n_d] = np.sum(np.exp(-e)) / e.shape[0] if e.shape[0] != 0 else 0.0
    return ious


def oks_nms(kpts_db, thresh, sigmas=None, in_vis_thre=None):
    """
    greedily select boxes with high confidence and overlap with current maximum <= thresh
    rule out overlap >= thresh, overlap = oks
    :param kpts_db
    :param thresh: retain overlap < thresh
    :return: indexes to keep
    """
    if len(kpts_db) == 0:
        return []

    scores = np.array([kpts_db[i]['score'] for i in range(len(kpts_db))])
    kpts = np.array([kpts_db[i]['keypoints'].flatten() for i in range(len(kpts_db))])
    areas = np.array([kpts_db[i]['area'] for i in range(len(kpts_db))])

    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)

        oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]], sigmas, in_vis_thre)

        inds = np.where(oks_ovr <= thresh)[0]
        order = order[inds + 1]

    return keep
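For reference, a small, self-contained sketch of how the new `oks_nms` helper might be exercised; the two detections below are synthetic data, not anything from the repo. OKS-based NMS keeps the highest-scoring pose and suppresses any other pose whose object keypoint similarity with it exceeds the threshold:

```python
import numpy as np
from utils.nms_utils import oks_nms  # assumes PaddleCV/human_pose_estimation is the working directory

# Two hypothetical detections of the same person: 17 COCO keypoints each,
# stored as (x, y, visibility) rows, plus a detection score and a box area.
rng = np.random.RandomState(0)
base = rng.uniform(50.0, 200.0, size=(17, 3))
base[:, 2] = 1.0                            # mark every joint visible
shifted = base + np.array([1.0, 1.0, 0.0])  # near-duplicate pose, shifted by one pixel

kpts_db = [
    {'score': 0.95, 'keypoints': base,    'area': 150.0 * 150.0},
    {'score': 0.80, 'keypoints': shifted, 'area': 150.0 * 150.0},
]

keep = oks_nms(kpts_db, thresh=0.9)
print(keep)  # -> [0]: the near-duplicate is suppressed, only the top-scoring pose remains
```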
2 changes: 1 addition & 1 deletion PaddleCV/human_pose_estimation/utils/utility.py
@@ -41,7 +41,7 @@ def print_arguments(args):
:type args: argparse.Namespace
"""
print("----------- Configuration Arguments -----------")
- for arg, value in sorted(vars(args).iteritems()):
+ for arg, value in sorted(vars(args).items()):
print("%s: %s" % (arg, value))
print("------------------------------------------------")

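The switch from `.iteritems()` to `.items()` is what makes `print_arguments` work under Python 3 as well as Python 2. A quick illustration, using two flags that mirror the scripts' own arguments:

```python
import argparse
from utils.utility import print_arguments  # assumes PaddleCV/human_pose_estimation is the working directory

parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='mpii')
parser.add_argument('--use_gpu', type=bool, default=True)

args = parser.parse_args([])  # no CLI input, just the defaults
print_arguments(args)         # prints each name/value pair; runs on both Python 2 and 3 now
```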
10 changes: 8 additions & 2 deletions PaddleCV/human_pose_estimation/val.py
@@ -16,6 +16,7 @@
"""Functions for validation."""

import os
+ import sys
import argparse
import functools
import paddle
@@ -37,7 +38,6 @@
add_arg('num_epochs', int, 140, "Number of epochs.")
add_arg('total_images', int, 144406, "Training image number.")
add_arg('kp_dim', int, 16, "Class number.")
- add_arg('model_save_dir', str, "output", "Model save directory")
add_arg('with_mem_opt', bool, True, "Whether to use memory optimization or not.")
add_arg('pretrained_model', str, None, "Whether to use pretrained model.")
add_arg('checkpoint', str, None, "Whether to resume checkpoint.")
@@ -49,6 +49,12 @@
add_arg('data_root', str, "data/coco", "Root directory of dataset")
# yapf: enable


+ def print_immediately(s):
+     print(s)
+     sys.stdout.flush()


def valid(args):
if args.dataset == 'coco':
import lib.coco_reader as reader
@@ -208,7 +214,7 @@ def if_exist(var):

idx += num_images

- print('Epoch [{:4d}] '
+ print_immediately('Epoch [{:4d}] '
'Loss = {:.5f} '
'Acc = {:.5f}'.format(batch_id, loss, acc.avg))
