
2.0rc api rename #28088

Merged (16 commits, Oct 21, 2020)
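Note: this PR applies the 2.0rc naming pass: `paddle.manual_seed` becomes `paddle.seed`, `paddle.nn.Conv2d` becomes `paddle.nn.Conv2D`, and `nn.ConvTranspose2d` becomes `nn.Conv2DTranspose`. A minimal before/after sketch of the renames (illustrative only; argument values are hypothetical):

    import paddle

    # old 2.0-beta spellings (removed in this PR):
    #   paddle.manual_seed(100)
    #   conv = paddle.nn.Conv2d(3, 2, 3)
    #   up = paddle.nn.ConvTranspose2d(3, 2, 3)

    # new 2.0rc spellings (added in this PR):
    paddle.seed(100)                          # manual_seed -> seed
    conv = paddle.nn.Conv2D(3, 2, 3)          # Conv2d -> Conv2D
    up = paddle.nn.Conv2DTranspose(3, 2, 3)   # ConvTranspose2d -> Conv2DTranspose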
python/paddle/__init__.py (1 addition, 1 deletion)
@@ -221,7 +221,7 @@

from .tensor.to_string import set_printoptions

-from .framework.random import manual_seed #DEFINE_ALIAS
+from .framework.random import seed #DEFINE_ALIAS
from .framework.random import get_cuda_rng_state #DEFINE_ALIAS
from .framework.random import set_cuda_rng_state #DEFINE_ALIAS
from .framework import ParamAttr #DEFINE_ALIAS
python/paddle/amp/auto_cast.py (1 addition, 1 deletion)
@@ -37,7 +37,7 @@ def auto_cast(enable=True, custom_white_list=None, custom_black_list=None):

import paddle

-conv2d = paddle.nn.Conv2d(3, 2, 3, bias_attr=False)
+conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
data = paddle.rand([10, 3, 32, 32])

with paddle.amp.auto_cast():
python/paddle/amp/grad_scaler.py (3 additions, 3 deletions)
@@ -50,7 +50,7 @@ class GradScaler(AmpScaler):

import paddle

-model = paddle.nn.Conv2d(3, 2, 3, bias_attr=True)
+model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
data = paddle.rand([10, 3, 32, 32])
@@ -90,7 +90,7 @@ def scale(self, var):

import paddle

-model = paddle.nn.Conv2d(3, 2, 3, bias_attr=True)
+model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
data = paddle.rand([10, 3, 32, 32])
@@ -122,7 +122,7 @@ def minimize(self, optimizer, *args, **kwargs):

import paddle

-model = paddle.nn.Conv2d(3, 2, 3, bias_attr=True)
+model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
data = paddle.rand([10, 3, 32, 32])
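Note: the GradScaler docstring examples above are truncated by the fold; for review context, the full dygraph AMP pattern they document looks roughly like this (a sketch assuming the 2.0rc API):

    import paddle

    model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
    optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
    data = paddle.rand([10, 3, 32, 32])

    with paddle.amp.auto_cast():
        conv = model(data)
        loss = paddle.mean(conv)

    scaled = scaler.scale(loss)          # scale the loss to avoid fp16 underflow
    scaled.backward()                    # backprop on the scaled loss
    scaler.minimize(optimizer, scaled)   # unscale grads, step, update loss scaling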
python/paddle/distribution.py (10 additions, 10 deletions)
@@ -670,13 +670,13 @@ class Categorical(Distribution):
import paddle
from paddle.distribution import Categorical

-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x.numpy())
# [0.5535528 0.20714243 0.01162981
# 0.51577556 0.36369765 0.2609165 ]

-paddle.manual_seed(200) # on CPU device
+paddle.seed(200) # on CPU device
y = paddle.rand([6])
print(y.numpy())
# [0.77663314 0.90824795 0.15685187
@@ -685,7 +685,7 @@ class Categorical(Distribution):
cat = Categorical(x)
cat2 = Categorical(y)

-paddle.manual_seed(1000) # on CPU device
+paddle.seed(1000) # on CPU device
cat.sample([2,3])
# [[0, 0, 5],
# [3, 4, 5]]
@@ -744,15 +744,15 @@ def sample(self, shape):
import paddle
from paddle.distribution import Categorical

-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x.numpy())
# [0.5535528 0.20714243 0.01162981
# 0.51577556 0.36369765 0.2609165 ]

cat = Categorical(x)

-paddle.manual_seed(1000) # on CPU device
+paddle.seed(1000) # on CPU device
cat.sample([2,3])
# [[0, 0, 5],
# [3, 4, 5]]
@@ -791,13 +791,13 @@ def kl_divergence(self, other):
import paddle
from paddle.distribution import Categorical

-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x.numpy())
# [0.5535528 0.20714243 0.01162981
# 0.51577556 0.36369765 0.2609165 ]

-paddle.manual_seed(200) # on CPU device
+paddle.seed(200) # on CPU device
y = paddle.rand([6])
print(y.numpy())
# [0.77663314 0.90824795 0.15685187
@@ -842,7 +842,7 @@ def entropy(self):
import paddle
from paddle.distribution import Categorical

-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x.numpy())
# [0.5535528 0.20714243 0.01162981
@@ -887,7 +887,7 @@ def probs(self, value):
import paddle
from paddle.distribution import Categorical

-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x.numpy())
# [0.5535528 0.20714243 0.01162981
@@ -953,7 +953,7 @@ def log_prob(self, value):
import paddle
from paddle.distribution import Categorical

-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x.numpy())
# [0.5535528 0.20714243 0.01162981
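Note: every hunk in this file is the same `manual_seed` -> `seed` rename inside the Categorical examples; a compact sketch of the updated usage (sampled values depend on device and seed):

    import paddle
    from paddle.distribution import Categorical

    paddle.seed(100)              # was: paddle.manual_seed(100)
    x = paddle.rand([6])
    cat = Categorical(x)

    paddle.seed(1000)
    samples = cat.sample([2, 3])  # 2x3 Tensor of category indices in [0, 6)
    entropy = cat.entropy()       # entropy of the normalized distribution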
python/paddle/fluid/contrib/tests/test_weight_decay_extend.py (2 additions, 2 deletions)
@@ -114,7 +114,7 @@ def run_program(self, place, feed_list):
return param_sum

def check_weight_decay(self, place, model):
-paddle.manual_seed(1)
+paddle.seed(1)
paddle.framework.random._manual_program_seed(1)
main_prog = fluid.framework.Program()
startup_prog = fluid.framework.Program()
@@ -137,7 +137,7 @@ def check_weight_decay(self, place, model):
return param_sum

def check_weight_decay2(self, place, model):
-paddle.manual_seed(1)
+paddle.seed(1)
paddle.framework.random._manual_program_seed(1)
main_prog = fluid.framework.Program()
startup_prog = fluid.framework.Program()
python/paddle/fluid/dygraph/layers.py (1 addition, 1 deletion)
@@ -1058,7 +1058,7 @@ def __init__(self):
super(Mylayer, self).__init__()
self.linear1 = paddle.nn.Linear(10, 10)
self.linear2 = paddle.nn.Linear(5, 5)
-self.conv2d = paddle.nn.Conv2d(3, 2, 3)
+self.conv2d = paddle.nn.Conv2D(3, 2, 3)
self.embedding = paddle.nn.Embedding(128, 16)
self.h_0 = paddle.to_tensor(np.zeros([10, 10]).astype('float32'))

python/paddle/fluid/dygraph/nn.py (4 additions, 4 deletions)
@@ -110,7 +110,7 @@ class Conv2D(layers.Layer):
dilation (int or tuple, optional): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: 1.
-groups (int, optional): The groups number of the Conv2d Layer. According to grouped
+groups (int, optional): The groups number of the Conv2D Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
@@ -345,7 +345,7 @@ class Conv3D(layers.Layer):
dilation (int|tuple, optional): The dilation size. If dilation is a tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
-groups (int, optional): The groups number of the Conv3d Layer. According to grouped
+groups (int, optional): The groups number of the Conv3D Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
@@ -574,7 +574,7 @@ class Conv3DTranspose(layers.Layer):
dilation(int|tuple, optional): The dilation size. If dilation is a tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
-groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by
+groups(int, optional): The groups number of the Conv3D transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
@@ -2541,7 +2541,7 @@ class Conv2DTranspose(layers.Layer):
dilation(int or tuple, optional): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: 1.
-groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by
+groups(int, optional): The groups number of the Conv2D transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
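Note: the `groups` docstrings touched above all describe the same Krizhevsky-style grouped convolution; a small sketch of what it means for the renamed layer (shapes are illustrative):

    import paddle

    # With groups=2, each half of the 6 filters sees only half of the 4 input
    # channels, so each kernel has in_channels/groups = 2 channels.
    conv = paddle.nn.Conv2D(in_channels=4, out_channels=6, kernel_size=3, groups=2)
    print(conv.weight.shape)  # [6, 2, 3, 3] instead of [6, 4, 3, 3] for groups=1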
python/paddle/fluid/initializer.py (1 addition, 1 deletion)
@@ -749,7 +749,7 @@ class BilinearInitializer(Initializer):
regularizer=L2Decay(0.),
initializer=nn.initializer.Bilinear())
data = paddle.rand([B, 3, H, W], dtype='float32')
-conv_up = nn.ConvTranspose2d(3,
+conv_up = nn.Conv2DTranspose(3,
out_channels=C,
kernel_size=2 * factor - factor % 2,
padding=int(
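Note: the BilinearInitializer example is cut off mid-call by the fold; a completed sketch of the renamed usage, assuming the `factor`/`B`/`C`/`H`/`W` values defined earlier in that docstring:

    import math
    import paddle
    import paddle.nn as nn
    from paddle import ParamAttr
    from paddle.regularizer import L2Decay

    factor = 2                      # assumed upsampling factor
    B, C, H, W = 8, 2, 32, 32       # assumed shapes

    w_attr = ParamAttr(learning_rate=0.,
                       regularizer=L2Decay(0.),
                       initializer=nn.initializer.Bilinear())
    data = paddle.rand([B, 3, H, W], dtype='float32')
    conv_up = nn.Conv2DTranspose(3,
                                 out_channels=C,
                                 kernel_size=2 * factor - factor % 2,
                                 padding=int(math.ceil((factor - 1) / 2.)),
                                 stride=factor,
                                 weight_attr=w_attr,
                                 bias_attr=False)
    out = conv_up(data)             # bilinearly upsampled by `factor`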
python/paddle/fluid/nets.py (30 additions, 18 deletions)
@@ -43,7 +43,7 @@ def simple_img_conv_pool(input,
act=None,
use_cudnn=True):
"""
-:api_attr: Static Graph
+:api_attr: Static Graph

The simple_img_conv_pool api is composed of :ref:`api_fluid_layers_conv2d` and :ref:`api_fluid_layers_pool2d` .

@@ -106,6 +106,8 @@ def simple_img_conv_pool(input,
.. code-block:: python

import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
img = fluid.data(name='img', shape=[100, 1, 28, 28], dtype='float32')
conv_pool = fluid.nets.simple_img_conv_pool(input=img,
filter_size=5,
@@ -151,37 +153,37 @@ def img_conv_group(input,
pool_type="max",
use_cudnn=True):
"""
-:api_attr: Static Graph
+:api_attr: Static Graph

The Image Convolution Group is composed of Convolution2d, BatchNorm, DropOut,
-and Pool2d. According to the input arguments, img_conv_group will do serials of
+and Pool2D. According to the input arguments, img_conv_group will do serials of
computation for Input using Convolution2d, BatchNorm, DropOut, and pass the last
-result to Pool2d.
+result to Pool2D.

Args:
input (Variable): The input is 4-D Tensor with shape [N, C, H, W], the data type of input is float32 or float64.
conv_num_filter(list|tuple): Indicates the numbers of filter of this group.
-pool_size (int|list|tuple): The pooling size of Pool2d Layer. If pool_size
+pool_size (int|list|tuple): The pooling size of Pool2D Layer. If pool_size
is a list or tuple, it must contain two integers, (pool_size_height, pool_size_width).
Otherwise, the pool_size_height = pool_size_width = pool_size.
-conv_padding (int|list|tuple): The padding size of the Conv2d Layer. If padding is
+conv_padding (int|list|tuple): The padding size of the Conv2D Layer. If padding is
a list or tuple, its length must be equal to the length of conv_num_filter.
-Otherwise the conv_padding of all Conv2d Layers are the same. Default 1.
+Otherwise the conv_padding of all Conv2D Layers are the same. Default 1.
conv_filter_size (int|list|tuple): The filter size. If filter_size is a list or
tuple, its length must be equal to the length of conv_num_filter.
-Otherwise the conv_filter_size of all Conv2d Layers are the same. Default 3.
-conv_act (str): Activation type for Conv2d Layer that is not followed by BatchNorm.
+Otherwise the conv_filter_size of all Conv2D Layers are the same. Default 3.
+conv_act (str): Activation type for Conv2D Layer that is not followed by BatchNorm.
Default: None.
-param_attr (ParamAttr): The parameters to the Conv2d Layer. Default: None
-conv_with_batchnorm (bool|list): Indicates whether to use BatchNorm after Conv2d Layer.
+param_attr (ParamAttr): The parameters to the Conv2D Layer. Default: None
+conv_with_batchnorm (bool|list): Indicates whether to use BatchNorm after Conv2D Layer.
If conv_with_batchnorm is a list, its length must be equal to the length of
conv_num_filter. Otherwise, conv_with_batchnorm indicates whether all the
-Conv2d Layer follows a BatchNorm. Default False.
+Conv2D Layer follows a BatchNorm. Default False.
conv_batchnorm_drop_rate (float|list): Indicates the drop_rate of Dropout Layer
after BatchNorm. If conv_batchnorm_drop_rate is a list, its length must be
equal to the length of conv_num_filter. Otherwise, drop_rate of all Dropout
Layers is conv_batchnorm_drop_rate. Default 0.0.
-pool_stride (int|list|tuple): The pooling stride of Pool2d layer. If pool_stride
+pool_stride (int|list|tuple): The pooling stride of Pool2D layer. If pool_stride
is a list or tuple, it must contain two integers, (pooling_stride_H,
pooling_stride_W). Otherwise, the pooling_stride_H = pooling_stride_W = pool_stride.
Default 1.
@@ -192,12 +194,15 @@ def img_conv_group(input,

Return:
A Variable holding Tensor representing the final result after serial computation using Convolution2d,
-BatchNorm, DropOut, and Pool2d, whose data type is the same with input.
+BatchNorm, DropOut, and Pool2D, whose data type is the same with input.

Examples:
.. code-block:: python

import paddle.fluid as fluid
+import paddle
+paddle.enable_static()

img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
conv_pool = fluid.nets.img_conv_group(input=img,
conv_padding=1,
@@ -261,7 +266,7 @@ def sequence_conv_pool(input,
pool_type="max",
bias_attr=None):
"""
-:api_attr: Static Graph
+:api_attr: Static Graph

**This api takes input as an LoDTensor. If input is a Tensor, please use**
:ref:`api_fluid_nets_simple_img_conv_pool` **instead**
Expand Down Expand Up @@ -300,6 +305,8 @@ def sequence_conv_pool(input,
.. code-block:: python

import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
input_dim = 100 #len(word_dict)
emb_dim = 128
hid_dim = 512
@@ -327,7 +334,7 @@

def glu(input, dim=-1):
"""
-:api_attr: Static Graph
+:api_attr: Static Graph

The Gated Linear Units(GLU) composed by :ref:`api_fluid_layers_split` ,
:ref:`api_fluid_layers_sigmoid` and :ref:`api_fluid_layers_elementwise_mul` .
@@ -356,6 +363,9 @@ def glu(input, dim=-1):
.. code-block:: python

import paddle.fluid as fluid
+import paddle
+paddle.enable_static()

data = fluid.data(
name="words", shape=[-1, 6, 3, 9], dtype="float32")
# shape of output: [-1, 3, 3, 9]
@@ -375,7 +385,7 @@ def scaled_dot_product_attention(queries,
num_heads=1,
dropout_rate=0.):
"""
-:api_attr: Static Graph
+:api_attr: Static Graph

This interface Multi-Head Attention using scaled dot product.
Attention mechanism can be seen as mapping a query and a set of key-value
@@ -435,7 +445,9 @@ def scaled_dot_product_attention(queries,
.. code-block:: python

import paddle.fluid as fluid

+import paddle
+paddle.enable_static()

queries = fluid.data(name="queries", shape=[3, 5, 9], dtype="float32")
keys = fluid.data(name="keys", shape=[3, 6, 9], dtype="float32")
values = fluid.data(name="values", shape=[3, 6, 10], dtype="float32")
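Note: besides the Pool2d/Conv2d capitalization fixes, the recurring change in nets.py is inserting `paddle.enable_static()` into the examples: Paddle 2.0 starts in dynamic-graph mode, while `fluid.data` and `fluid.nets.*` require static mode. A minimal sketch of the updated example shape:

    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()  # fluid.data / fluid.nets.* need static-graph mode in 2.0

    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
    conv_pool = fluid.nets.simple_img_conv_pool(input=img,
                                                num_filters=20,
                                                filter_size=5,
                                                pool_size=2,
                                                pool_stride=2,
                                                act="relu")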
(file path not shown)
@@ -564,7 +564,7 @@ def train_bmn(args, place, to_static):
loss_data = []

with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
global local_random
local_random = np.random.RandomState(SEED)
(file path not shown)
@@ -450,7 +450,7 @@ def do_train(args, to_static):
place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)

reader = get_random_input_data(args.batch_size, args.vocab_size,
(file path not shown)
@@ -451,7 +451,7 @@ def train_mobilenet(args, to_static):
with fluid.dygraph.guard(args.place):

np.random.seed(SEED)
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)

if args.model == "MobileNetV1":
(file path not shown)
@@ -218,7 +218,7 @@ def train(place):
batch_num = 200

with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
ptb_model = PtbModel(
hidden_size=hidden_size,
(file path not shown)
@@ -210,7 +210,7 @@ def train(place):
batch_num = 200

paddle.disable_static(place)
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
ptb_model = PtbModel(
hidden_size=hidden_size,
(file path not shown)
@@ -65,7 +65,7 @@ def train(args, place, to_static):
env.seed(SEED)

with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
local_random = np.random.RandomState(SEED)

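Note: the test diffs above all apply the same two-line reproducibility preamble; the renamed pattern is sketched below (SEED values are per-test constants, and `_manual_program_seed` is a private helper used by these tests):

    import paddle

    SEED = 2020                                          # per-test constant (assumed value)
    paddle.seed(SEED)                                    # was: paddle.manual_seed(SEED)
    paddle.framework.random._manual_program_seed(SEED)   # seeds default static programs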