
Commit

Refactor as suggested
piotrekobi committed Oct 1, 2021
1 parent 22fc17b commit dd1c0ab
Showing 3 changed files with 10 additions and 11 deletions.
@@ -33,7 +33,7 @@ class EltwiseDivMKLDNNGradKernel : public ElemwiseGradKernel<T> {
     ElemwiseGradKernel<T>::Compute(ctx);

     auto& dev_ctx =
-        ctx.template device_context<paddle::platform::MKLDNNDeviceContext>();
+        ctx.template device_context<platform::MKLDNNDeviceContext>();
     const auto& mkldnn_engine = dev_ctx.GetEngine();

     auto* y = ctx.Input<framework::Tensor>("Y");
@@ -122,7 +122,7 @@ class EltwiseDivMKLDNNGradKernel : public ElemwiseGradKernel<T> {
       astream.wait();
       dy->set_format(
           platform::GetMKLDNNFormat(dy_memory_p->get_desc().reshape(
-              paddle::framework::vectorize<int64_t>(dy->dims()))));
+              framework::vectorize<int64_t>(dy->dims()))));

     } else {
       dy->set_format(platform::GetMKLDNNFormat(*dst_dy_memory));
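The change above only drops the redundant paddle:: qualifier: the kernel is defined inside namespace paddle, so unqualified platform:: and framework:: already name the same types. A minimal self-contained sketch of that lookup rule (the stand-in struct below is hypothetical, not the real Paddle class):

#include <iostream>
#include <typeinfo>

namespace paddle {
namespace platform {
struct MKLDNNDeviceContext {};  // stand-in for the real device context
}  // namespace platform

namespace operators {
void Example() {
  // Inside namespace paddle, unqualified lookup already reaches
  // paddle::platform, so the explicit paddle:: prefix is redundant.
  platform::MKLDNNDeviceContext a;          // form used after this commit
  paddle::platform::MKLDNNDeviceContext b;  // equivalent, just longer
  std::cout << std::boolalpha << (typeid(a) == typeid(b)) << "\n";  // true
}
}  // namespace operators
}  // namespace paddle

int main() { paddle::operators::Example(); }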
paddle/fluid/platform/mkldnn_reuse.h (9 changes: 3 additions, 6 deletions)
@@ -1334,18 +1334,15 @@ class ConvMKLDNNTemplateHandler : public MKLDNNHandler {
       int mask = output_shift_scale.size() > 1 ? 1 << 1 : 0;
       conv_attr.set_output_scales(mask, output_shift_scale);
     }
-    // Fusion with Elementwise layer relies on adding a sum post-operation
-    // with
-    // the scale parameter. It is assumed that when fuse_residual_connection
-    // is
+    // Fusion with Elementwise layer relies on adding a sum post-operation with
+    // the scale parameter. It is assumed that when fuse_residual_connection is
     // true, the output tensor contains the data coming from residual
     // connection. The result of this post_op is:
     // Output = scale * Output + Conv_Out.
     if (fuse_residual_conn) {
       post_operations.append_sum(sum_scale);
     }
-    // Fusion with ReLU layer is executed through the PostOps feature. Create
-    // a
+    // Fusion with ReLU layer is executed through the PostOps feature. Create a
     // PostOps object and configure it to execute an eltwise relu operation.
     if (fuse_activation == "relu" || fuse_activation == "leaky_relu") {
       constexpr float scale = 1.0f;
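The comments in this hunk describe how residual (sum) and ReLU fusion are expressed as oneDNN post-operations. As a rough illustration, independent of Paddle's handler classes, this is how such post-ops can be attached to a convolution's primitive_attr with the oneDNN 2.x C++ API (the scale values are illustrative only; newer oneDNN releases changed the append_eltwise signature):

#include "dnnl.hpp"  // oneDNN (formerly MKL-DNN) C++ API, 2.x

// Build primitive attributes mirroring the fusion logic above: an optional
// sum post-op for the residual connection, then an eltwise ReLU post-op.
dnnl::primitive_attr MakeConvAttr(bool fuse_residual_conn, bool fuse_relu) {
  dnnl::primitive_attr attr;
  dnnl::post_ops ops;

  if (fuse_residual_conn) {
    // Output = sum_scale * Output + Conv_Out
    const float sum_scale = 1.0f;
    ops.append_sum(sum_scale);
  }
  if (fuse_relu) {
    const float scale = 1.0f;  // post-op scale
    const float alpha = 0.0f;  // negative slope; 0.0f means plain ReLU
    const float beta = 0.0f;   // unused by eltwise_relu
    ops.append_eltwise(scale, dnnl::algorithm::eltwise_relu, alpha, beta);
  }
  attr.set_post_ops(ops);
  // Quantized paths would additionally call attr.set_output_scales(mask,
  // scales), as conv_attr does in the handler above.
  return attr;
}

In the handler, an attr built this way would then be used when creating the convolution primitive descriptor; the sketch only shows the post-op wiring, not that step.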
@@ -43,15 +43,15 @@ def init_input_output(self):
         self.out = np.divide(self.x, self.y)

     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.05)
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.02)

     def test_check_grad_ignore_x(self):
         self.check_grad(
-            ['Y'], 'Out', no_grad_set=set("X"), max_relative_error=0.05)
+            ['Y'], 'Out', no_grad_set=set("X"), max_relative_error=0.02)

     def test_check_grad_ignore_y(self):
         self.check_grad(
-            ['X'], 'Out', no_grad_set=set('Y'), max_relative_error=0.05)
+            ['X'], 'Out', no_grad_set=set('Y'), max_relative_error=0.02)

     def init_axis(self):
         self.axis = -1
@@ -86,6 +86,7 @@ def init_input_output(self):
         self.y = np.random.uniform(1, 2, [4, 32]).astype(self.dtype)
         self.out = np.divide(self.x, self.y)

+    # TODO(piotrekobiIntel): Enable when grad is ready
     def test_check_grad_normal(self):
         pass

@@ -99,6 +100,7 @@ def init_input_output(self):
         self.y = np.random.uniform(1, 2, [100]).astype(self.dtype)
         self.out = np.divide(self.x, self.y)

+    # TODO(piotrekobiIntel): Enable when grad is ready
     def test_check_grad_normal(self):
         pass

