#ifndef ATEN_SRC_NATIVE_CPU_LAYER_NORM_KERNEL_H_
#define ATEN_SRC_NATIVE_CPU_LAYER_NORM_KERNEL_H_

#include <ATen/ATen.h>
#include <ATen/native/DispatchStub.h>

namespace at {
namespace native {

// Forward kernel: treats X as an M x N matrix and normalizes each row of N
// elements, applying the affine parameters gamma and beta. The result is
// written to *Y, and the per-row statistics to *mean and *rstd (the
// reciprocal of the standard deviation, computed with epsilon eps).
using forward_fn = void (*)(
    const Tensor& /* X */,
    const Tensor& /* gamma */,
    const Tensor& /* beta */,
    int64_t /* M */,
    int64_t /* N */,
    double /* eps */,
    Tensor* /* Y */,
    Tensor* /* mean */,
    Tensor* /* rstd */);

// Backward kernel: given the upstream gradient dY and the mean/rstd saved
// from the forward pass, computes the gradients with respect to the input
// (*dX) and the affine parameters (*dgamma, *dbeta).
using backward_fn = void (*)(
    const Tensor& /* dY */,
    const Tensor& /* X */,
    const Tensor& /* mean */,
    const Tensor& /* rstd */,
    const Tensor& /* gamma */,
    int64_t /* M */,
    int64_t /* N */,
    Tensor* /* dX */,
    Tensor* /* dgamma */,
    Tensor* /* dbeta */);

// Double-backward kernel: propagates the gradients-of-gradients (ddX,
// ddgamma, ddbeta) through the backward pass, producing *ddY, *dX, and
// *dgamma.
using double_backward_fn = void (*)(
    const Tensor& /* ddX */,
    const Tensor& /* ddgamma */,
    const Tensor& /* ddbeta */,
    const Tensor& /* dY */,
    const Tensor& /* X */,
    const Tensor& /* mean */,
    const Tensor& /* rstd */,
    const Tensor& /* gamma */,
    int64_t /* M */,
    int64_t /* N */,
    Tensor* /* ddY */,
    Tensor* /* dX */,
    Tensor* /* dgamma */);
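
// Each DECLARE_DISPATCH below creates a per-device-type dispatch stub for
// one of the function-pointer types above; concrete kernels register
// themselves against these stubs via REGISTER_DISPATCH in their
// implementation files.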
DECLARE_DISPATCH(forward_fn, LayerNormKernel);
DECLARE_DISPATCH(backward_fn, LayerNormBackwardKernel);
DECLARE_DISPATCH(double_backward_fn, LayerNormDoubleBackwardKernel);
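
// A minimal usage sketch (hedged: the actual call sites live elsewhere in
// ATen, and the tensor names here are illustrative only). A caller allocates
// the outputs and invokes the stub with a device type as the first argument:
//
//   Tensor Y = at::empty_like(X);
//   Tensor mean = at::empty({M}, X.options());
//   Tensor rstd = at::empty({M}, X.options());
//   LayerNormKernel(kCPU, X, gamma, beta, M, N, eps, &Y, &mean, &rstd);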
} // namespace native
} // namespace at

#endif // ATEN_SRC_NATIVE_CPU_LAYER_NORM_KERNEL_H_