Support Quantized Fully Connected #5
base: master2
Changes from 9 commits
@@ -0,0 +1,167 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

#if MXNET_USE_MKLDNN == 1

#include "../../nn/mkldnn/mkldnn_base-inl.h"
#include "../quantization_utils.h"
#include "../../nn/fully_connected-inl.h"

namespace mxnet {
namespace op {

namespace quantilizedfc {
enum QuantilizedfcOpResource {kTempSpace};
}

Reviewer (on the enum): QuantizedFullyConnectedOpResource
Author: fixed

struct QuantizedShiftKernel {
  MSHADOW_XINLINE static void Map(int i, int8_t *in, uint8_t *out, int shift) {
    out[i] = in[i] + shift;
  }
};
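Two details make this kernel safe: in[i] + shift is computed after integer promotion to int, so the int8_t value never overflows, and adding 128 maps the signed range [-128, 127] exactly onto the unsigned range [0, 255]. A minimal standalone illustration of the mapping (not part of the patch):

#include <cstdint>
#include <cstdio>

int main() {
  // Map the int8 range [-128, 127] onto the uint8 range [0, 255]
  // by adding 128, mirroring what QuantizedShiftKernel does per element.
  int8_t in[] = {-128, -1, 0, 127};
  uint8_t out[4];
  const int shift = 128;
  for (int i = 0; i < 4; ++i) {
    // in[i] is promoted to int before the add, so nothing overflows;
    // the sum is always in [0, 255] and fits uint8_t exactly.
    out[i] = in[i] + shift;
  }
  for (int i = 0; i < 4; ++i) printf("%d -> %d\n", in[i], out[i]);
  return 0;
}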

struct QuantizedSumInitKernelWithBias {
  // init sum data with bias for matrix b (n)
  MSHADOW_XINLINE static void Map(int i, int32_t *out,
                                  const int8_t *bias, const float *min_out,
                                  const float *max_out, const float *min_bias,
                                  const float *max_bias) {
    typedef int32_t T1;
    typedef int8_t T2;
    using mshadow::red::limits::MinValue;
    using mshadow::red::limits::MaxValue;
    float float_for_one_out_quant =
        MaxAbs(*min_out, *max_out) / static_cast<double>(MaxValue<T1>());
    float float_for_one_bias_quant =
        MaxAbs(*min_bias, *max_bias) / static_cast<double>(MaxValue<T2>());
    if (float_for_one_out_quant != 0) {
      out[i] = bias[i] * float_for_one_bias_quant /
               float_for_one_out_quant;
    } else {
      LOG(INFO) << "WARNING: QuantizedBiasAddKernel float_for_one_out_quant is 0 !";
      out[i] = 0;
    }
  }
};

Reviewer (on the Map kernel): Previously in the optimization for embedding, we thought that
Author: in ut testing, map way is faster (2-3X) than omp
Reviewer: Interesting. I thought kernel launch is also using omp.

Reviewer (on the LOG message): what's QuantizedBiasAddKernel?
Author: fixed
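Read as math, this kernel requantizes the int8 bias from its own scale onto the int32 output scale before the bias is used to seed the accumulator. A sketch of the intended arithmetic, using the scale convention visible in the code (one float per quantized unit):

$$s_{\text{out}} = \frac{\max(|\text{min\_out}|, |\text{max\_out}|)}{2^{31}-1}, \qquad s_{\text{bias}} = \frac{\max(|\text{min\_bias}|, |\text{max\_bias}|)}{127}$$

$$\text{out}[i] = \text{bias}[i] \cdot \frac{s_{\text{bias}}}{s_{\text{out}}}$$

That is, the real-valued bias $\text{bias}[i] \cdot s_{\text{bias}}$ is re-expressed in output-scale int32 units, so the GEMM's $\beta C$ term later adds the bias in the right units.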

struct QuantizedSumInitKernel {
  // init sum data for matrix b (n)
  MSHADOW_XINLINE static void Map(int i, int32_t *out) {
    out[i] = 0;
  }
};
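For readers outside the MXNet codebase, the review exchange above contrasts two dispatch styles: the "map way" is mxnet_op::Kernel<OP, cpu>::Launch, which calls OP::Map(i, ...) per index, and as far as I can tell it does parallelize with OpenMP internally on CPU, so the 2-3X gap the author measured is between two OpenMP strategies, not OpenMP versus serial. A minimal standalone sketch of the two styles (these struct and function names are illustrative, not MXNet's):

#include <cstdint>
#include <cstddef>

// Map-style kernel: one element per Map() call, mirroring the shape of
// mxnet_op::Kernel<OP, cpu>::Launch (illustrative, not MXNet's code).
struct ShiftMap {
  static inline void Map(int i, const int8_t *in, uint8_t *out, int shift) {
    out[i] = in[i] + shift;
  }
};

template <typename OP, typename... Args>
void Launch(size_t N, Args... args) {
  // The generic launcher owns the parallel loop once, for every kernel.
  #pragma omp parallel for
  for (long i = 0; i < static_cast<long>(N); ++i) {
    OP::Map(static_cast<int>(i), args...);
  }
}

// Hand-rolled OpenMP loop doing the same work directly.
void shift_omp(size_t N, const int8_t *in, uint8_t *out, int shift) {
  #pragma omp parallel for
  for (long i = 0; i < static_cast<long>(N); ++i) {
    out[i] = in[i] + shift;
  }
}

int main() {
  int8_t in[4] = {-128, -1, 0, 127};
  uint8_t a[4], b[4];
  Launch<ShiftMap>(4, in, a, 128);  // "map way"
  shift_omp(4, in, b, 128);         // direct omp loop
  return 0;
}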

struct QuantizedSumKernel {
  // get sum data(n) for matrix b (n * k)
  MSHADOW_XINLINE static void Map(int i, size_t k, int8_t *in, int32_t *out, int shift) {
    out[i / k] -= shift * in[i];
  }
};
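This kernel pre-computes the correction for the +128 shift applied to the data. For one output element, with data row $a$ and weight row $b$ of length $k$:

$$\sum_{j=1}^{k}(a_j + 128)\,b_j \;=\; \sum_{j=1}^{k} a_j b_j \;+\; 128\sum_{j=1}^{k} b_j$$

so subtracting $128\sum_j b_j$ per weight row (accumulated here into out[i / k]) from the uint8-by-int8 GEMM result recovers the int8-by-int8 product the operator actually wants.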

struct QuantizedBetaCKernel {
  // prepare beta C (from n to m * n)
  MSHADOW_XINLINE static void Map(int i, size_t n, int32_t *out) {
    out[i] = out[i % n];
  }
};
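At this point the bias plus shift correction is a length-n vector sitting in the first n slots of the m-by-n output buffer; since the GEMM's $\beta C$ term is the full matrix, this kernel broadcasts row 0 to all m rows in place. The broadcast is safe even under a parallel launch because indices below n are only ever rewritten with their own value, and every read out[i % n] hits those indices. A minimal standalone illustration (buffer and size names are mine):

#include <cstdint>
#include <cstdio>

int main() {
  const size_t m = 3, n = 4;
  // First n entries hold the per-output-channel init (bias + shift
  // correction); the rest of the m*n buffer is about to be overwritten.
  int32_t out[m * n] = {10, 20, 30, 40};
  for (size_t i = 0; i < m * n; ++i) {
    out[i] = out[i % n];  // broadcast row 0 to all m rows, in place
  }
  for (size_t i = 0; i < m * n; ++i)
    printf("%d%c", out[i], (i + 1) % n ? ' ' : '\n');
  return 0;
}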

template<typename SrcType>
void MKLDNNQuantizedFullyConnectedForward(const nnvm::NodeAttrs& attrs,
                                          const OpContext &ctx,
                                          const std::vector<NDArray> &in_data,
                                          const std::vector<OpReqType> &req,
                                          const std::vector<NDArray> &out_data) {
  const FullyConnectedParam& param = nnvm::get<FullyConnectedParam>(attrs.parsed);
  using namespace mshadow;
  using namespace mxnet_op;
  size_t num_inputs = param.no_bias ? 2 : 3;
  CHECK_EQ(in_data.size(), num_inputs * 3);
  CHECK_EQ(out_data.size(), 3U);
  const NDArray& data = in_data[0];
  const NDArray& weight = in_data[1];
  const NDArray& out = out_data[0];
  TShape dshape = data.shape();
  TShape wshape = weight.shape();
  TShape oshape = out.shape();

  const float alpha = 1.0f;
  const float beta = 1.0f;
  const CBLAS_OFFSET offsetc = CblasFixOffset;
  const MKL_INT8 oa = 0;
  const MKL_INT8 ob = 0;
  MKL_INT32 oc = 0;
  const int m = dshape[0], n = wshape[0], k = dshape.ProdShape(1, dshape.ndim());
  Stream<cpu> *s = ctx.get_stream<cpu>();
  // cblas_gemm_s8u8s32 requires the first matrix to be uint8,
  // so shift the data from int8 (-128 to 127) to uint8 (0 to 255)
  int shift = 128;
  Tensor<cpu, 1, uint8_t> shiftdata =
    ctx.requested[quantilizedfc::kTempSpace].get_space_typed<cpu, 1, uint8_t>(
      Shape1(m * k), s);
  Kernel<QuantizedShiftKernel, cpu>::Launch(s, m * k, data.data().dptr<SrcType>(),
    shiftdata.dptr_, shift);
  Kernel<QuantizationRangeForMultiplicationStruct, cpu>::Launch(s, 1,
    out_data[1].data().dptr<float>(), out_data[2].data().dptr<float>(),
    in_data[num_inputs].data().dptr<float>(), in_data[num_inputs+1].data().dptr<float>(),
    in_data[num_inputs+2].data().dptr<float>(), in_data[num_inputs+3].data().dptr<float>());
  if (!param.no_bias) {
    const NDArray& bias = in_data[2];
    Kernel<QuantizedSumInitKernelWithBias, cpu>::Launch(s, n, out.data().dptr<int32_t>(),
      bias.data().dptr<int8_t>(), out_data[1].data().dptr<float>(),
      out_data[2].data().dptr<float>(), in_data[7].data().dptr<float>(),
      in_data[8].data().dptr<float>());
  } else {
    Kernel<QuantizedSumInitKernel, cpu>::Launch(s, n, out.data().dptr<int32_t>());
  }
  Kernel<QuantizedSumKernel, cpu>::Launch(s, n * k, k, weight.data().dptr<SrcType>(),
    out.data().dptr<int32_t>(), shift);

  Kernel<QuantizedBetaCKernel, cpu>::Launch(s, m * n, n, out.data().dptr<int32_t>());

  cblas_gemm_s8u8s32(CblasRowMajor,
                     CblasNoTrans,
                     CblasTrans,
                     offsetc,
                     m,
                     n,
                     k,
                     alpha,
                     shiftdata.dptr_,
                     k,
                     oa,
                     weight.data().dptr<SrcType>(),
                     k,
                     ob,
                     beta,
                     out.data().dptr<int32_t>(),
                     n,
                     &oc);
}
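For reference, as I read the Intel MKL documentation, cblas_gemm_s8u8s32 computes (up to the offset handling selected by offsetc):

$$C \;\leftarrow\; \alpha\,\bigl(\mathrm{op}(A) + o_a\bigr)\,\bigl(\mathrm{op}(B) + o_b\bigr) \;+\; \beta C \;+\; o_c$$

Here $\alpha = \beta = 1$, $o_a = o_b = 0$, and CblasFixOffset with $o_c = 0$ makes the trailing offset a no-op, so the call reduces to $C \leftarrow \text{shiftdata} \cdot \text{weight}^{T} + C$: the shifted-uint8 GEMM plus the pre-filled $C$ holding the bias and the shift correction, which together yield the int32 result of the original int8 product plus bias.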

NNVM_REGISTER_OP(_contrib_quantized_fully_connected)
.set_attr<FComputeEx>("FComputeEx<cpu>",
    MKLDNNQuantizedFullyConnectedForward<int8_t>)
.set_attr<FResourceRequest>("FResourceRequest",
  [](const NodeAttrs& attrs) {
    return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
  });

}  // namespace op
}  // namespace mxnet
#endif

Reviewer (on the registration): only supports int8? How about the quantized FC for GPU?
Author: .set_attr("FCompute", QuantizedFullyConnectedForwardGPU<int8_t, int32_t, int32_t>); in quantized_fully_connected.cu
Reviewer (on the quantilizedfc namespace): To align with the definition in fp32 fully_connected-inl.h, suggest to use quantized_fullc here.
Author: fixed