Support Quantilized Fully Connected #5

Open · wants to merge 14 commits into master2
167 changes: 167 additions & 0 deletions src/operator/quantization/mkldnn/mkldnn_quantized_fully_connected.cc
@@ -0,0 +1,167 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

#if MXNET_USE_MKLDNN == 1

#include "../../nn/mkldnn/mkldnn_base-inl.h"
#include "../quantization_utils.h"
#include "../../nn/fully_connected-inl.h"

namespace mxnet {
namespace op {

namespace quantilizedfc {
Reviewer: To align with the definition in fp32 fully_connected-inl.h, suggest to use quantized_fullc here.

Author: fixed

enum QuantilizedfcOpResource {kTempSpace};
Reviewer: QuantizedFullyConnectedOpResource

Author: fixed

}

struct QuantizedShiftKernel {
MSHADOW_XINLINE static void Map(int i, int8_t *in, uint8_t *out, int shift) {
out[i] = in[i] + shift;
}
};

struct QuantizedSumInitKernelWithBias {
// init sum data with bias for matrix b (n)
MSHADOW_XINLINE static void Map(int i, int32_t *out,
Reviewer: Previously, in the optimization for embedding, we thought the Map function would have low efficiency with omp. Do you have any profiling data here?

Author (@lihaofd, Oct 19, 2018): In UT testing, the Map way is faster (2-3X) than omp.

Reviewer: Interesting. I thought kernel launch also uses omp.

const int8_t *bias, const float *min_out,
const float *max_out, const float *min_bias,
const float *max_bias) {
typedef int32_t T1;
typedef int8_t T2;
using mshadow::red::limits::MinValue;
using mshadow::red::limits::MaxValue;
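// rescale the int8 bias from its own quantization scale (min_bias, max_bias) into the
// int32 output scale (min_out, max_out) so it can directly seed the int32 accumulator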
float float_for_one_out_quant =
MaxAbs(*min_out, *max_out) / static_cast<double>(MaxValue<T1>());
float float_for_one_bias_quant =
MaxAbs(*min_bias, *max_bias) / static_cast<double>(MaxValue<T2>());
if (float_for_one_out_quant != 0) {
out[i] = bias[i] * float_for_one_bias_quant /
float_for_one_out_quant;
} else {
LOG(INFO) << "WARNING: QuantizedBiasAddKernel float_for_one_out_quant is 0 !";
Reviewer: what's QuantizedBiasAddKernel?

Author: fixed

out[i] = 0;
}
}
};

struct QuantizedSumInitKernel {
// init sum data for matrix b (n)
MSHADOW_XINLINE static void Map(int i, int32_t *out) {
out[i] = 0;
}
};

struct QuantizedSumKernel {
// get sum data(n) for matrix b (n * k)
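// each output row n accumulates -shift * sum_k(weight[n][k]); this cancels the +shift added to
// the data by QuantizedShiftKernel, so the uint8 x int8 GEMM matches the original int8 x int8 result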
MSHADOW_XINLINE static void Map(int i, size_t k, int8_t *in, int32_t *out, int shift) {
out[i / k] -= shift * in[i];
}
};

struct QuantizedBetaCKernel {
// prepare beta C (from n to m * n)
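// (replicates the n per-column init values across all m rows so the GEMM can accumulate with beta = 1)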
MSHADOW_XINLINE static void Map(int i, size_t n, int32_t *out) {
out[i] = out[i % n];
}
};

template<typename SrcType>
void MKLDNNQuantizedFullyConnectedForward(const nnvm::NodeAttrs& attrs,
const OpContext &ctx,
const std::vector<NDArray> &in_data,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &out_data) {
const FullyConnectedParam& param = nnvm::get<FullyConnectedParam>(attrs.parsed);
using namespace mshadow;
using namespace mxnet_op;
size_t num_inputs = param.no_bias ? 2 : 3;
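// each quantized input (data, weight, optional bias) also carries its min/max float range,
// so in_data holds three entries per logical input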
CHECK_EQ(in_data.size(), num_inputs * 3);
CHECK_EQ(out_data.size(), 3U);
const NDArray& data = in_data[0];
const NDArray& weight = in_data[1];
const NDArray& out = out_data[0];
TShape dshape = data.shape();
TShape wshape = weight.shape();
TShape oshape = out.shape();

const float alpha = 1.0f;
const float beta = 1.0f;
const CBLAS_OFFSET offsetc = CblasFixOffset;
const MKL_INT8 oa = 0;
const MKL_INT8 ob = 0;
MKL_INT32 oc = 0;
const int m = dshape[0], n = wshape[0], k = dshape.ProdShape(1, dshape.ndim());
Stream<cpu> *s = ctx.get_stream<cpu>();
// cblas_gemm_s8u8s32 requires the first matrix to be uint8
// shift data from int8 (from -128 to 127) to uint8 (from 0 to 255)
int shift = 128;
Tensor<cpu, 1, uint8_t> shiftdata =
ctx.requested[quantilizedfc::kTempSpace].get_space_typed<cpu, 1, uint8_t>(
Shape1(m * k), s);
Kernel<QuantizedShiftKernel, cpu>::Launch(s, m * k, data.data().dptr<SrcType>(),
shiftdata.dptr_, shift);
Kernel<QuantizationRangeForMultiplicationStruct, cpu>::Launch(s, 1,
out_data[1].data().dptr<float>(), out_data[2].data().dptr<float>(),
in_data[num_inputs].data().dptr<float>(), in_data[num_inputs+1].data().dptr<float>(),
in_data[num_inputs+2].data().dptr<float>(), in_data[num_inputs+3].data().dptr<float>());
if (!param.no_bias) {
const NDArray& bias = in_data[2];
Kernel<QuantizedSumInitKernelWithBias, cpu>::Launch(s, n, out.data().dptr<int32_t>(),
bias.data().dptr<int8_t>(), out_data[1].data().dptr<float>(),
out_data[2].data().dptr<float>(), in_data[7].data().dptr<float>(),
in_data[8].data().dptr<float>());
} else {
Kernel<QuantizedSumInitKernel, cpu>::Launch(s, n, out.data().dptr<int32_t>());
}
Kernel<QuantizedSumKernel, cpu>::Launch(s, n * k, k, weight.data().dptr<SrcType>(),
out.data().dptr<int32_t>(), shift);

Kernel<QuantizedBetaCKernel, cpu>::Launch(s, m * n, n, out.data().dptr<int32_t>());

cblas_gemm_s8u8s32(CblasRowMajor,
CblasNoTrans,
CblasTrans,
offsetc,
m,
n,
k,
alpha,
shiftdata.dptr_,
k,
oa,
weight.data().dptr<SrcType>(),
k,
ob,
beta,
out.data().dptr<int32_t>(),
n,
&oc);
}

NNVM_REGISTER_OP(_contrib_quantized_fully_connected)
.set_attr<FComputeEx>("FComputeEx<cpu>",
MKLDNNQuantizedFullyConnectedForward<int8_t>)
Reviewer: only supports int8? How about the quantized FC for GPU?

Author: .set_attr("FCompute", QuantizedFullyConnectedForwardGPU<int8_t, int32_t, int32_t>); in quantized_fully_connected.cu

.set_attr<FResourceRequest>("FResourceRequest",
[](const NodeAttrs& attrs) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
});

} // namespace op
} // namespace mxnet
#endif
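For reference (not part of this PR), here is a minimal NumPy sketch of the shift trick implemented above: cblas_gemm_s8u8s32 wants a uint8 A matrix, so the int8 data is shifted by +128 and the result is corrected by subtracting 128 times the per-row sums of the weight, which is exactly what QuantizedShiftKernel, QuantizedSumKernel, QuantizedBetaCKernel, and the beta = 1 GEMM accumulation implement. Shapes and the RNG seed below are arbitrary illustrations.

```python
import numpy as np

# hypothetical sizes, for illustration only
m, n, k = 4, 5, 6
rng = np.random.RandomState(0)
data = rng.randint(-128, 128, size=(m, k), dtype=np.int32)    # plays the int8 data
weight = rng.randint(-128, 128, size=(n, k), dtype=np.int32)  # plays the int8 weight (n x k)

shift = 128
shifted = data + shift                          # QuantizedShiftKernel: move int8 into uint8 range
row_comp = -shift * weight.sum(axis=1)          # QuantizedSumKernel: -shift * sum_k weight[n, k]
acc = np.broadcast_to(row_comp, (m, n)).copy()  # QuantizedBetaCKernel: replicate n -> m x n
acc += shifted @ weight.T                       # cblas_gemm_s8u8s32 with beta = 1 accumulates into acc

assert np.array_equal(acc, data @ weight.T)     # identical to the plain int8 x int8 GEMM
```

The bias path (QuantizedSumInitKernelWithBias) only changes how the accumulator is initialized: the int8 bias is rescaled into the int32 output scale before the same accumulation runs.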
17 changes: 17 additions & 0 deletions src/operator/quantization/quantized_fully_connected.cc
@@ -79,6 +79,22 @@ bool QuantizedFullyConnectedType(const nnvm::NodeAttrs& attrs,
return true;
}

bool QuantizedFullyConnectedStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
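// default to the plain FCompute path; prefer the MKLDNN FComputeEx path when running on CPU
// with MKLDNN enabled. Outputs always use default (dense) storage.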
*dispatch_mode = DispatchMode::kFCompute;
#if MXNET_USE_MKLDNN == 1
if (dev_mask == mshadow::cpu::kDevMask) {
*dispatch_mode = DispatchMode::kFComputeEx;
}
#endif
for (size_t i = 0; i < out_attrs->size(); i++)
(*out_attrs)[i] = kDefaultStorage;
return true;
}

NNVM_REGISTER_OP(_contrib_quantized_fully_connected)
.describe(R"code(Fully Connected operator for input, weight and bias data type of int8,
and accumulates in type int32 for the output. For each argument, two more arguments of type
@@ -112,6 +128,7 @@ and max thresholds representing the thresholds for quantizing the float32 output
})
.set_attr<nnvm::FInferShape>("FInferShape", QuantizedFullyConnectedShape)
.set_attr<nnvm::FInferType>("FInferType", QuantizedFullyConnectedType)
.set_attr<FInferStorageType>("FInferStorageType", QuantizedFullyConnectedStorageType)
.set_attr<FNeedRequantize>("FNeedRequantize", [](const NodeAttrs& attrs) { return true; })
.add_argument("data", "NDArray-or-Symbol", "Input data.")
.add_argument("weight", "NDArray-or-Symbol", "weight.")
25 changes: 13 additions & 12 deletions tests/python/quantization/test_quantization.py
@@ -269,10 +269,7 @@ def check_quantized_pooling(data_shape, kernel, pool_type, pad, stride, global_p
 @with_seed()
 def test_quantized_fc():
     def check_quantized_fc(data_shape, num_hidden, no_bias, qdtype, flatten=True):
-        if mx.current_context().device_type != 'gpu':
-            print('skipped testing quantized_fc on cpu since it is not supported yet')
-            return
-        elif qdtype == 'uint8' and is_test_for_gpu():
+        if qdtype == 'uint8' and is_test_for_gpu():
             print('skipped testing quantized_fc for gpu uint8 since it is not supported yet')
             return

@@ -283,17 +280,17 @@ def check_quantized_fc(data_shape, num_hidden, no_bias, qdtype, flatten=True):
         fc_fp32_exe = fc_fp32.simple_bind(ctx=mx.current_context(), grad_req='null')
         if qdtype == 'uint8':
             data_low = 0.0
-            data_high = 127.0
+            data_high = 63.0
         else:
-            data_low = -127.0
-            data_high = 127.0
+            data_low = -63.0
+            data_high = 63.0
         fc_fp32_exe.arg_dict[arg_names[0]][:] = mx.nd.random.uniform(low=data_low, high=data_high,
-                                                                      shape=data_shape).astype('int32')
-        fc_fp32_exe.arg_dict[arg_names[1]][:] = mx.nd.random.uniform(low=-127.0, high=127.0,
-                                                                      shape=arg_shapes[1]).astype('int32')
+                                                                      shape=data_shape).astype('int8')
+        fc_fp32_exe.arg_dict[arg_names[1]][:] = mx.nd.random.uniform(low=data_low, high=data_high,
+                                                                      shape=arg_shapes[1]).astype('int8')
         if not no_bias:
-            fc_fp32_exe.arg_dict[arg_names[2]][:] = mx.nd.random.uniform(low=-127.0, high=127.0,
-                                                                          shape=arg_shapes[2]).astype('int32')
+            fc_fp32_exe.arg_dict[arg_names[2]][:] = mx.nd.random.uniform(low=data_low, high=data_high,
+                                                                          shape=arg_shapes[2]).astype('int8')
         output = fc_fp32_exe.forward()[0]

         qdata = mx.sym.Variable(name='qdata', shape=data_shape, dtype='int8')
@@ -335,6 +332,10 @@ def check_quantized_fc(data_shape, num_hidden, no_bias, qdtype, flatten=True):
         check_quantized_fc((32, 111, 2, 2), 100, True, qdtype)
         check_quantized_fc((32, 512, 2, 2), 100, False, qdtype)
         check_quantized_fc((32, 111, 2, 2), 100, False, qdtype)
+        check_quantized_fc((256, 2048, 2, 2), 800, False, qdtype)
+        check_quantized_fc((256, 111, 2, 2), 800, False, qdtype)
+        check_quantized_fc((256, 2048, 2, 2), 800, True, qdtype)
+        check_quantized_fc((256, 111, 2, 2), 800, True, qdtype)

@with_seed()
def test_quantized_flatten():