diff --git a/src/operator/tensor/elemwise_unary_op.h b/src/operator/tensor/elemwise_unary_op.h
index 83b86bf1d94c..8d5ad055b118 100644
--- a/src/operator/tensor/elemwise_unary_op.h
+++ b/src/operator/tensor/elemwise_unary_op.h
@@ -29,11 +29,15 @@
 #include <vector>
 #include <utility>
 #include <algorithm>
+#include <climits>
 #include "./cast_storage-inl.h"
 #include "../mshadow_op.h"
 #include "../mxnet_op.h"
 #include "../elemwise_op_common.h"
 #include "../../ndarray/ndarray_function.h"
+#if MSHADOW_USE_MKL == 1
+#include "mkl.h"
+#endif
 
 namespace mxnet {
 namespace op {
@@ -348,6 +352,43 @@ class UnaryOp : public OpBase {
       LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
     }
   }
+
+#if MSHADOW_USE_MKL == 1
+  static inline void MKLLog(MKL_INT size, const float* pIn, float* pOut) {
+    vsLn(size, pIn, pOut);
+  }
+
+  static inline void MKLLog(MKL_INT size, const double* pIn, double* pOut) {
+    vdLn(size, pIn, pOut);
+  }
+#endif
+
+  template<typename xpu, typename OP>
+  static void LogCompute(const nnvm::NodeAttrs& attrs,
+                         const OpContext& ctx,
+                         const std::vector<TBlob>& inputs,
+                         const std::vector<OpReqType>& req,
+                         const std::vector<TBlob>& outputs) {
+    if (req[0] == kNullOp) return;
+    // if defined MSHADOW_USE_MKL then call mkl log when req is KWriteTo, type_flag
+    // is mshadow::kFloat32 or mshadow::kFloat64 and data size less than or equal MKL_INT_MAX
+#if MSHADOW_USE_MKL == 1
+    auto type_flag = inputs[0].type_flag_;
+    const size_t MKL_INT_MAX = (sizeof(MKL_INT) == sizeof(int)) ? INT_MAX : LLONG_MAX;
+    size_t input_size = inputs[0].Size();
+    if (req[0] == kWriteTo &&
+        input_size <= MKL_INT_MAX &&
+        (type_flag == mshadow::kFloat32 || type_flag == mshadow::kFloat64)) {
+      MSHADOW_SGL_DBL_TYPE_SWITCH(type_flag, DType, {
+        MKLLog(input_size, inputs[0].dptr<DType>(), outputs[0].dptr<DType>());
+      });
+    } else {
+      Compute<xpu, OP>(attrs, ctx, inputs, req, outputs);
+    }
+#else
+    Compute<xpu, OP>(attrs, ctx, inputs, req, outputs);
+#endif
+  }
 };
 
 /*! \brief Map legacy unary_bwd to backward_grad */
diff --git a/src/operator/tensor/elemwise_unary_op_basic.cc b/src/operator/tensor/elemwise_unary_op_basic.cc
index 301fc48d2128..9730d0096e58 100644
--- a/src/operator/tensor/elemwise_unary_op_basic.cc
+++ b/src/operator/tensor/elemwise_unary_op_basic.cc
@@ -940,7 +940,7 @@ The storage type of ``exp`` output is always dense
 .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseOut{"_mul"});
 
 // log
-MXNET_OPERATOR_REGISTER_UNARY_WITH_SPARSE_DR(log, cpu, mshadow_op::log)
+MXNET_OPERATOR_REGISTER_UNARY(log)
 MXNET_ADD_SPARSE_OP_ALIAS(log)
 .describe(R"code(Returns element-wise Natural logarithmic value of the input.
 
@@ -949,6 +949,7 @@ The natural logarithm is logarithm in base *e*, so that ``log(exp(x)) = x``
 The storage type of ``log`` output is always dense
 
 )code" ADD_FILELINE)
+.set_attr<FCompute>("FCompute<cpu>", UnaryOp::LogCompute<cpu, mshadow_op::log>)
 .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_log"});
 
 // log10