MKLDNN 1.0.2 #2102

Closed
6 changes: 3 additions & 3 deletions cmake/external/mkldnn.cmake
@@ -2,7 +2,7 @@ include (ExternalProject)

set(MKLDNN_URL https://github.com/intel/mkl-dnn.git)
# If MKLDNN_TAG is updated, check if MKLML_VERSION and platform.cmake.patch need to be updated.
-set(MKLDNN_TAG v0.18.1)
+set(MKLDNN_TAG v1.0.2)
set(MKLML_VERSION 2019.0.5.20190502)

if(WIN32)
@@ -62,9 +62,9 @@ if (onnxruntime_USE_MKLDNN)
endif()
set(MKLDNN_INCLUDE_DIR ${MKLDNN_INSTALL}/include)
set(MKLDNN_CMAKE_EXTRA_ARGS)
-set(MKLDNN_PATCH_COMMAND1 git apply ${CMAKE_SOURCE_DIR}/patches/mkldnn/mem-patch.cmake.patch)
+# set(MKLDNN_PATCH_COMMAND1 git apply ${CMAKE_SOURCE_DIR}/patches/mkldnn/mem-patch.cmake.patch)
# discard prior changes due to patching in mkldnn source to unblock incremental builds.
-set(MKLDNN_PATCH_DISCARD_COMMAND cd ${MKLDNN_SOURCE} && git checkout -- .)
+# set(MKLDNN_PATCH_DISCARD_COMMAND cd ${MKLDNN_SOURCE} && git checkout -- .)
if(NOT onnxruntime_BUILD_FOR_NATIVE_MACHINE)
# pre-v1.0
list(APPEND MKLDNN_CMAKE_EXTRA_ARGS "-DARCH_OPT_FLAGS=")
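
Note on the cmake hunk above (summary, not part of the diff): the MKL-DNN tag moves from v0.18.1 to v1.0.2, and the local mem-patch plus the "git checkout -- ." discard step are commented out, presumably because the patch is no longer applied against the v1.0.2 sources. Below is a rough, hypothetical sketch of how these variables typically feed an ExternalProject build; the target name and argument list are assumptions for illustration, not code copied from this repository.

# Hypothetical sketch (not copied from this repo): usual shape of an
# ExternalProject build driven by the variables edited above.
include(ExternalProject)

set(MKLDNN_URL https://github.com/intel/mkl-dnn.git)
set(MKLDNN_TAG v1.0.2)   # tag bumped by this PR

ExternalProject_Add(project_mkldnn
  GIT_REPOSITORY ${MKLDNN_URL}
  GIT_TAG        ${MKLDNN_TAG}
  # Install prefix and any extra args collected above are passed through to MKL-DNN's own cmake.
  CMAKE_ARGS     -DCMAKE_INSTALL_PREFIX=${MKLDNN_INSTALL} ${MKLDNN_CMAKE_EXTRA_ARGS}
  UPDATE_COMMAND ""
)
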
195 changes: 0 additions & 195 deletions onnxruntime/core/providers/mkldnn/activation/activations.cc

This file was deleted.

20 changes: 0 additions & 20 deletions onnxruntime/core/providers/mkldnn/activation/activations.h

This file was deleted.

23 changes: 11 additions & 12 deletions onnxruntime/core/providers/mkldnn/math/gemm.cc
@@ -5,6 +5,7 @@
#include "core/providers/cpu/math/gemm_helper.h"
#include "core/util/math_cpuonly.h"
#include "mkldnn.h"
#include "mkldnn.hpp"
#include "core/providers/mkldnn/mkldnn_fwd.h"

namespace onnxruntime {
@@ -28,9 +29,9 @@ Status Gemm<float>::Compute(OpKernelContext* ctx) const {
if (!helper.State().IsOK())
return helper.State();

-int M = gsl::narrow_cast<int>(helper.M());
-int N = gsl::narrow_cast<int>(helper.N());
-int K = gsl::narrow_cast<int>(helper.K());
+mkldnn::memory::dim M = gsl::narrow_cast<int>(helper.M());
+mkldnn::memory::dim N = gsl::narrow_cast<int>(helper.N());
+mkldnn::memory::dim K = gsl::narrow_cast<int>(helper.K());
auto Y = ctx->Output(0, TensorShape({M, N}));

if (beta_ != 0) {
@@ -77,15 +78,13 @@ Status Gemm<float>::Compute(OpKernelContext* ctx) const {
}
}

-// mkldnn_sgemm expects col major matrices, so we need to swap the operands A and B
-auto status = mkldnn_sgemm(trans_B_ ? "T" : "N",
-                           trans_A_ ? "T" : "N",
-                           &N, &M, &K,
-                           &alpha_, W->template Data<float>(),
-                           trans_B_ ? &K : &N,
-                           X->template Data<float>(),
-                           trans_A_ ? &M : &K,
-                           &beta_, Y->template MutableData<float>(), &N);
+// mkldnn_sgemm expects row major matrices, so no need to swap the operands A and B
+auto status = mkldnn_sgemm(trans_A_ ? 'T' : 'N',
+                           trans_B_ ? 'T' : 'N',
+                           M, N, K,
+                           alpha_, X->template Data<float>(), trans_A_ ? M : K,
+                           W->template Data<float>(), trans_B_ ? K : N,
+                           beta_, Y->template MutableData<float>(), N);
if (status == mkldnn_success) {
return Status::OK();
} else {
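
Note on the gemm.cc hunks above (summary, not part of the diff): MKL-DNN v1.x changes the GEMM C API from the v0.x Fortran-style, column-major convention (string transpose flags, dimensions and scalars passed by pointer, which forced the old code to swap the A and B operands) to a row-major convention with char transpose flags and by-value arguments; the dimensions are declared as mkldnn::memory::dim (a wider signed integer type in v1.x) to match. Below is a small standalone sketch of the new calling convention, written against the v1.x C API as described; the sample matrices and program are illustrative only and are not part of this PR.

// Illustrative sketch (not part of the PR): calling the v1.x mkldnn_sgemm with
// row-major operands. Computes C (2x2) = 1.0f * A (2x3) * B (3x2) + 0.0f * C.
#include <cstdio>
#include "mkldnn.h"

int main() {
  const float A[2 * 3] = {1, 2, 3,
                          4, 5, 6};      // row-major, lda = K = 3
  const float B[3 * 2] = {1, 0,
                          0, 1,
                          1, 1};         // row-major, ldb = N = 2
  float C[2 * 2] = {0, 0, 0, 0};         // row-major, ldc = N = 2

  // No transposition; dimensions and scalars are passed by value in v1.x.
  mkldnn_status_t status = mkldnn_sgemm('N', 'N',
                                        /*M=*/2, /*N=*/2, /*K=*/3,
                                        /*alpha=*/1.0f, A, /*lda=*/3,
                                        B, /*ldb=*/2,
                                        /*beta=*/0.0f, C, /*ldc=*/2);
  if (status != mkldnn_success) return 1;
  std::printf("%g %g\n%g %g\n", C[0], C[1], C[2], C[3]);  // expect: 4 5 / 10 11
  return 0;
}
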