Merge branch 'master' into shape-bucket-opt
FusionBolt authored Aug 18, 2023
2 parents 30dba0f + 2f10458 commit 4b59fd1
Showing 146 changed files with 2,581 additions and 1,386 deletions.
5 changes: 3 additions & 2 deletions README.md
@@ -2,8 +2,9 @@
<img src="docs/logo.png" width="400" alt="nncase" />
</div>

[![License](https://img.shields.io/badge/license-Apache%202-blue)](https://raw.githubusercontent.com/kendryte/nncase/master/LICENSE)
[![compiler-build](https://github.com/kendryte/nncase/actions/workflows/compiler-build.yml/badge.svg)](https://github.com/kendryte/nncase/actions/workflows/compiler-build.yml)
[![GitHub repository](https://img.shields.io/badge/github-repository-blue?logo=github&style=plastic)](https://github.com/kendryte/nncase)
[![Gitee repository](https://img.shields.io/badge/gitee-repository-blue?logo=gitee&style=plastic)](https://gitee.com/kendryte/nncase)
[![GitHub release](https://img.shields.io/github/v/release/kendryte/nncase?color=brightgreen&display_name=tag&logo=github&style=plastic)](https://github.com/kendryte/nncase/releases)

`nncase` is a neural network compiler for AI accelerators.

1 change: 1 addition & 0 deletions conanfile.py
@@ -51,6 +51,7 @@ def requirements(self):
if self.options.tests:
self.requires('gtest/1.10.0')
self.requires('ortki/0.0.2')
self.requires('rapidjson/1.1.x')

if self.options.python:
self.requires('pybind11/2.6.1')
3 changes: 2 additions & 1 deletion pyproject.toml
@@ -24,7 +24,8 @@ dependencies = ["numpy"]
homepage = "https://github.com/kendryte/nncase"

[build-system]
requires = ["setuptools>=42", "wheel", "conan<=1.59", "ninja"]
requires = ["setuptools>=42", "wheel", "conan<=1.59", "ninja", "gitpython"]
build-backend = "setuptools.build_meta"

[tool.cibuildwheel]
build = ["cp37*", "cp38*", "cp39*", "cp310*"]
2 changes: 1 addition & 1 deletion requirements.test.txt
@@ -17,4 +17,4 @@ pytest-xdist
pyyaml
pythonnet==3.0.1
clr_loader==0.2.4
toml==0.10.2
toml==0.10.2
14 changes: 13 additions & 1 deletion setup.py
@@ -12,7 +12,8 @@
import io
import re
import time

import subprocess
from git.repo import Repo
# See ref: https://stackoverflow.com/a/51575996


@@ -277,8 +278,19 @@ def find_version():
version_prefix = re.findall(r"NNCASE_VERSION \"(.+)\"", version_file)

if version_prefix:
repo_path = os.getcwd()
repo = Repo(repo_path)
if repo.tags:
latest_commit = subprocess.check_output(
['git', 'rev-parse', 'HEAD']).decode('utf-8').strip()
tagged_commit = subprocess.check_output(
['git', 'rev-list', '-n', '1', repo.tags[-1].name]).decode('utf-8').strip()
if latest_commit == tagged_commit:
return version_prefix[0]

version_suffix = time.strftime("%Y%m%d", time.localtime())
return version_prefix[0] + "." + version_suffix

raise RuntimeError("Unable to find version string.")


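The updated `find_version` returns the bare `NNCASE_VERSION` prefix only when HEAD is exactly the most recently tagged commit, and otherwise appends a date suffix for development builds. A minimal standalone sketch of that rule, using plain `git` calls in place of GitPython's `repo.tags` (the helper name and the sorted-tag lookup are illustrative assumptions):

```python
import subprocess
import time

def resolve_version(version_prefix: str) -> str:
    """Return version_prefix as-is for a tagged HEAD, else append a date suffix."""
    head = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
    tags = subprocess.check_output(['git', 'tag', '--sort=creatordate']).decode().split()
    if tags:
        tagged = subprocess.check_output(
            ['git', 'rev-list', '-n', '1', tags[-1]]).decode().strip()
        if head == tagged:
            return version_prefix  # release build: HEAD sits on the latest tag
    # development build, e.g. "2.1.0.20230818"
    return version_prefix + "." + time.strftime("%Y%m%d", time.localtime())
```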
4 changes: 2 additions & 2 deletions src/Native/src/kernels/stackvm/reference/batch_to_space.cpp
@@ -104,7 +104,7 @@ dims_t infer_shape(gsl::span<const size_t> origin_in_shape,
gsl::span<const size_t> block_shape,
const paddings_t &crops) {
auto d4 = fixed_dims(0, 2, 3, 1);
auto d3 = fixed_dims(1, 2, 0);
auto d3 = fixed_dims(0, 2, 1);
auto inPerm = origin_in_shape.size() == 4
? gsl::span<const size_t>{d4.data(), d4.size()}
: gsl::span<const size_t>{d3.data(), d3.size()};
@@ -123,7 +123,7 @@ dims_t infer_shape(gsl::span<const size_t> origin_in_shape,
in_shape.end());
}
auto outd4 = fixed_dims(0, 3, 1, 2);
auto outd3 = fixed_dims(2, 0, 1);
auto outd3 = fixed_dims(0, 2, 1);
auto outPerm = origin_in_shape.size() == 4
? gsl::span<const size_t>{outd4.data(), outd4.size()}
: gsl::span<const size_t>{outd3.data(), outd3.size()};
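For rank-3 inputs, both permutations change from cyclic shifts to `(0, 2, 1)`: the batch dimension stays put and only the last two axes swap, so the input and output perms are identical and the perm is its own inverse. A quick numpy check of that property (the example array is illustrative):

```python
import numpy as np

x = np.arange(2 * 3 * 4).reshape(2, 3, 4)  # batch dim first

y = x.transpose(0, 2, 1)          # swap the last two axes, keep the batch dim
roundtrip = y.transpose(0, 2, 1)  # applying the same perm again undoes it

assert y.shape == (2, 4, 3)
assert (roundtrip == x).all()
```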
13 changes: 11 additions & 2 deletions src/Native/src/kernels/stackvm/reference/matmul.cpp
@@ -52,8 +52,17 @@ result<void> matmul_unit_impl(const T *input_a, const T *input_b, T *output,

template <typename T>
result<void> matmul_impl(const T *input_a, const T *input_b, T *output,
gsl::span<const size_t> in_a_shape,
gsl::span<const size_t> in_b_shape) noexcept {
gsl::span<const size_t> in_a_shape_,
gsl::span<const size_t> in_b_shape_) noexcept {
dims_t in_a_shape = in_a_shape_;
dims_t in_b_shape = in_b_shape_;
if (in_a_shape.size() == 1) {
in_a_shape.insert(in_a_shape.begin(), 1);
}

if (in_b_shape.size() == 1) {
in_b_shape.insert(in_b_shape.end(), 1);
}
auto new_a_shape = to_4d(in_a_shape);
auto new_b_shape = to_4d(in_b_shape);
auto a_unit_size = new_a_shape[2] * new_a_shape[3];
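The new rank-1 handling prepends a 1 to a 1-D lhs shape and appends a 1 to a 1-D rhs shape before the 4-D reshape, matching numpy/ONNX `matmul` promotion. A hedged Python sketch of the rule (the helper name is illustrative):

```python
import numpy as np

def promote_matmul_shapes(a_shape, b_shape):
    """Mirror the kernel's rank-1 handling: (K,) lhs -> (1, K), (K,) rhs -> (K, 1)."""
    a_shape, b_shape = list(a_shape), list(b_shape)
    if len(a_shape) == 1:
        a_shape.insert(0, 1)
    if len(b_shape) == 1:
        b_shape.append(1)
    return a_shape, b_shape

# numpy.matmul applies the same promotion before multiplying:
a, b = np.ones(3), np.ones((3, 4))
assert promote_matmul_shapes(a.shape, b.shape) == ([1, 3], [3, 4])
assert np.matmul(a, b).shape == (4,)  # (1,3) @ (3,4) -> (1,4), then the 1 is dropped
```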
6 changes: 2 additions & 4 deletions src/Native/src/kernels/stackvm/reference/reduce_arg.cpp
@@ -60,10 +60,8 @@ result<void> reduce_arg_impl(TReducer &&reducer, T init_value, const T *input,
out_map[out_idx].clear();
out_map[out_idx].push_back(index[axes[0]]);
dst = src;
} else if constexpr (std::is_same_v<T, float>) {
if (fabs(src - dst) < epsilon) {
out_map[out_idx].push_back(index[axes[0]]);
}
} else if (std::fabs(src - dst) < epsilon) {
out_map[out_idx].push_back(index[axes[0]]);
}
return ok();
}));
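The rewrite drops the `if constexpr` float-only guard, so the runtime epsilon comparison now records near-equal values as ties for every element type. A simplified 1-D Python sketch of the tie-tracking idea (the function name and `keep_last` flag are illustrative; the real kernel works over arbitrary axes and reducers):

```python
def arg_reduce(values, epsilon=1e-6, keep_last=False):
    """Argmax that treats values within epsilon of the current best as ties."""
    best, ties = None, []
    for i, v in enumerate(values):
        if best is None or v > best:
            best, ties = v, [i]          # strictly better: restart the tie list
        elif abs(v - best) < epsilon:
            ties.append(i)               # close enough to count as a tie
    return ties[-1] if keep_last else ties[0]

assert arg_reduce([1.0, 3.0, 3.0 - 1e-9]) == 1
assert arg_reduce([1.0, 3.0, 3.0 - 1e-9], keep_last=True) == 2
```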
23 changes: 21 additions & 2 deletions src/Native/src/kernels/stackvm/shape_infer.h
@@ -287,13 +287,24 @@ inline dims_t onehot_infer_shape(gsl::span<const size_t> indices_shape,
return new_shape;
}

inline result<dims_t> matmul_infer_shape(gsl::span<const size_t> lhs_shape,
gsl::span<const size_t> rhs_shape) {
inline result<dims_t> matmul_infer_shape(gsl::span<const size_t> lhs_shape_,
gsl::span<const size_t> rhs_shape_) {
dims_t lhs_shape = lhs_shape_;
dims_t rhs_shape = rhs_shape_;

if (lhs_shape.size() == 2 && rhs_shape.size() == 2) {
auto new_shape = dims_t{lhs_shape[0], rhs_shape[1]};
return ok(new_shape);
}

if (lhs_shape.size() == 1) {
lhs_shape.insert(lhs_shape.begin(), 1);
}

if (rhs_shape.size() == 1) {
rhs_shape.insert(rhs_shape.end(), 1);
}

auto new_a_shape = runtime::to_4d(lhs_shape);
auto new_b_shape = runtime::to_4d(rhs_shape);
auto big_shape = std::max(lhs_shape.size(), rhs_shape.size());
@@ -304,6 +315,14 @@ inline result<dims_t> matmul_infer_shape(gsl::span<const size_t> lhs_shape,
}
new_shape.push_back(lhs_shape[lhs_shape.size() - 2]);
new_shape.push_back(rhs_shape.back());
if (lhs_shape_.size() == 1) {
new_shape.erase(new_shape.begin() + big_shape - 2);
big_shape--;
}

if (rhs_shape_.size() == 1) {
new_shape.erase(new_shape.begin() + big_shape - 1);
}
return ok(new_shape);
}

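The inference now mirrors the kernel: promote 1-D operands, broadcast the batch dimensions, then erase the dimensions that exist only because of the promotion. A Python sketch under the assumption that batch dims are equal or 1 (the helper name is illustrative):

```python
def matmul_infer_shape(lhs, rhs):
    """Promote 1-D shapes, broadcast batch dims, drop the promoted dims again."""
    lhs_p = [1] + list(lhs) if len(lhs) == 1 else list(lhs)
    rhs_p = list(rhs) + [1] if len(rhs) == 1 else list(rhs)
    batch_a, batch_b = lhs_p[:-2], rhs_p[:-2]
    n = max(len(batch_a), len(batch_b))
    batch_a = [1] * (n - len(batch_a)) + batch_a  # left-pad to a common rank
    batch_b = [1] * (n - len(batch_b)) + batch_b
    out = [max(x, y) for x, y in zip(batch_a, batch_b)]
    out += [lhs_p[-2], rhs_p[-1]]
    if len(lhs) == 1:
        out.pop(-2)  # remove the row dim the promotion prepended
    if len(rhs) == 1:
        out.pop(-1)  # remove the column dim the promotion appended
    return out

assert matmul_infer_shape([3], [3, 4]) == [4]
assert matmul_infer_shape([2, 3, 4], [4]) == [2, 3]
assert matmul_infer_shape([5, 1, 2, 3], [7, 3, 4]) == [5, 7, 2, 4]
```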
4 changes: 2 additions & 2 deletions src/Nncase.Core/IR/Tensors/Functional.cs
@@ -31,7 +31,7 @@ public static Expr NHWCToNCHW(Expr input)
}
else if (input.CheckedShape.Rank == 3)
{
perm = new[] { 2, 0, 1 };
perm = new[] { 0, 2, 1 };
}
else
{
@@ -50,7 +50,7 @@ public static Expr NCHWToNHWC(Expr input)
}
else if (input.CheckedShape.Rank == 3)
{
perm = new[] { 1, 2, 0 };
perm = new[] { 0, 2, 1 };
}
else
{
2 changes: 1 addition & 1 deletion src/Nncase.Diagnostics/Diagnostics/CSharpPrintVisitor.cs
@@ -78,7 +78,7 @@ public override string VisitType(CallableType type) =>
public override string VisitType(TensorType type) => type.DType switch
{
PrimType ptype => ptype.GetDisplayName() + (type.Shape.IsScalar ? string.Empty : type.Shape.ToString()),
PointerType { ElemType: PrimType etype } ptype => $"*{etype.GetDisplayName()}",
PointerType { ElemType: PrimType etype } => $"*{etype.GetDisplayName()}",
ValueType => $"{type.DType.ToString()}",
_ => throw new NotSupportedException(type.DType.GetType().Name),
};
2 changes: 1 addition & 1 deletion src/Nncase.Diagnostics/Diagnostics/ILPrintVisitor.cs
@@ -265,7 +265,7 @@ public override string VisitType(CallableType type) =>
public override string VisitType(TensorType type) => type.DType switch
{
PrimType ptype => ptype.GetDisplayName() + (type.Shape.IsScalar ? string.Empty : type.Shape.ToString()),
PointerType { ElemType: PrimType etype } ptype => $"*{etype.GetDisplayName()}",
PointerType { ElemType: PrimType etype } => $"*{etype.GetDisplayName()}",
ValueType => $"{type.DType.ToString()}",
_ => throw new NotSupportedException(type.DType.GetType().Name),
};
Expand Down
2 changes: 1 addition & 1 deletion src/Nncase.Diagnostics/Diagnostics/ScriptPrintVisitor.cs
@@ -129,7 +129,7 @@ public ScriptPrintVisitor(TextWriter textWriter, bool display_callable)
public override string VisitType(TensorType type) => type.DType switch
{
PrimType ptype => ptype.GetDisplayName() + (type.Shape.IsScalar ? string.Empty : type.Shape.ToString()),
PointerType { ElemType: PrimType etype } ptype => $"*{etype.GetDisplayName()}",
PointerType { ElemType: PrimType etype } => $"*{etype.GetDisplayName()}",
ValueType vtype => vtype.GetDisplayName() + (type.Shape.IsScalar ? string.Empty : type.Shape.ToString()),
_ => throw new NotSupportedException(type.DType.GetType().Name),
};
Expand Down
8 changes: 4 additions & 4 deletions src/Nncase.Evaluator/NN/BatchToSpace.cs
@@ -112,7 +112,7 @@ public Expr Visit(IShapeEvaluateContext context, BatchToSpace target)

if (input.CheckedShape.Rank == 3)
{
inShape = Stack(new IR.Tuple(inShape[1], inShape[2], inShape[0]), 0);
inShape = Stack(new IR.Tuple(inShape[0], inShape[2], inShape[1]), 0);
}

var blockShape = context.GetArgument(target, BatchToSpace.BlockShape);
@@ -142,7 +142,7 @@ public Expr Visit(IShapeEvaluateContext context, BatchToSpace target)

if (input.CheckedShape.Rank == 3)
{
return Stack(new IR.Tuple(outShapeList[2], outShapeList[0], outShapeList[1]), 0);
return Stack(new IR.Tuple(outShapeList[0], outShapeList[2], outShapeList[1]), 0);
}

throw new NotImplementedException();
@@ -186,7 +186,7 @@ private IRType Visit(ITypeInferenceContext context, BatchToSpace target, TensorT
{
var inShape = input.Shape.Rank == 4
? TypeInference.ApplyPerm(input.Shape, new[] { 0, 2, 3, 1 })
: TypeInference.ApplyPerm(input.Shape, new[] { 1, 2, 0 });
: TypeInference.ApplyPerm(input.Shape, new[] { 0, 2, 1 });
var batch = inShape[0];
if (context.GetArgument(target, BatchToSpace.BlockShape) is TensorConst blockShapeValue &&
context.GetArgument(target, BatchToSpace.Crops) is TensorConst cropsValue)
@@ -211,7 +211,7 @@ private IRType Visit(ITypeInferenceContext context, BatchToSpace target, TensorT
var outShape =
outShapeList.Length == 4
? TypeInference.ApplyPerm(outShapeList, new[] { 0, 3, 1, 2 })
: TypeInference.ApplyPerm(outShapeList, new[] { 2, 0, 1 });
: TypeInference.ApplyPerm(outShapeList, new[] { 0, 2, 1 });
return input with { Shape = outShape };
}
else
20 changes: 10 additions & 10 deletions src/Nncase.Evaluator/NN/LayerNorm.cs
@@ -12,7 +12,8 @@ namespace Nncase.Evaluator.NN;
/// <summary>
/// Evaluator for <see cref="LayerNorm"/>.
/// </summary>
public class LayerNormEvaluator : IEvaluator<LayerNorm>, ITypeInferencer<LayerNorm>, ICostEvaluator<LayerNorm>, IShapeEvaluator<LayerNorm>, IMetricEvaluator<LayerNorm>
public class LayerNormEvaluator : IEvaluator<LayerNorm>, ITypeInferencer<LayerNorm>, ICostEvaluator<LayerNorm>,
IShapeEvaluator<LayerNorm>, IMetricEvaluator<LayerNorm>
{
/// <inheritdoc/>
public IValue Visit(IEvaluateContext context, LayerNorm layerNorm)
@@ -77,21 +78,19 @@ private Tensor LayerNormImpl(Tensor input, Tensor scale, Tensor bias, int axis,
float[] inputArray = input.ToArray<float>();
float[] outputArray = new float[inputArray.Length];
int[] inShape = input.Shape.ToValueArray();
if (axis < 0)
{
axis += inShape.Length;
}

for (int i = 0; i < axis; i++)
{
outputSize *= inShape[i];
}

for (int i = axis; i < inShape.Length; i++)
{
if (i < 0)
{
innerSize *= inShape[^System.Math.Abs(i)];
}
else
{
innerSize *= inShape[i];
}
innerSize *= inShape[i];
}

for (int batch = 0; batch < outputSize; batch++)
@@ -131,7 +130,8 @@ private Tensor LayerNormImpl(Tensor input, Tensor scale, Tensor bias, int axis,

for (int i = 0; i < innerSize; i++)
{
outputArray[(i + (batch * innerSize)) % outputArray.Length] = (div[i] * scale.ToArray<float>()[i % scale.Length]) + bias.ToArray<float>()[i % bias.Length];
outputArray[(i + (batch * innerSize)) % outputArray.Length] =
(div[i] * scale.ToArray<float>()[i % scale.Length]) + bias.ToArray<float>()[i % bias.Length];
}
}

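The fix normalizes a negative `axis` once at the top instead of patching indices inside the inner-size loop, so `axis = -2` and `axis = rank - 2` now take the same path: dimensions before `axis` form the batch and dimensions from `axis` on are normalized together. A numpy sketch of the intended semantics, not the exact kernel:

```python
import numpy as np

def layer_norm(x, scale, bias, axis, eps=1e-5):
    """Normalize over all dims from `axis` on, per batch element."""
    if axis < 0:
        axis += x.ndim                  # the fix: normalize the axis once, up front
    inner = tuple(range(axis, x.ndim))  # axes reduced for each batch element
    mean = x.mean(axis=inner, keepdims=True)
    var = x.var(axis=inner, keepdims=True)
    return (x - mean) / np.sqrt(var + eps) * scale + bias

x = np.random.rand(2, 3, 4).astype(np.float32)
scale = np.ones((3, 4), np.float32)
bias = np.zeros((3, 4), np.float32)
# a negative axis and its normalized form must now agree
assert np.allclose(layer_norm(x, scale, bias, -2), layer_norm(x, scale, bias, 1))
```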