[TOPI] C++ doc #320

Merged · 1 commit · Aug 14, 2017
2 changes: 1 addition & 1 deletion HalideIR
Submodule HalideIR updated 1 file
+1 −1 src/ir/IR.h
135 changes: 115 additions & 20 deletions topi/include/topi/broadcast.h
@@ -1,55 +1,150 @@
-/*
+/*!
  * Copyright (c) 2017 by Contributors
  * \brief Broadcast op constructions
- * \file broadcast.h
+ * \file topi/broadcast.h
  */
#ifndef TOPI_BROADCAST_H_
#define TOPI_BROADCAST_H_

-#include <topi/detail/broadcast.h>
+#include <string>
+
+#include "topi/detail/broadcast.h"
+#include "topi/tags.h"

namespace topi {

-inline tvm::Tensor broadcast_to(const tvm::Tensor& I,
-                                const tvm::Array<tvm::Expr>& output_shape) {
-  CHECK_GE(output_shape.size(), I->shape.size())
+/*!
+ * \brief Creates an operation that broadcasts a tensor into a compatible
+ * shape according to numpy's rules
+ *
+ * \param t The input tensor
+ * \param output_shape The target output shape, must be compatible
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a broadcast operation
+ */
+inline tvm::Tensor broadcast_to(const tvm::Tensor& t,
+                                const tvm::Array<tvm::Expr>& output_shape,
+                                std::string name = "tensor",
+                                std::string tag = kBroadcast) {
+  CHECK_GE(output_shape.size(), t->shape.size())
      << "Not a broadcast, output dimensionality smaller than input.\noutput: "
-      << output_shape << "\nvs\ninput: " << I;
-  auto bh = detail::BroadcastShape(output_shape, I->shape);
+      << output_shape << "\nvs\ninput: " << t;
+  auto bh = detail::BroadcastShape(output_shape, t->shape);
  CHECK_EQ(output_shape.size(), bh.common_shape.size());
  for (int i = 0; i < output_shape.size(); ++i) {
    CHECK(tvm::ir::Equal(output_shape[i], bh.common_shape[i]));
  }
  auto l = [&](tvm::Array<tvm::Var> ovars) {
-    return I(detail::InputIndexFromBroadcast(ovars, I, bh.vars2, bh.all_vars));
+    return t(detail::InputIndexFromBroadcast(ovars, t, bh.vars2, bh.all_vars));
  };
  return tvm::compute(
-      tvm::Array<tvm::Expr>(bh.common_shape.begin(), bh.common_shape.end()), l);
+      tvm::Array<tvm::Expr>(bh.common_shape.begin(), bh.common_shape.end()),
+      l,
+      name,
+      tag);
}
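
For context, a minimal usage sketch of the updated broadcast_to signature (editorial, not part of the diff; the placeholder shape and the names x, x_bcast, and BroadcastToExample are illustrative):

#include "topi/broadcast.h"
#include "tvm/tvm.h"

// Broadcast a (1, 4) placeholder up to (3, 4). When the new name/tag
// arguments are omitted they default to "tensor" and kBroadcast.
inline tvm::Tensor BroadcastToExample() {
  tvm::Tensor x = tvm::placeholder({1, 4}, tvm::Float(32), "x");
  return topi::broadcast_to(x, {3, 4}, "x_bcast");
}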

-inline tvm::Tensor broadcast_add(const tvm::Tensor& A, const tvm::Tensor& B) {
+/*!
+ * \brief Creates an operation that performs pointwise addition of 2 tensors
+ * and broadcasts them into a common compatible shape where necessary,
+ * according to numpy's rules
+ *
+ * \param A The first tensor to add
+ * \param B The second tensor to add
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a pointwise addition with broadcast
+ */
+inline tvm::Tensor broadcast_add(const tvm::Tensor& A,
+                                 const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = kBroadcast) {
  auto l = [&](tvm::Expr a, tvm::Expr b) { return a + b; };
-  return detail::WithBroadcast(l, A, B);
+  return detail::WithBroadcast(l, A, B, name, tag);
}

-inline tvm::Tensor broadcast_sub(const tvm::Tensor& A, const tvm::Tensor& B) {
+/*!
+ * \brief Creates an operation that performs pointwise subtraction of 2 tensors
+ * and broadcasts them into a common compatible shape where necessary,
+ * according to numpy's rules
+ *
+ * \param A The first tensor
+ * \param B The second tensor to subtract from the first
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a pointwise subtraction with broadcast
+ */
+inline tvm::Tensor broadcast_sub(const tvm::Tensor& A,
+                                 const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = kBroadcast) {
  auto l = [&](tvm::Expr a, tvm::Expr b) { return a - b; };
-  return detail::WithBroadcast(l, A, B);
+  return detail::WithBroadcast(l, A, B, name, tag);
}

-inline tvm::Tensor broadcast_mul(const tvm::Tensor& A, const tvm::Tensor& B) {
+/*!
+ * \brief Creates an operation that performs pointwise multiplication of 2
+ * tensors and broadcasts them into a common compatible shape where necessary,
+ * according to numpy's rules
+ *
+ * \param A The first tensor to multiply
+ * \param B The second tensor to multiply
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a pointwise multiplication with broadcast
+ */
+inline tvm::Tensor broadcast_mul(const tvm::Tensor& A,
+                                 const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = kBroadcast) {
  auto l = [&](tvm::Expr a, tvm::Expr b) { return a * b; };
-  return detail::WithBroadcast(l, A, B);
+  return detail::WithBroadcast(l, A, B, name, tag);
}

-inline tvm::Tensor broadcast_div(const tvm::Tensor& A, const tvm::Tensor& B) {
+/*!
+ * \brief Creates an operation that performs pointwise division of 2 tensors
+ * and broadcasts them into a common compatible shape where necessary,
+ * according to numpy's rules
+ *
+ * \param A The first tensor
+ * \param B The second tensor to divide the first tensor with
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a pointwise division with broadcast
+ */
+inline tvm::Tensor broadcast_div(const tvm::Tensor& A,
+                                 const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = kBroadcast) {
  auto l = [&](tvm::Expr a, tvm::Expr b) { return a / b; };
-  return detail::WithBroadcast(l, A, B);
+  return detail::WithBroadcast(l, A, B, name, tag);
}

-inline tvm::Tensor broadcast_mod(const tvm::Tensor& A, const tvm::Tensor& B) {
+/*!
+ * \brief Creates an operation that performs pointwise modulo remainder of 2
+ * tensors and broadcasts them into a common compatible shape where necessary,
+ * according to numpy's rules
+ *
+ * \param A The first tensor
+ * \param B The second tensor to compute A % B
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is a pointwise modulo remainder with
+ * broadcast
+ */
+inline tvm::Tensor broadcast_mod(const tvm::Tensor& A,
+                                 const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = kBroadcast) {
  auto l = [&](tvm::Expr a, tvm::Expr b) { return a % b; };
-  return detail::WithBroadcast(l, A, B);
+  return detail::WithBroadcast(l, A, B, name, tag);
}

} // namespace topi
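
The binary ops above all follow the same broadcasting pattern; for orientation, a usage sketch with the new optional arguments (editorial, not part of the diff; shapes and the names A, B, C, and BroadcastAddExample are illustrative):

#include "topi/broadcast.h"
#include "tvm/tvm.h"

// (3, 1) + (1, 4) broadcasts to (3, 4). The third argument names the
// resulting compute op; the tag still defaults to kBroadcast.
inline tvm::Tensor BroadcastAddExample() {
  tvm::Tensor A = tvm::placeholder({3, 1}, tvm::Float(32), "A");
  tvm::Tensor B = tvm::placeholder({1, 4}, tvm::Float(32), "B");
  return topi::broadcast_add(A, B, "C");
}
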
17 changes: 12 additions & 5 deletions topi/include/topi/detail/broadcast.h
@@ -1,13 +1,14 @@
-/*
+/*!
  * Copyright (c) 2017 by Contributors
  * \brief Detail broadcast.
- * \file broadcast.h
+ * \file topi/detail/broadcast.h
  */
#ifndef TOPI_DETAIL_BROADCAST_H_
#define TOPI_DETAIL_BROADCAST_H_

#include <algorithm>
#include <deque>
+#include <string>

#include "tvm/ir_pass.h"
#include "tvm/tvm.h"
@@ -90,15 +91,21 @@ inline tvm::Array<tvm::Expr> InputIndexFromBroadcast(


template <typename FBinaryExpr>
-inline tvm::Tensor WithBroadcast(FBinaryExpr op, const tvm::Tensor& A,
-                                 const tvm::Tensor& B) {
+inline tvm::Tensor WithBroadcast(FBinaryExpr op,
+                                 const tvm::Tensor& A,
+                                 const tvm::Tensor& B,
+                                 std::string name = "tensor",
+                                 std::string tag = "") {
  auto bh = BroadcastShape(A->shape, B->shape);
  auto l = [&](tvm::Array<tvm::Var> ovars) {
    return op(A(InputIndexFromBroadcast(ovars, A, bh.vars1, bh.all_vars)),
              B(InputIndexFromBroadcast(ovars, B, bh.vars2, bh.all_vars)));
  };
  return tvm::compute(
-      tvm::Array<tvm::Expr>(bh.common_shape.begin(), bh.common_shape.end()), l);
+      tvm::Array<tvm::Expr>(bh.common_shape.begin(), bh.common_shape.end()),
+      l,
+      name,
+      tag);
}

} // namespace detail
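
As a sketch of how the extended WithBroadcast helper can back a new broadcast op, here is a hypothetical broadcast_sqdiff (squared difference); the op itself is illustrative and not part of this PR:

#include <string>

#include "topi/detail/broadcast.h"
#include "topi/tags.h"
#include "tvm/tvm.h"

// Hypothetical element-wise squared difference with numpy-style broadcasting,
// forwarding the new name/tag arguments to detail::WithBroadcast.
inline tvm::Tensor broadcast_sqdiff(const tvm::Tensor& A,
                                    const tvm::Tensor& B,
                                    std::string name = "tensor",
                                    std::string tag = topi::kBroadcast) {
  auto l = [&](tvm::Expr a, tvm::Expr b) { return (a - b) * (a - b); };
  return topi::detail::WithBroadcast(l, A, B, name, tag);
}
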
17 changes: 11 additions & 6 deletions topi/include/topi/ewise.h
@@ -6,17 +6,22 @@
#ifndef TOPI_EWISE_H_
#define TOPI_EWISE_H_

-#include <tvm/tvm.h>
+#include <string>
+
+#include "topi/tags.h"
+#include "tvm/tvm.h"

namespace topi {
using namespace tvm;

// Unary intrinsic operators
-#define TOPI_DECLARE_UNARY_OP(OpName) \
-  inline Tensor OpName(const Tensor& x) { \
-    return compute(x->shape, [&](const Array<Var>& i) { \
-        return ::tvm::OpName(x(i)); \
-      }, "tensor", "ewise"); \
+#define TOPI_DECLARE_UNARY_OP(OpName) \
+  inline Tensor OpName(const Tensor& x, \
+                       std::string name = "tensor", \
+                       std::string tag = kElementWise) { \
+    return compute(x->shape, [&](const Array<Var>& i) { \
+        return ::tvm::OpName(x(i)); \
+      }, name, tag); \
  }

TOPI_DECLARE_UNARY_OP(exp);
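
Finally, a sketch of calling the macro-generated element-wise ops with the new optional arguments (editorial, not part of the diff; the placeholder and the names x, x_exp, and EwiseExpExample are illustrative):

#include "topi/ewise.h"
#include "tvm/tvm.h"

// topi::exp now also accepts an op name and tag, defaulting to
// "tensor" and kElementWise when left out.
inline tvm::Tensor EwiseExpExample() {
  tvm::Tensor x = tvm::placeholder({16}, tvm::Float(32), "x");
  return topi::exp(x, "x_exp");
}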