Run lazy tensor codegen in generate_code.py (pytorch#73996)
Summary:
Hooks into the existing autograd codegen script (generate_code.py) to take advantage of its existing buck/cmake/bazel integrations.

Adds a new option (--gen_lazy_ts_backend) to generate_code.py and passes it from the CMake OSS build and the fbcode build, but not from other internal xplat/ovrsource builds (these could be opted in later).

Bazel support is added in a later diff.

Includes one generated file (torch/csrc/lazy/generated/LazyIr.h) in a unit test (test/cpp/lazy/test_ir.cpp) to partially verify that the generator works, but does not yet compile the remaining generator outputs, since they depend on files that have not yet landed from the lazy_tensor_staging branch.
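
As a rough sketch (not the actual generate_code.py code), the new flag amounts to an extra argparse option that, when set, hands ts_native_functions.yaml and the new templates to the lazy-tensor generator. The flag name matches this diff; gen_lazy_tensor_backend below is a hypothetical placeholder for that entry point, not the real codegen API:

```python
import argparse

def gen_lazy_tensor_backend(yaml_path: str, output_dir: str) -> None:
    # Placeholder stub for the real generator entry point invoked by generate_code.py.
    print(f"would generate lazy TS backend from {yaml_path} into {output_dir}")

parser = argparse.ArgumentParser(description="autograd/lazy codegen driver (sketch)")
parser.add_argument(
    "--gen_lazy_ts_backend",
    action="store_true",
    help="also generate the TorchScript lazy-tensor backend sources",
)
args = parser.parse_args()

if args.gen_lazy_ts_backend:
    # Would emit torch/csrc/lazy/generated/LazyIr.h and LazyNativeFunctions.h/.cpp
    # from the backend YAML listed in this commit.
    gen_lazy_tensor_backend(
        yaml_path="aten/src/ATen/native/ts_native_functions.yaml",
        output_dir="torch/csrc/lazy/generated",
    )
```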

Pull Request resolved: pytorch#73996

Test Plan: OSS/internal CI - verify all builds are working and test_ir.cpp compiles LazyIr.h

Reviewed By: ezyang

Differential Revision: D34408536

fbshipit-source-id: 8af0aea3b95d81eccafc17d64390d70ddd176515
(cherry picked from commit f930612)
wconstab authored and pytorchmergebot committed Mar 17, 2022
1 parent 80e0d8a commit 72b1194
Showing 12 changed files with 315 additions and 25 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -69,6 +69,7 @@ torch/_VF.pyi
torch/nn/functional.pyi
torch/utils/data/datapipes/datapipe.pyi
torch/csrc/autograd/generated/*
torch/csrc/lazy/generated/*
# Listed manually because some files in this directory are not generated
torch/testing/_internal/generated/annotated_fn_args.py
torch/testing/_internal/data/*.pt
3 changes: 3 additions & 0 deletions BUILD.bazel
@@ -1915,6 +1915,9 @@ cc_test(
srcs = glob([
"test/cpp/lazy/*.cpp",
"test/cpp/lazy/*.h",
], exclude=[
# skip these since they depend on generated LazyIr.h which isn't available in bazel yet
"test/cpp/lazy/test_ir.cpp",
]),
linkstatic = True,
tags = [
176 changes: 176 additions & 0 deletions aten/src/ATen/native/ts_native_functions.yaml
@@ -0,0 +1,176 @@
backend: Lazy
cpp_namespace: torch::lazy
full_codegen:
- _adaptive_avg_pool2d
- _adaptive_avg_pool2d_backward
- _log_softmax
- _log_softmax_backward_data
- _softmax
- _softmax_backward_data
- abs
- add.Tensor
- addcdiv
- addcmul
- addmm
- arange.start_out
- all
- any
- avg_pool2d
- avg_pool2d_backward
- baddbmm
- binary_cross_entropy
- binary_cross_entropy_backward
- bitwise_and.Tensor
- bitwise_or.Tensor
- bmm
- clamp
- clamp_min
- constant_pad_nd
- convolution
- convolution_backward
- cos
- cumsum
- div.Tensor
- div.Tensor_mode
- elu
- elu_backward
- embedding
- embedding_dense_backward
- eq.Scalar
- eq.Tensor
- exp
- flip
- floor
- frac
- gather
- ge.Scalar
- ge.Tensor
- gelu
- gelu_backward
- glu
- glu_backward
- grid_sampler_2d
- grid_sampler_2d_backward
- gt.Scalar
- gt.Tensor
- hardsigmoid
- index_select
- kl_div_backward
- l1_loss_backward
- le.Scalar
- le.Tensor
- leaky_relu
- leaky_relu_backward
- log
- log2
- logdet
- log_sigmoid_backward
- log_sigmoid_forward
- lt.Scalar
- lt.Tensor
- masked_fill_.Scalar
- masked_fill_.Tensor
- max
- max.dim
- max_pool2d_with_indices
- max_pool2d_with_indices_backward
- maximum
- mean
- mean.dim
- min
- minimum
- mm
- mul.Tensor
- mv
- native_dropout
- native_dropout_backward
- native_layer_norm
- native_layer_norm_backward
- ne.Scalar
- ne.Tensor
- neg
- nll_loss_backward
- nll_loss_forward
- nll_loss2d_backward
- nll_loss2d_forward
- norm.ScalarOpt_dim
- pow.Tensor_Scalar
- pow.Tensor_Tensor
- reciprocal
- relu
- relu_
- remainder.Tensor
- rsqrt
- scatter_add
- sgn
- sigmoid
- sigmoid_backward
- silu
- smooth_l1_loss
- smooth_l1_loss_backward
- softplus
- softplus_backward
- sort
- sqrt
- std
- std.dim
- std.correction
- sum
- sum.dim_IntList
- tanh
- tanh_backward
- threshold
- threshold_backward
- topk
- trace
- tril
- triu
- trunc
- upsample_bilinear2d
- upsample_bilinear2d_backward
- upsample_nearest2d
- upsample_nearest2d_backward
- zero_
supported:
- as_strided
- as_strided_
- bernoulli
- bernoulli_.float
- cat
- clone
- _copy_from
- _copy_from_and_resize
- empty.memory_format
- empty_strided
- expand
- fill_.Scalar
- native_batch_norm
- native_batch_norm_backward
- normal_
- max_pool3d_with_indices
- max_pool3d_with_indices_backward
- permute
- random_
- random_.from
- random_.to
- repeat
- select.int
- slice.Tensor
- squeeze
- squeeze.dim
- squeeze_
- squeeze_.dim
- stack
- t
- t_
- transpose.int
- transpose_
- unsqueeze
- unsqueeze_
- sub.Tensor
- sub.Scalar
- view
- alias
- _unsafe_view
autograd:
- max_pool3d
9 changes: 9 additions & 0 deletions aten/src/ATen/templates/DispatchKeyNativeFunctions.cpp
@@ -0,0 +1,9 @@
// ${generated_comment}
${includes}
${native_functions_include}

${namespace_prologue}

${native_function_definitions}

${namespace_epilogue}
21 changes: 21 additions & 0 deletions aten/src/ATen/templates/LazyIr.h
@@ -0,0 +1,21 @@
#pragma once

// This file contains autogenerated LazyTensor IR nodes
${lazy_ir_sysinc}
${lazy_ir_inc}

namespace torch {
namespace lazy {
using at::operator<<;

// kNullValue is used to contribute a static hash value any time
// a node has an Optional<Value> input that is nullopt. It is important
// to differentiate between HASH(nullopt, something) and HASH(something, nullopt),
// and hashing kNullValue in the operand's position preserves that ordering.
static const torch::lazy::Value kNullValue = torch::lazy::Value();

${ir_declarations}

} // namespace lazy
} // namespace torch
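
The kNullValue comment in this template argues that a missing optional operand still needs to occupy a position in the hash. A small standalone sketch of that idea (plain Python, not torch::lazy's actual hashing utilities; combine and NULL_SENTINEL are illustrative stand-ins) shows why hashing a fixed sentinel keeps HASH(nullopt, x) and HASH(x, nullopt) distinct:

```python
NULL_SENTINEL = 0  # stand-in for the hash contribution of kNullValue

def combine(seed: int, value: int) -> int:
    # Toy order-sensitive hash combine, truncated to 64 bits.
    return (seed ^ (value + 0x9E3779B97F4A7C15 + (seed << 6) + (seed >> 2))) & (2**64 - 1)

def hash_operand(v) -> int:
    # A present operand hashes normally; a missing one contributes the sentinel.
    return hash(v) if v is not None else NULL_SENTINEL

h1 = combine(combine(0, hash_operand(None)), hash_operand(42))  # HASH(nullopt, x)
h2 = combine(combine(0, hash_operand(42)), hash_operand(None))  # HASH(x, nullopt)
print(h1 != h2)  # True: operand order affects the hash
```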
9 changes: 9 additions & 0 deletions caffe2/CMakeLists.txt
@@ -355,6 +355,8 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
set(GENERATED_H_TORCH
"${TORCH_SRC_DIR}/csrc/autograd/generated/Functions.h"
"${TORCH_SRC_DIR}/csrc/autograd/generated/variable_factories.h"
"${TORCH_SRC_DIR}/csrc/lazy/generated/LazyIr.h"
"${TORCH_SRC_DIR}/csrc/lazy/generated/LazyNativeFunctions.h"
)

if(NOT INTERN_DISABLE_AUTOGRAD)
@@ -407,8 +409,15 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
$<$<BOOL:${INTERN_DISABLE_AUTOGRAD}>:--disable-autograd>
$<$<BOOL:${SELECTED_OP_LIST}>:--selected-op-list-path="${SELECTED_OP_LIST}">
--force_schema_registration
--gen_lazy_ts_backend
DEPENDS
"${TORCH_ROOT}/aten/src/ATen/native/native_functions.yaml"
"${TORCH_ROOT}/aten/src/ATen/native/ts_native_functions.yaml"
"${TORCH_ROOT}/torch/csrc/lazy/core/shape_inference.h"
"${TORCH_ROOT}/aten/src/ATen/templates/DispatchKeyNativeFunctions.h"
"${TORCH_ROOT}/aten/src/ATen/templates/DispatchKeyNativeFunctions.cpp"
"${TORCH_ROOT}/aten/src/ATen/templates/LazyIr.h"
"${TORCH_ROOT}/aten/src/ATen/templates/RegisterDispatchKey.cpp"
"${TOOLS_PATH}/autograd/templates/VariableType.h"
"${TOOLS_PATH}/autograd/templates/VariableType.cpp"
"${TOOLS_PATH}/autograd/templates/ADInplaceOrViewType.cpp"
1 change: 1 addition & 0 deletions test/cpp/lazy/test_ir.cpp
@@ -1,5 +1,6 @@
#include <gtest/gtest.h>

#include <torch/csrc/lazy/generated/LazyIr.h>
#include <c10/util/Exception.h>
#include <torch/csrc/lazy/core/config.h>
#include <torch/csrc/lazy/core/ir.h>
5 changes: 5 additions & 0 deletions test/cpp/lazy/test_misc.cpp
@@ -71,6 +71,11 @@ TEST(HashTest, Sanity) {
auto b = std::vector<int32_t>({1, 1, 2, 3, 5, 8, 12});
test_hash_repeatable_sensitive(a, b);
test_hash_repeatable_sensitive(c10::ArrayRef<int32_t>(a), c10::ArrayRef<int32_t>(b));

// vector<bool> is a special case because it is implemented as a packed bit container
auto bool_a = std::vector<bool>({true, false, false, true});
auto bool_b = std::vector<bool>({true, true, false, true});
test_hash_repeatable_sensitive(bool_a, bool_b);
}

} // namespace lazy