Rangify constructor of HostTensorDescriptor & Tensor<> (#445)
* Rangify STL algorithms

This commit adds rangified adaptations of std::copy(), std::fill() & std::transform()

* Rangify check_err()

By rangifying check_err(), we can not only compare values between
std::vector<>s, but also compare any ranges which have the same value
type.

* Allow constructing Tensor<> like a HostTensorDescriptor

* Simplify Tensor<> object construction logic

* Remove more unnecessary 'HostTensorDescriptor' objects

* Re-format example code

* Re-write more HostTensorDescriptor ctor calls
poyenc authored Nov 11, 2022
1 parent 37f2e91 commit 4a2a56c
Showing 103 changed files with 657 additions and 649 deletions.
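Before the per-file diffs, a before/after sketch of what the rangified constructors buy. HostTensorDescriptor, Tensor<> and ck::literals are the CK utilities touched by this commit; the wrapper function and variable names are illustrative only, and the new signatures are inferred from the call sites below rather than quoted from the headers.

#include <cstddef>
#include <vector>

void construct_row_major(std::size_t M, std::size_t N, std::size_t K)
{
    using namespace ck::literals;

    // Before: every call spelled out its std::vector<std::size_t> arguments.
    HostTensorDescriptor a_desc_old(std::vector<std::size_t>({M, K}),
                                    std::vector<std::size_t>({K, 1}));

    // After: braced lists construct the descriptor directly; 1_uz keeps the
    // initializer list homogeneous once std::size_t values appear in it.
    HostTensorDescriptor a_desc_new({M, K}, {K, 1_uz});

    // Tensor<> now accepts the same arguments, so the intermediate
    // HostTensorDescriptor object disappears from call sites.
    Tensor<float> c_m_n({M, N});
}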
12 changes: 7 additions & 5 deletions example/01_gemm/gemm_xdl_skip_b_lds_fp16.cpp
@@ -6,6 +6,8 @@
#include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl_skip_b_lds.hpp"

#include "ck/library/utility/literals.hpp"

using F16 = ck::half_t;
using F32 = float;

@@ -135,15 +137,15 @@ int main(int argc, char* argv[])

auto f_host_tensor_descriptor =
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
+ using namespace ck::literals;

if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
{
- return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
- std::vector<std::size_t>({stride, 1}));
+ return HostTensorDescriptor({row, col}, {stride, 1_uz});
}
else
{
- return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
- std::vector<std::size_t>({1, stride}));
+ return HostTensorDescriptor({row, col}, {1_uz, stride});
}
};

@@ -240,7 +242,7 @@ int main(int argc, char* argv[])
show_2d_matrix(std::cout << "c_host :", c_m_n_host_result) << std::endl;
}
#endif
- ck::utils::check_err(c_m_n_device_result.mData, c_m_n_host_result.mData);
+ ck::utils::check_err(c_m_n_device_result, c_m_n_host_result);
}

return 0;
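The 1_uz suffix in the new braced lists comes from the ck/library/utility/literals.hpp header added to each example's include list. A minimal sketch of the user-defined literal such a header plausibly defines (the exact CK definition may differ):

#include <cstddef>

namespace ck {
namespace literals {

// Sketch: yields std::size_t, so a braced list mixing it with std::size_t
// variables, e.g. {stride, 1_uz}, deduces a single element type.
constexpr std::size_t operator""_uz(unsigned long long x) noexcept
{
    return static_cast<std::size_t>(x);
}

} // namespace literals
} // namespace ck

The literal matters because the new constructor presumably deduces the element type of the braced list: a plain {stride, 1} would mix std::size_t and int and fail deduction, whereas {stride, 1_uz} is unambiguous.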
4 changes: 2 additions & 2 deletions example/01_gemm/run_gemm_example.inc
@@ -131,11 +131,11 @@ bool run_gemm(const ProblemSize& problem_size, const ExecutionConfig& config)

c_m_n_device_result = c_m_n_device_result_converted.CopyAsType<CDataType>();

- return ck::utils::check_err(c_m_n_device_result_converted.mData, c_m_n_host_result.mData);
+ return ck::utils::check_err(c_m_n_device_result_converted, c_m_n_host_result);
#else
c_m_n_device_buf.FromDevice(c_m_n_device_result.mData.data());

- return ck::utils::check_err(c_m_n_device_result.mData, c_m_n_host_result.mData);
+ return ck::utils::check_err(c_m_n_device_result, c_m_n_host_result);
#endif
}

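With check_err() rangified, call sites pass the Tensor<> objects themselves instead of their .mData vectors, as the hunk above shows. A self-contained sketch of the idea: any two ranges sharing a value type can be compared. The function name, tolerance defaults and error handling here are illustrative, not CK's actual API.

#include <cmath>
#include <cstddef>
#include <iostream>
#include <iterator>
#include <type_traits>

template <typename Range, typename RefRange>
bool check_err_sketch(const Range& out,
                      const RefRange& ref,
                      const char* msg = "Error: incorrect results!",
                      double rtol     = 1e-5,
                      double atol     = 1e-4)
{
    // Accept any ranges (std::vector<>, Tensor<>, ...) with the same value type.
    using OutT = std::decay_t<decltype(*std::begin(out))>;
    using RefT = std::decay_t<decltype(*std::begin(ref))>;
    static_assert(std::is_same_v<OutT, RefT>, "ranges must share a value type");

    auto o        = std::begin(out);
    auto r        = std::begin(ref);
    std::size_t i = 0;
    for(; o != std::end(out) && r != std::end(ref); ++o, ++r, ++i)
    {
        const double lhs = static_cast<double>(*o);
        const double rhs = static_cast<double>(*r);
        if(std::abs(lhs - rhs) > atol + rtol * std::abs(rhs))
        {
            std::cerr << msg << " at index " << i << '\n';
            return false;
        }
    }
    return o == std::end(out) && r == std::end(ref); // lengths must match, too
}

For Tensor<> to qualify as a range here, it presumably exposes begin()/end() over its storage; that is what lets the .mData indirection vanish from every call site in this commit.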
14 changes: 7 additions & 7 deletions example/02_gemm_bilinear/gemm_bilinear_xdl_fp16.cpp
@@ -14,6 +14,7 @@
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
#include "ck/library/utility/check_err.hpp"

@@ -177,15 +178,15 @@ int main(int argc, char* argv[])

auto f_host_tensor_descriptor =
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
+ using namespace ck::literals;

if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
{
- return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
- std::vector<std::size_t>({stride, 1}));
+ return HostTensorDescriptor({row, col}, {stride, 1_uz});
}
else
{
- return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
- std::vector<std::size_t>({1, stride}));
+ return HostTensorDescriptor({row, col}, {1_uz, stride});
}
};

@@ -271,8 +272,7 @@ int main(int argc, char* argv[])

if(do_verification)
{
- Tensor<CShuffleDataType> c_m_n(HostTensorDescriptor(
- std::vector<std::size_t>{static_cast<std::size_t>(M), static_cast<std::size_t>(N)}));
+ Tensor<CShuffleDataType> c_m_n({M, N});

using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
BDataType,
@@ -299,7 +299,7 @@

e_device_buf.FromDevice(e_m_n_device_result.mData.data());

- return ck::utils::check_err(e_m_n_device_result.mData, e_m_n_host_result.mData) ? 0 : 1;
+ return ck::utils::check_err(e_m_n_device_result, e_m_n_host_result) ? 0 : 1;
}

return 0;
11 changes: 6 additions & 5 deletions example/03_gemm_bias_relu/gemm_bias_relu_xdl_fp16.cpp
@@ -15,6 +15,7 @@
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
#include "ck/library/utility/check_err.hpp"

@@ -155,15 +156,15 @@ int main(int argc, char* argv[])

auto f_host_tensor_descriptor =
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
+ using namespace ck::literals;

if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
{
- return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
- std::vector<std::size_t>({stride, 1}));
+ return HostTensorDescriptor({row, col}, {stride, 1_uz});
}
else
{
- return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
- std::vector<std::size_t>({1, stride}));
+ return HostTensorDescriptor({row, col}, {1_uz, stride});
}
};

@@ -275,7 +276,7 @@ int main(int argc, char* argv[])
}
}

- return ck::utils::check_err(e_m_n_device_result.mData, e_m_n_host_result.mData) ? 0 : 1;
+ return ck::utils::check_err(e_m_n_device_result, e_m_n_host_result) ? 0 : 1;
}

return 0;
@@ -124,7 +124,7 @@ bool run_gemm_add_add_fastgelu(const ProblemSize& problem_size, const ExecutionC

if(config.do_verification)
{
- Tensor<AccDataType> c_m_n(HostTensorDescriptor{M, N});
+ Tensor<AccDataType> c_m_n({M, N});

auto ref_gemm = ReferenceGemmInstance{};
auto ref_invoker = ref_gemm.MakeInvoker();
@@ -147,9 +147,9 @@ bool run_gemm_add_add_fastgelu(const ProblemSize& problem_size, const ExecutionC
#ifdef BUILD_INT4_EXAMPLE
const Tensor<EDataType> e_m_n_device_result_converted(e_m_n_device_result);

- return ck::utils::check_err(e_m_n_device_result_converted.mData, e_m_n_host_result.mData);
+ return ck::utils::check_err(e_m_n_device_result_converted, e_m_n_host_result);
#else
- return ck::utils::check_err(e_m_n_device_result.mData, e_m_n_host_result.mData);
+ return ck::utils::check_err(e_m_n_device_result, e_m_n_host_result);
#endif
}

5 changes: 3 additions & 2 deletions example/09_convnd_fwd/convnd_fwd_common.hpp
@@ -10,6 +10,7 @@
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/utility/algorithm.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
@@ -84,7 +85,7 @@ bool run_grouped_conv_fwd(bool do_verification,
std::array<ck::index_t, NDimSpatial> input_left_pads{};
std::array<ck::index_t, NDimSpatial> input_right_pads{};

- auto copy = [](auto& x, auto& y) { std::copy(x.begin(), x.end(), y.begin()); };
+ auto copy = [](const auto& x, auto& y) { ck::ranges::copy(x, y.begin()); };

copy(in_g_n_c_wis_desc.GetLengths(), a_g_n_c_wis_lengths);
copy(in_g_n_c_wis_desc.GetStrides(), a_g_n_c_wis_strides);
@@ -164,7 +165,7 @@ bool run_grouped_conv_fwd(bool do_verification,
out_device_buf.FromDevice(out_device.mData.data());

return ck::utils::check_err(
- out_device.mData, out_host.mData, "Error: incorrect results!", 1e-5f, 1e-4f);
+ out_device, out_host, "Error: incorrect results!", 1e-5f, 1e-4f);
}

return true;
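ck::ranges::copy replaces both raw std::copy calls over whole containers and the ad-hoc per-file helpers (one such local copy template is deleted from common.hpp just below). A sketch of the wrapper that the new ck/library/utility/algorithm.hpp include plausibly provides, mirroring the deleted helper's signature:

#include <algorithm>
#include <iterator>

namespace ck {
namespace ranges {

// Sketch: forward a whole range to std::copy, so call sites name the range
// once instead of writing out the begin()/end() pair.
template <typename Range, typename OutputIterator>
auto copy(const Range& range, OutputIterator iter)
    -> decltype(std::copy(std::begin(range), std::end(range), iter))
{
    return std::copy(std::begin(range), std::end(range), iter);
}

} // namespace ranges
} // namespace ck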
12 changes: 2 additions & 10 deletions example/10_convnd_fwd_multiple_d_multiple_reduce/common.hpp
@@ -16,6 +16,7 @@
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/utility/algorithm.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/convolution_parameter.hpp"
#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"
@@ -140,9 +141,7 @@ make_r0_host_tensor_descriptor(const ck::utils::conv::ConvParam& problem_size)
{
std::vector<ck::index_t> dimensions{problem_size.G_, problem_size.N_};

- std::copy(begin(problem_size.output_spatial_lengths_),
- end(problem_size.output_spatial_lengths_),
- std::back_inserter(dimensions));
+ ck::ranges::copy(problem_size.output_spatial_lengths_, std::back_inserter(dimensions));

return HostTensorDescriptor(dimensions);
}
@@ -158,10 +157,3 @@ void unpack_host_tensor_descriptor(const HostTensorDescriptor& descriptor,
assert(size(descriptor.GetStrides()) == size(strides));
std::copy_n(begin(descriptor.GetStrides()), size(descriptor.GetStrides()), begin(strides));
}

- template <typename Range, typename OutputIterator>
- auto copy(const Range& range, OutputIterator iter)
-     -> decltype(std::copy(std::begin(range), std::end(range), iter))
- {
-     return std::copy(std::begin(range), std::end(range), iter);
- }
@@ -120,10 +120,10 @@ bool run_convnd_fwd_max(const ck::utils::conv::ConvParam& problem_size,
conv_output_g_n_k_wos_desc, conv_output_g_n_k_wos_lengths, conv_output_g_n_k_wos_strides);
unpack_host_tensor_descriptor(r0_desc, r0_lengths, r0_strides);

- copy(problem_size.conv_filter_strides_, begin(conv_filter_strides));
- copy(problem_size.conv_filter_dilations_, begin(conv_filter_dilations));
- copy(problem_size.input_left_pads_, begin(input_left_pads));
- copy(problem_size.input_right_pads_, begin(input_right_pads));
+ ck::ranges::copy(problem_size.conv_filter_strides_, begin(conv_filter_strides));
+ ck::ranges::copy(problem_size.conv_filter_dilations_, begin(conv_filter_dilations));
+ ck::ranges::copy(problem_size.input_left_pads_, begin(input_left_pads));
+ ck::ranges::copy(problem_size.input_right_pads_, begin(input_right_pads));

// run Conv + Reduction on device
auto conv = DeviceInstance<NDimSpatial>{};
@@ -273,16 +273,13 @@ bool run_convnd_fwd_max(const ck::utils::conv::ConvParam& problem_size,
conv_output_device_buf.FromDevice(conv_output_device.mData.data());
r0_device_buf.FromDevice(r0_device.mData.data());

- return ck::utils::check_err(conv_output_device.mData,
- conv_output_host.mData,
+ return ck::utils::check_err(conv_output_device,
+ conv_output_host,
"Error: incorrect results! (Matrix E)",
1e-5f,
1e-4f) &&
- ck::utils::check_err(r0_device.mData,
- r0_host.mData,
- "Error: incorrect results! (Matrix R0)",
- 1e-5f,
- 1e-4f);
+ ck::utils::check_err(
+ r0_device, r0_host, "Error: incorrect results! (Matrix R0)", 1e-5f, 1e-4f);
}

return true;
4 changes: 2 additions & 2 deletions example/12_reduce/reduce_blockwise_impl.hpp
@@ -324,12 +324,12 @@ int reduce_blockwise_impl(bool do_verification,
#endif
out_dev.FromDevice(out.mData.data());

- pass = pass && ck::utils::check_err(out.mData, out_ref.mData);
+ pass = pass && ck::utils::check_err(out, out_ref);

if(OutputIndex)
{
out_index_dev.FromDevice(out_indices.mData.data());
- pass = pass && ck::utils::check_err(out_indices.mData, out_indices_ref.mData);
+ pass = pass && ck::utils::check_err(out_indices, out_indices_ref);
};
};

2 changes: 1 addition & 1 deletion example/12_reduce/reduce_blockwise_two_call.cpp
@@ -294,7 +294,7 @@ int main(int argc, char* argv[])
if(do_verify)
{
out_dev.FromDevice(out.mData.data());
- pass = pass && ck::utils::check_err(out.mData, out_ref.mData);
+ pass = pass && ck::utils::check_err(out, out_ref);
};

return (pass ? 0 : 1);
2 changes: 1 addition & 1 deletion example/12_reduce/reduce_multiblock_atomic_add_impl.hpp
@@ -225,7 +225,7 @@ int reduce_multiblock_atomic_add_impl(bool do_verification,
if(do_verification)
{
out_dev.FromDevice(out.mData.data());
- pass = pass && ck::utils::check_err(out.mData, out_ref.mData);
+ pass = pass && ck::utils::check_err(out, out_ref);
};

return (pass ? 0 : 1);
15 changes: 8 additions & 7 deletions example/13_pool2d_fwd/pool2d_fwd_common.hpp
@@ -16,6 +16,7 @@
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"

template <typename InDataType,
typename OutDataType,
@@ -172,16 +173,16 @@ bool pool_test(bool do_verification,
// tensor layout
auto f_host_tensor_descriptor =
[](std::size_t N_, std::size_t C_, std::size_t H, std::size_t W, auto layout) {
+ using namespace ck::literals;

if constexpr(ck::is_same<decltype(layout), ck::tensor_layout::convolution::NCHW>::value)
{
- return HostTensorDescriptor(std::vector<std::size_t>({N_, C_, H, W}),
- std::vector<std::size_t>({C_ * H * W, H * W, W, 1}));
+ return HostTensorDescriptor({N_, C_, H, W}, {C_ * H * W, H * W, W, 1_uz});
}
else if constexpr(ck::is_same<decltype(layout),
ck::tensor_layout::convolution::NHWC>::value)
{
- return HostTensorDescriptor(std::vector<std::size_t>({N_, C_, H, W}),
- std::vector<std::size_t>({C_ * H * W, 1, W * C_, C_}));
+ return HostTensorDescriptor({N_, C_, H, W}, {C_ * H * W, 1_uz, W * C_, C_});
}
};

@@ -267,14 +268,14 @@ bool pool_test(bool do_verification,

out_device_buf.FromDevice(out_n_c_ho_wo_device.mData.data());

- pass = pass && ck::utils::check_err(out_n_c_ho_wo_device.mData, out_n_c_ho_wo_host.mData);
+ pass = pass && ck::utils::check_err(out_n_c_ho_wo_device, out_n_c_ho_wo_host);

if constexpr(OutputIndex)
{
out_indices_device_buf.FromDevice(out_indices_n_c_ho_wo_device.mData.data());

- pass = pass && ck::utils::check_err(out_indices_n_c_ho_wo_device.mData,
- out_indices_n_c_ho_wo_host.mData);
+ pass = pass &&
+ ck::utils::check_err(out_indices_n_c_ho_wo_device, out_indices_n_c_ho_wo_host);
};
}

@@ -15,6 +15,7 @@
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
#include "ck/library/utility/check_err.hpp"

@@ -133,15 +134,15 @@ int main(int argc, char* argv[])

auto f_host_tensor_descriptor =
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
+ using namespace ck::literals;

if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
{
- return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
- std::vector<std::size_t>({stride, 1}));
+ return HostTensorDescriptor({row, col}, {stride, 1_uz});
}
else
{
- return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
- std::vector<std::size_t>({1, stride}));
+ return HostTensorDescriptor({row, col}, {1_uz, stride});
}
};

@@ -225,7 +226,7 @@ int main(int argc, char* argv[])

ref_invoker.Run(ref_argument);

- return ck::utils::check_err(c_m_n_device_result.mData, c_m_n_host_result.mData) ? 0 : 1;
+ return ck::utils::check_err(c_m_n_device_result, c_m_n_host_result) ? 0 : 1;
}

return 0;
1 change: 1 addition & 0 deletions example/15_grouped_gemm/grouped_gemm_xdl_bfp16.cpp
@@ -16,6 +16,7 @@
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"

template <ck::index_t... Is>
1 change: 1 addition & 0 deletions example/15_grouped_gemm/grouped_gemm_xdl_fp16.cpp
@@ -16,6 +16,7 @@
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"

template <ck::index_t... Is>
1 change: 1 addition & 0 deletions example/15_grouped_gemm/grouped_gemm_xdl_fp32.cpp
@@ -16,6 +16,7 @@
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"

template <ck::index_t... Is>
1 change: 1 addition & 0 deletions example/15_grouped_gemm/grouped_gemm_xdl_int4.cpp
@@ -16,6 +16,7 @@
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"

template <ck::index_t... Is>