pass cuda_stream to turbomind::Linear #4

Merged (21 commits) on Oct 15, 2024
6 changes: 5 additions & 1 deletion example/test_linear.py
@@ -118,7 +118,11 @@ def load_specified_linear_weights():
 
     model.post_init()
 
-    res = model(x)
+    stream = torch.cuda.Stream()
+    with torch.cuda.stream(stream):
+        res = model(x)
+    stream.synchronize()
 
     print(f'tm.linear.res: {res}')
     abs_diff = torch.abs(res - ref_res).float()
     rel_diff = abs_diff / torch.max(torch.abs(ref_res), torch.abs(res))
9 changes: 5 additions & 4 deletions src/turbomind/api/python/bind.cpp
@@ -329,16 +329,17 @@ PYBIND11_MODULE(_turbomind_ext, m) {
         .def(py::init([](size_t in_features, size_t out_features, int w_bit, int group_size) {
             return new turbomind::Linear(in_features, out_features, w_bit, group_size);
         }))
-        .def("post_init", [](turbomind::Linear* linear, py::object qweight, py::object scales, py::object qzeros,
+        .def("post_init", [](turbomind::Linear* self, py::object qweight, py::object scales, py::object qzeros,
                              bool simt){
             auto _qweight = TorchTensorToTurbomindTensor(qweight);
             auto _scales = TorchTensorToTurbomindTensor(scales);
             auto _qzeros = TorchTensorToTurbomindTensor(qzeros);
-            linear->post_init(_qweight, *_scales, *_qzeros, simt);
+            self->post_init(_qweight, *_scales, *_qzeros, simt);
         })
-        .def("forward", [](turbomind::Linear* linear, py::object in, py::object out) {
+        .def("forward", [](turbomind::Linear* self, py::object in, py::object out, int64_t stream_id = 0) {
             auto _in = TorchTensorToTurbomindTensor(in);
             auto _out = TorchTensorToTurbomindTensor(out);
-            return linear->forward(*_in, *_out);
+            auto stream = reinterpret_cast<cudaStream_t>(stream_id);
+            return self->forward(*_in, *_out, stream);
         });
 }
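A side note on the binding above, not part of the diff: pybind11 derives the Python signature from the lambda's parameter types, so the C++ default `stream_id = 0` written in the lambda is not visible to Python callers, who must pass the argument explicitly (as `turbomind/linear.py` below does). A hedged sketch of how a Python-side default could be exposed with `py::arg`; the argument names are illustrative:

        // Sketch (not in this PR): give stream_id a default visible from Python.
        .def("forward", [](turbomind::Linear* self, py::object in, py::object out, int64_t stream_id) {
            auto _in = TorchTensorToTurbomindTensor(in);
            auto _out = TorchTensorToTurbomindTensor(out);
            auto stream = reinterpret_cast<cudaStream_t>(stream_id);
            return self->forward(*_in, *_out, stream);
        }, py::arg("in"), py::arg("out"), py::arg("stream_id") = 0);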
19 changes: 9 additions & 10 deletions src/turbomind/api/python/linear.cc
@@ -36,15 +36,15 @@ struct Linear::Impl {
 
         workspace_.barriers_size = gemm::Gemm::kBarriersSize;
         workspace_.partials_size = gemm::Gemm::kPartialsSize;
-        cudaMallocAsync(&workspace_.barriers, workspace_.barriers_size, stream_);
-        cudaMallocAsync(&workspace_.partials, workspace_.partials_size, stream_);
-        cudaMemsetAsync(workspace_.barriers, 0, workspace_.barriers_size, stream_);
+        cudaMallocAsync(&workspace_.barriers, workspace_.barriers_size, 0);
Review comment from the PR author (Collaborator):
@lzhangzz I think we can pass cuda stream when constructing Linear::Impl. What's your opinion?
(A sketch of this suggestion follows the hunk below.)

+        cudaMallocAsync(&workspace_.partials, workspace_.partials_size, 0);
+        cudaMemsetAsync(workspace_.barriers, 0, workspace_.barriers_size, 0);
     }
 
     ~Impl()
     {
-        cudaFreeAsync(workspace_.barriers, stream_);
-        cudaFreeAsync(workspace_.partials, stream_);
+        cudaFreeAsync(workspace_.barriers, 0);
+        cudaFreeAsync(workspace_.partials, 0);
         workspace_ = {};
         check_cuda_error(cudaFree(scales_zeros_));
     }
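A minimal sketch of the alternative raised in the review comment above: constructing Linear::Impl with a stream and keeping the stream_ member for the workspace allocations. Illustrative only; this is not what the PR merged, and the constructor's other parameters are omitted:

struct Linear::Impl {
    // Sketch: take the stream at construction instead of per forward() call.
    explicit Impl(cudaStream_t stream): stream_{stream}
    {
        workspace_.barriers_size = gemm::Gemm::kBarriersSize;
        workspace_.partials_size = gemm::Gemm::kPartialsSize;
        // Allocate and clear the workspace on the caller-provided stream.
        cudaMallocAsync(&workspace_.barriers, workspace_.barriers_size, stream_);
        cudaMallocAsync(&workspace_.partials, workspace_.partials_size, stream_);
        cudaMemsetAsync(workspace_.barriers, 0, workspace_.barriers_size, stream_);
    }

    ~Impl()
    {
        // Free on the same stream the workspace was allocated on.
        cudaFreeAsync(workspace_.barriers, stream_);
        cudaFreeAsync(workspace_.partials, stream_);
        workspace_ = {};
    }

    gemm::Workspace workspace_;
    cudaStream_t stream_{};
};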
@@ -60,7 +60,7 @@ struct Linear::Impl {
         check_cuda_error(cudaFree(workspace));
     }
 
-    void forward(const Tensor& in, Tensor& out) {
+    void forward(const Tensor& in, Tensor& out, cudaStream_t stream) {
         TM_CHECK(in.type == TYPE_FP16 && out.type == TYPE_FP16);
         TM_CHECK(in.shape.size() == 2 && in.shape[1] == input_dims_);
         TM_CHECK(out.shape.size() == 2 && out.shape[0] == in.shape[0] && out.shape[1] == output_dims_);
@@ -106,7 +106,7 @@ struct Linear::Impl {
                               const_cast<void*>(out.data),
                               c_desc,
                               workspace_,
-                              stream_);
+                              stream);
 
         if (ec) {
             printf("%s: %d", __PRETTY_FUNCTION__, ec);
@@ -266,7 +266,6 @@ struct Linear::Impl {
     gemm::Gemm gemm_;
     gemm::DispatchPolicy dispatch_policy_{gemm::DispatchPolicy::kDefault};
     gemm::Workspace workspace_;
-    cudaStream_t stream_{};
 
     size_t input_dims_;
     size_t output_dims_;
@@ -288,8 +287,8 @@ void Linear::post_init(std::shared_ptr<Tensor> qweight, const Tensor& scales, const Tensor& qzeros,
     impl_->post_init(qweight, scales, qzeros, simt);
 }
 
-void Linear::forward(const Tensor& in, Tensor& out)
+void Linear::forward(const Tensor& in, Tensor& out, cudaStream_t stream)
 {
-    impl_->forward(in, out);
+    impl_->forward(in, out, stream);
 }
 } // namespace turbomind
2 changes: 1 addition & 1 deletion src/turbomind/api/python/linear.h
@@ -28,7 +28,7 @@ class Linear {
     Linear(size_t input_dims, size_t output_dims, int w_bit, int group_size);
     void post_init(std::shared_ptr<Tensor> qweight, const Tensor& scales, const Tensor& qzeros,
                    bool simt);
-    void forward(const Tensor& in, Tensor& out);
+    void forward(const Tensor& in, Tensor& out, cudaStream_t stream = nullptr);
     ~Linear() {}
 
 private:
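For reference, a hedged sketch of a C++ caller using the new signature; the dimensions, the tensors x and y, and the post_init inputs are placeholders:

// Hypothetical caller: run forward() on an explicitly created stream.
cudaStream_t stream{};
cudaStreamCreate(&stream);

turbomind::Linear linear(/*input_dims=*/4096, /*output_dims=*/4096,
                         /*w_bit=*/4, /*group_size=*/128);
// linear.post_init(qweight, scales, qzeros, /*simt=*/false);

linear.forward(x, y, stream);   // enqueued on `stream`
linear.forward(x, y);           // stream defaults to nullptr, i.e. the default stream

cudaStreamSynchronize(stream);
cudaStreamDestroy(stream);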
3 changes: 2 additions & 1 deletion turbomind/linear.py
@@ -163,7 +163,8 @@ def forward(self, x):
             dtype=torch.float16,
             device=x.device,
         )
-        self.linear.forward(x, out)
+        stream = torch.cuda.current_stream()
+        self.linear.forward(x, out, stream.cuda_stream)
         out = torch.from_dlpack(out)
         if self.bias is not None:
             out.add_(self.bias)