Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update pre-commit hooks to include all C and C++ files #4332

Merged
merged 3 commits into from
Apr 15, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 0 additions & 5 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -39,11 +39,6 @@ repos:
rev: v16.0.6
hooks:
- id: clang-format
exclude: |
(?x)^(
cpp/libcugraph_etl|
cpp/tests/c_api
)
types_or: [c, c++, cuda]
args: ["-fallback-style=none", "-style=file", "-i"]
- repo: https://github.com/rapidsai/dependency-file-generator
Expand Down
3 changes: 1 addition & 2 deletions cpp/libcugraph_etl/include/cugraph_etl/functions.hpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
* Copyright (c) 2021-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand All @@ -18,7 +18,6 @@
#include <cudf/column/column.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>

#include <raft/core/handle.hpp>

namespace cugraph {
Expand Down
28 changes: 14 additions & 14 deletions cpp/libcugraph_etl/include/hash/concurrent_unordered_map.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -14,17 +14,13 @@
* limitations under the License.
*/

/*
/*
 * FIXME: This file is copied from cudf because CuCollections doesn't support concurrent
* insert/find for 8 byte key-value pair size. The plan is to migrate to
* using the cuco when the feature is supported. At that point this file can be deleted.
*/
#pragma once

#include <hash/hash_allocator.cuh>
#include <hash/helper_functions.cuh>
#include <hash/managed.cuh>

#include <cudf/detail/utilities/device_atomics.cuh>
#include <cudf/hashing/detail/default_hash.cuh>
#include <cudf/hashing/detail/hash_functions.cuh>
Expand All @@ -34,6 +30,10 @@

#include <thrust/pair.h>

#include <hash/hash_allocator.cuh>
#include <hash/helper_functions.cuh>
#include <hash/managed.cuh>

#include <iostream>
#include <iterator>
#include <limits>
Expand Down Expand Up @@ -437,10 +437,10 @@ class concurrent_unordered_map {
m_hashtbl_values = m_allocator.allocate(m_capacity, stream);
}
RAFT_CUDA_TRY(cudaMemcpyAsync(m_hashtbl_values,
other.m_hashtbl_values,
m_capacity * sizeof(value_type),
cudaMemcpyDefault,
stream.value()));
other.m_hashtbl_values,
m_capacity * sizeof(value_type),
cudaMemcpyDefault,
stream.value()));
}

void clear_async(rmm::cuda_stream_view stream = rmm::cuda_stream_default)
Expand Down Expand Up @@ -484,12 +484,12 @@ class concurrent_unordered_map {
delete this;
}

concurrent_unordered_map() = delete;
concurrent_unordered_map(concurrent_unordered_map const&) = default;
concurrent_unordered_map(concurrent_unordered_map&&) = default;
concurrent_unordered_map() = delete;
concurrent_unordered_map(concurrent_unordered_map const&) = default;
concurrent_unordered_map(concurrent_unordered_map&&) = default;
concurrent_unordered_map& operator=(concurrent_unordered_map const&) = default;
concurrent_unordered_map& operator=(concurrent_unordered_map&&) = default;
~concurrent_unordered_map() = default;
concurrent_unordered_map& operator=(concurrent_unordered_map&&) = default;
~concurrent_unordered_map() = default;

private:
hasher m_hf;
Expand Down
198 changes: 99 additions & 99 deletions cpp/libcugraph_etl/include/hash/hash_allocator.cuh
Original file line number Diff line number Diff line change
@@ -1,99 +1,99 @@
/*
* Copyright (c) 2017-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef HASH_ALLOCATOR_CUH
#define HASH_ALLOCATOR_CUH
#include <new>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
template <class T>
struct managed_allocator {
typedef T value_type;
rmm::mr::device_memory_resource* mr = new rmm::mr::managed_memory_resource;
managed_allocator() = default;
template <class U>
constexpr managed_allocator(const managed_allocator<U>&) noexcept
{
}
T* allocate(std::size_t n, rmm::cuda_stream_view stream = rmm::cuda_stream_default) const
{
return static_cast<T*>(mr->allocate(n * sizeof(T), stream));
}
void deallocate(T* p,
std::size_t n,
rmm::cuda_stream_view stream = rmm::cuda_stream_default) const
{
mr->deallocate(p, n * sizeof(T), stream);
}
};
template <class T, class U>
bool operator==(const managed_allocator<T>&, const managed_allocator<U>&)
{
return true;
}
template <class T, class U>
bool operator!=(const managed_allocator<T>&, const managed_allocator<U>&)
{
return false;
}
template <class T>
struct default_allocator {
typedef T value_type;
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource();
default_allocator() = default;
template <class U>
constexpr default_allocator(const default_allocator<U>&) noexcept
{
}
T* allocate(std::size_t n, rmm::cuda_stream_view stream = rmm::cuda_stream_default) const
{
return static_cast<T*>(mr->allocate(n * sizeof(T), stream));
}
void deallocate(T* p,
std::size_t n,
rmm::cuda_stream_view stream = rmm::cuda_stream_default) const
{
mr->deallocate(p, n * sizeof(T), stream);
}
};
template <class T, class U>
bool operator==(const default_allocator<T>&, const default_allocator<U>&)
{
return true;
}
template <class T, class U>
bool operator!=(const default_allocator<T>&, const default_allocator<U>&)
{
return false;
}
#endif
/*
* Copyright (c) 2017-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef HASH_ALLOCATOR_CUH
#define HASH_ALLOCATOR_CUH

#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>

#include <new>

// Allocator (in the std::allocator sense) backed by CUDA managed memory via RMM.
template <class T>
struct managed_allocator {
  typedef T value_type;

  // Single shared managed-memory resource for all managed_allocator instances.
  // The original code default-initialized `mr` with
  // `new rmm::mr::managed_memory_resource`, leaking one resource per
  // default-constructed allocator (never deleted). A function-local static
  // fixes the leak; managed_memory_resource allocate/deallocate does not
  // depend on instance identity, so memory allocated through one instance can
  // still be released through another, preserving observable behavior.
  static rmm::mr::device_memory_resource* default_resource()
  {
    static rmm::mr::managed_memory_resource resource{};
    return &resource;
  }

  // Public resource pointer kept for interface compatibility with callers
  // that read or reassign `mr` directly.
  rmm::mr::device_memory_resource* mr = default_resource();

  managed_allocator() = default;

  // Rebinding converting constructor required by the Allocator named
  // requirement; the new instance uses the shared resource (the source's
  // `mr` is intentionally not copied, matching the original behavior of
  // not propagating `mr`).
  template <class U>
  constexpr managed_allocator(const managed_allocator<U>&) noexcept
  {
  }

  // Allocate storage for `n` objects of T, stream-ordered on `stream`.
  T* allocate(std::size_t n, rmm::cuda_stream_view stream = rmm::cuda_stream_default) const
  {
    return static_cast<T*>(mr->allocate(n * sizeof(T), stream));
  }

  // Release storage previously obtained from allocate() with the same `n`;
  // deallocation is stream-ordered on `stream`.
  void deallocate(T* p,
                  std::size_t n,
                  rmm::cuda_stream_view stream = rmm::cuda_stream_default) const
  {
    mr->deallocate(p, n * sizeof(T), stream);
  }
};

// All managed_allocator instances are treated as interchangeable (they all
// draw from managed memory), so equality is unconditionally true — the
// relaxed semantics the Allocator named requirement permits.
template <class T, class U>
bool operator==(const managed_allocator<T>& lhs, const managed_allocator<U>& rhs)
{
  (void)lhs;
  (void)rhs;
  return true;
}

// Inequality is simply the negation of equality.
template <class T, class U>
bool operator!=(const managed_allocator<T>& lhs, const managed_allocator<U>& rhs)
{
  return !(lhs == rhs);
}

// Allocator (in the std::allocator sense) that draws from RMM's current
// per-device memory resource.
template <class T>
struct default_allocator {
  typedef T value_type;

  // Resolved once per allocator instance from the device resource that is
  // current at construction time.
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource();

  default_allocator() = default;

  // Rebinding converting constructor required by the Allocator named
  // requirement; the new instance re-resolves the current device resource.
  template <class U>
  constexpr default_allocator(const default_allocator<U>&) noexcept
  {
  }

  // Allocate storage for `count` objects of T, stream-ordered on `stream`.
  T* allocate(std::size_t count, rmm::cuda_stream_view stream = rmm::cuda_stream_default) const
  {
    return static_cast<T*>(mr->allocate(count * sizeof(T), stream));
  }

  // Release `count` objects' worth of storage previously obtained from
  // allocate(); deallocation is stream-ordered on `stream`.
  void deallocate(T* ptr,
                  std::size_t count,
                  rmm::cuda_stream_view stream = rmm::cuda_stream_default) const
  {
    mr->deallocate(ptr, count * sizeof(T), stream);
  }
};

// All default_allocator instances are treated as interchangeable, so
// equality is unconditionally true — the relaxed semantics the Allocator
// named requirement permits.
template <class T, class U>
bool operator==(const default_allocator<T>& lhs, const default_allocator<U>& rhs)
{
  (void)lhs;
  (void)rhs;
  return true;
}

// Inequality is simply the negation of equality.
template <class T, class U>
bool operator!=(const default_allocator<T>& lhs, const default_allocator<U>& rhs)
{
  return !(lhs == rhs);
}

#endif
6 changes: 3 additions & 3 deletions cpp/libcugraph_etl/include/hash/helper_functions.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -132,9 +132,9 @@ __forceinline__ __device__ void store_pair_vectorized(pair_type* __restrict__ co

template <typename value_type, typename size_type, typename key_type, typename elem_type>
__global__ static void init_hashtbl(value_type* __restrict__ const hashtbl_values,
const size_type n,
const key_type key_val,
const elem_type elem_val)
const size_type n,
const key_type key_val,
const elem_type elem_val)
{
const size_type idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
Expand Down
Loading
Loading