Use uppercase for const generic parameters (rust-lang#1035)
Amanieu authored Mar 1, 2021
1 parent 81e50a3 commit 46efde1
Showing 7 changed files with 336 additions and 337 deletions.
3 changes: 1 addition & 2 deletions crates/core_arch/src/lib.rs
@@ -53,8 +53,7 @@
     clippy::shadow_reuse,
     clippy::cognitive_complexity,
     clippy::similar_names,
-    clippy::many_single_char_names,
-    non_upper_case_globals
+    clippy::many_single_char_names
 )]
 #![cfg_attr(test, allow(unused_imports))]
 #![no_std]
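For context on this hunk: the crate had been allowing `non_upper_case_globals` crate-wide, and rustc applies that lint to const generic parameters. Removing the allowance is what forces the renames in the rest of this commit. A minimal sketch of what the lint now flags (the function names are illustrative, not from the diff, and the warning text is paraphrased from memory):

```rust
// Without the blanket allow, rustc warns on a lowercase const generic
// parameter with a message along the lines of:
//     warning: const parameter `imm8` should have an upper case name
pub fn shl_by<const imm8: u32>(x: u32) -> u32 {
    x << imm8
}

// The conforming spelling, as adopted throughout this commit:
pub fn shl_by_ok<const IMM8: u32>(x: u32) -> u32 {
    x << IMM8
}
```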
6 changes: 3 additions & 3 deletions crates/core_arch/src/macros.rs
@@ -2,10 +2,10 @@
 // Helper struct used to trigger const eval errors when the const generic immediate value `imm` is
 // out of `bits`-bit range.
-pub(crate) struct ValidateConstImm<const imm: i32, const bits: i32>;
-impl<const imm: i32, const bits: i32> ValidateConstImm<imm, bits> {
+pub(crate) struct ValidateConstImm<const IMM: i32, const BITS: i32>;
+impl<const IMM: i32, const BITS: i32> ValidateConstImm<IMM, BITS> {
     pub(crate) const VALID: () = {
-        let _ = 1 / ((imm >= 0 && imm < (1 << bits)) as usize);
+        let _ = 1 / ((IMM >= 0 && IMM < (1 << BITS)) as usize);
     };
 }

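The hunk above is easier to follow with the trick spelled out: division by zero is a hard error during const evaluation, so `VALID` only evaluates successfully when the immediate is in range, and each intrinsic forces that evaluation for its own instantiation. A self-contained sketch of the same pattern (all names below are illustrative, not part of stdarch):

```rust
// Illustrative re-creation of the ValidateConstImm pattern.
struct ValidateRange<const IMM: i32, const BITS: i32>;

impl<const IMM: i32, const BITS: i32> ValidateRange<IMM, BITS> {
    // `1 / 0` is a const-eval error, so this constant only evaluates
    // successfully when IMM fits in BITS bits.
    const VALID: () = {
        let _ = 1 / ((IMM >= 0 && IMM < (1 << BITS)) as usize);
    };
}

fn rotate<const IMM: i32>(x: u32) -> u32 {
    // Referencing VALID forces const evaluation for this instantiation;
    // an out-of-range IMM becomes a compile-time error.
    let _ = ValidateRange::<IMM, 5>::VALID;
    x.rotate_left(IMM as u32)
}

fn main() {
    assert_eq!(rotate::<8>(1), 256); // 8 fits in 5 bits: compiles and runs
    // rotate::<99>(1); // would fail to compile: 1 / (false as usize)
}
```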
80 changes: 40 additions & 40 deletions crates/core_arch/src/x86/avx2.rs
@@ -2923,46 +2923,46 @@ pub unsafe fn _mm256_sll_epi64(a: __m256i, count: __m128i) -> __m256i {
     transmute(psllq(a.as_i64x4(), count.as_i64x2()))
 }
 
-/// Shifts packed 16-bit integers in `a` left by `imm8` while
+/// Shifts packed 16-bit integers in `a` left by `IMM8` while
 /// shifting in zeros, return the results;
 ///
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_slli_epi16)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsllw, imm8 = 7))]
+#[cfg_attr(test, assert_instr(vpsllw, IMM8 = 7))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_slli_epi16<const imm8: i32>(a: __m256i) -> __m256i {
-    static_assert_imm8!(imm8);
-    transmute(pslliw(a.as_i16x16(), imm8))
+pub unsafe fn _mm256_slli_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    transmute(pslliw(a.as_i16x16(), IMM8))
 }
 
-/// Shifts packed 32-bit integers in `a` left by `imm8` while
+/// Shifts packed 32-bit integers in `a` left by `IMM8` while
 /// shifting in zeros, return the results;
 ///
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_slli_epi32)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpslld, imm8 = 7))]
+#[cfg_attr(test, assert_instr(vpslld, IMM8 = 7))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_slli_epi32<const imm8: i32>(a: __m256i) -> __m256i {
-    static_assert_imm8!(imm8);
-    transmute(psllid(a.as_i32x8(), imm8))
+pub unsafe fn _mm256_slli_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    transmute(psllid(a.as_i32x8(), IMM8))
 }
 
-/// Shifts packed 64-bit integers in `a` left by `imm8` while
+/// Shifts packed 64-bit integers in `a` left by `IMM8` while
 /// shifting in zeros, return the results;
 ///
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_slli_epi64)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsllq, imm8 = 7))]
+#[cfg_attr(test, assert_instr(vpsllq, IMM8 = 7))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_slli_epi64<const imm8: i32>(a: __m256i) -> __m256i {
-    static_assert_imm8!(imm8);
-    transmute(pslliq(a.as_i64x4(), imm8))
+pub unsafe fn _mm256_slli_epi64<const IMM8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    transmute(pslliq(a.as_i64x4(), IMM8))
 }
 
 /// Shifts 128-bit lanes in `a` left by `imm8` bytes while shifting in zeros.
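As a usage note for the `_mm256_slli_*` family above: after this rename, callers pass the shift amount as an uppercase const generic argument (or positionally; see the note after this file's diff). A sketch of a call site, assuming the caller has verified AVX2 support, e.g. via `is_x86_feature_detected!("avx2")` (the wrapper function name is illustrative):

```rust
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::{__m256i, _mm256_set1_epi32, _mm256_slli_epi32};

// Shift eight packed i32 lanes left by 4 bits, i.e. multiply each by 16.
// Safety: the caller must ensure AVX2 is available before calling.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx2")]
unsafe fn times_sixteen(x: i32) -> __m256i {
    let v = _mm256_set1_epi32(x);
    _mm256_slli_epi32::<4>(v) // each lane becomes x << 4
}
```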
@@ -3077,32 +3077,32 @@ pub unsafe fn _mm256_sra_epi32(a: __m256i, count: __m128i) -> __m256i {
     transmute(psrad(a.as_i32x8(), count.as_i32x4()))
 }
 
-/// Shifts packed 16-bit integers in `a` right by `imm8` while
+/// Shifts packed 16-bit integers in `a` right by `IMM8` while
 /// shifting in sign bits.
 ///
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srai_epi16)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsraw, imm8 = 7))]
+#[cfg_attr(test, assert_instr(vpsraw, IMM8 = 7))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_srai_epi16<const imm8: i32>(a: __m256i) -> __m256i {
-    static_assert_imm8!(imm8);
-    transmute(psraiw(a.as_i16x16(), imm8))
+pub unsafe fn _mm256_srai_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    transmute(psraiw(a.as_i16x16(), IMM8))
 }
 
-/// Shifts packed 32-bit integers in `a` right by `imm8` while
+/// Shifts packed 32-bit integers in `a` right by `IMM8` while
 /// shifting in sign bits.
 ///
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srai_epi32)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsrad, imm8 = 7))]
+#[cfg_attr(test, assert_instr(vpsrad, IMM8 = 7))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_srai_epi32<const imm8: i32>(a: __m256i) -> __m256i {
-    static_assert_imm8!(imm8);
-    transmute(psraid(a.as_i32x8(), imm8))
+pub unsafe fn _mm256_srai_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    transmute(psraid(a.as_i32x8(), IMM8))
 }
 
 /// Shifts packed 32-bit integers in `a` right by the amount specified by the
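The srai/srli distinction in the doc comments above ("shifting in sign bits" vs. "shifting in zeros") is the ordinary arithmetic-versus-logical shift, applied independently to each lane. A plain scalar illustration of the same semantics:

```rust
fn main() {
    let a: i16 = -32768; // bit pattern 0b1000_0000_0000_0000
    // Arithmetic shift (what _mm256_srai_epi16 does per lane):
    // the sign bit is replicated into the vacated positions.
    assert_eq!(a >> 7, -256); // 0b1111_1111_0000_0000
    // Logical shift (what _mm256_srli_epi16 does per lane):
    // zeros are shifted in.
    assert_eq!((a as u16) >> 7, 0b0000_0001_0000_0000);
}
```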
@@ -3201,46 +3201,46 @@ pub unsafe fn _mm256_srl_epi64(a: __m256i, count: __m128i) -> __m256i {
     transmute(psrlq(a.as_i64x4(), count.as_i64x2()))
 }
 
-/// Shifts packed 16-bit integers in `a` right by `imm8` while shifting in
+/// Shifts packed 16-bit integers in `a` right by `IMM8` while shifting in
 /// zeros
 ///
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srli_epi16)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsrlw, imm8 = 7))]
+#[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 7))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_srli_epi16<const imm8: i32>(a: __m256i) -> __m256i {
-    static_assert_imm8!(imm8);
-    transmute(psrliw(a.as_i16x16(), imm8))
+pub unsafe fn _mm256_srli_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    transmute(psrliw(a.as_i16x16(), IMM8))
 }
 
-/// Shifts packed 32-bit integers in `a` right by `imm8` while shifting in
+/// Shifts packed 32-bit integers in `a` right by `IMM8` while shifting in
 /// zeros
 ///
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srli_epi32)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsrld, imm8 = 7))]
+#[cfg_attr(test, assert_instr(vpsrld, IMM8 = 7))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_srli_epi32<const imm8: i32>(a: __m256i) -> __m256i {
-    static_assert_imm8!(imm8);
-    transmute(psrlid(a.as_i32x8(), imm8))
+pub unsafe fn _mm256_srli_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    transmute(psrlid(a.as_i32x8(), IMM8))
 }
 
-/// Shifts packed 64-bit integers in `a` right by `imm8` while shifting in
+/// Shifts packed 64-bit integers in `a` right by `IMM8` while shifting in
 /// zeros
 ///
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srli_epi64)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsrlq, imm8 = 7))]
+#[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 7))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_srli_epi64<const imm8: i32>(a: __m256i) -> __m256i {
-    static_assert_imm8!(imm8);
-    transmute(psrliq(a.as_i64x4(), imm8))
+pub unsafe fn _mm256_srli_epi64<const IMM8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    transmute(psrliq(a.as_i64x4(), IMM8))
 }
 
 /// Shifts packed 32-bit integers in `a` right by the amount specified by
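One detail worth spelling out from the attributes in this file: `#[rustc_legacy_const_generics(1)]` keeps the stable, pre-const-generics positional call syntax working while the parameter moves into the generics list, so renaming `imm8` to `IMM8` is invisible to callers. A sketch showing both call forms compiling against the new signature (assuming AVX2 has been detected at runtime):

```rust
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::{__m256i, _mm256_srli_epi64};

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx2")]
unsafe fn both_call_forms(v: __m256i) -> (__m256i, __m256i) {
    // Explicit const generic argument:
    let a = _mm256_srli_epi64::<7>(v);
    // Legacy positional form, rewritten by #[rustc_legacy_const_generics(1)]:
    let b = _mm256_srli_epi64(v, 7);
    (a, b)
}
```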
34 changes: 17 additions & 17 deletions crates/core_arch/src/x86/sse.rs
@@ -992,7 +992,7 @@ pub const fn _MM_SHUFFLE(z: u32, y: u32, x: u32, w: u32) -> i32 {
 }
 
 /// Shuffles packed single-precision (32-bit) floating-point elements in `a` and
-/// `b` using `mask`.
+/// `b` using `MASK`.
 ///
 /// The lower half of result takes values from `a` and the higher half from
 /// `b`. Mask is split to 2 control bits each to index the element from inputs.
@@ -1006,19 +1006,19 @@ pub const fn _MM_SHUFFLE(z: u32, y: u32, x: u32, w: u32) -> i32 {
 /// does not cause a problem in C, however Rust's commitment to strong typing does not allow this.
 #[inline]
 #[target_feature(enable = "sse")]
-#[cfg_attr(test, assert_instr(shufps, mask = 3))]
+#[cfg_attr(test, assert_instr(shufps, MASK = 3))]
 #[rustc_legacy_const_generics(2)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm_shuffle_ps<const mask: i32>(a: __m128, b: __m128) -> __m128 {
-    static_assert_imm8!(mask);
+pub unsafe fn _mm_shuffle_ps<const MASK: i32>(a: __m128, b: __m128) -> __m128 {
+    static_assert_imm8!(MASK);
     simd_shuffle4(
         a,
         b,
         [
-            mask as u32 & 0b11,
-            (mask as u32 >> 2) & 0b11,
-            ((mask as u32 >> 4) & 0b11) + 4,
-            ((mask as u32 >> 6) & 0b11) + 4,
+            MASK as u32 & 0b11,
+            (MASK as u32 >> 2) & 0b11,
+            ((MASK as u32 >> 4) & 0b11) + 4,
+            ((MASK as u32 >> 6) & 0b11) + 4,
         ],
     )
 }
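To make the `MASK` decomposition above concrete: each 2-bit field selects one lane, the low two fields index into `a`, and the high two index into `b` (hence the `+ 4`). A sketch of a call site (the wrapper name is illustrative; SSE is always available on x86_64):

```rust
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::{__m128, _mm_shuffle_ps};

// Select lanes 3 and 2 of `a`, then lanes 1 and 0 of `b`.
// Mask 0b00_01_10_11 is what _MM_SHUFFLE(0, 1, 2, 3) would produce.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "sse")]
unsafe fn swap_pairs(a: __m128, b: __m128) -> __m128 {
    // Per the decomposition above: lane0 = a[3], lane1 = a[2],
    // lane2 = b[1], lane3 = b[0].
    _mm_shuffle_ps::<0b00_01_10_11>(a, b)
}
```

For example, with `a = (1.0, 2.0, 3.0, 4.0)` and `b = (5.0, 6.0, 7.0, 8.0)` (in `_mm_setr_ps` order), the result is `(4.0, 3.0, 6.0, 5.0)`.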
@@ -1701,9 +1701,9 @@ pub const _MM_HINT_ET0: i32 = 7;
 #[stable(feature = "simd_x86", since = "1.27.0")]
 pub const _MM_HINT_ET1: i32 = 6;
 
-/// Fetch the cache line that contains address `p` using the given `strategy`.
+/// Fetch the cache line that contains address `p` using the given `STRATEGY`.
 ///
-/// The `strategy` must be one of:
+/// The `STRATEGY` must be one of:
 ///
 /// * [`_MM_HINT_T0`](constant._MM_HINT_T0.html): Fetch into all levels of the
 ///   cache hierarchy.
@@ -1745,16 +1745,16 @@ pub const _MM_HINT_ET1: i32 = 6;
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_prefetch)
 #[inline]
 #[target_feature(enable = "sse")]
-#[cfg_attr(test, assert_instr(prefetcht0, strategy = _MM_HINT_T0))]
-#[cfg_attr(test, assert_instr(prefetcht1, strategy = _MM_HINT_T1))]
-#[cfg_attr(test, assert_instr(prefetcht2, strategy = _MM_HINT_T2))]
-#[cfg_attr(test, assert_instr(prefetchnta, strategy = _MM_HINT_NTA))]
+#[cfg_attr(test, assert_instr(prefetcht0, STRATEGY = _MM_HINT_T0))]
+#[cfg_attr(test, assert_instr(prefetcht1, STRATEGY = _MM_HINT_T1))]
+#[cfg_attr(test, assert_instr(prefetcht2, STRATEGY = _MM_HINT_T2))]
+#[cfg_attr(test, assert_instr(prefetchnta, STRATEGY = _MM_HINT_NTA))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm_prefetch<const strategy: i32>(p: *const i8) {
+pub unsafe fn _mm_prefetch<const STRATEGY: i32>(p: *const i8) {
     // We use the `llvm.prefetch` instrinsic with `cache type` = 1 (data cache).
-    // `locality` and `rw` are based on our `strategy`.
-    prefetch(p, (strategy >> 2) & 1, strategy & 3, 1);
+    // `locality` and `rw` are based on our `STRATEGY`.
+    prefetch(p, (STRATEGY >> 2) & 1, STRATEGY & 3, 1);
 }
 
 /// Returns vector of type __m128 with undefined elements.
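A worked reading of the `prefetch(p, (STRATEGY >> 2) & 1, STRATEGY & 3, 1)` call above: the low two bits of the strategy constant become LLVM's `locality` argument and bit 2 becomes `rw`. A small check of that decoding, using the constant values visible in this diff (`_MM_HINT_ET0 = 7`, `_MM_HINT_ET1 = 6`) plus the read hints' standard values:

```rust
// Decode a prefetch strategy constant the same way the intrinsic body does.
fn decode(strategy: i32) -> (i32, i32) {
    let rw = (strategy >> 2) & 1; // 0 = read hint, 1 = write hint (ET variants)
    let locality = strategy & 3; // 0 = non-temporal ... 3 = all cache levels
    (rw, locality)
}

fn main() {
    // _MM_HINT_T0 = 3, _MM_HINT_NTA = 0 (standard values),
    // _MM_HINT_ET0 = 7, _MM_HINT_ET1 = 6 (from the diff above).
    assert_eq!(decode(3), (0, 3)); // T0: read, highest locality
    assert_eq!(decode(0), (0, 0)); // NTA: read, non-temporal
    assert_eq!(decode(7), (1, 3)); // ET0: write hint
    assert_eq!(decode(6), (1, 2)); // ET1: write hint
}
```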
(Diffs for the remaining three changed files are collapsed and not shown here.)
