diff --git a/crates/core_arch/src/aarch64/neon/generated.rs b/crates/core_arch/src/aarch64/neon/generated.rs
index 3227085e9d..e5df1b72c0 100644
--- a/crates/core_arch/src/aarch64/neon/generated.rs
+++ b/crates/core_arch/src/aarch64/neon/generated.rs
@@ -117,6 +117,7 @@ pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fabd))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -130,6 +131,7 @@ pub unsafe fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fabd))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -143,6 +145,7 @@ pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fabd))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vabds_f32(a: f32, b: f32) -> f32 {
     simd_extract(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
 }
@@ -151,6 +154,7 @@ pub unsafe fn vabds_f32(a: f32, b: f32) -> f32 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fabd))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vabdd_f64(a: f64, b: f64) -> f64 {
     simd_extract(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
 }
@@ -159,6 +163,7 @@ pub unsafe fn vabdd_f64(a: f64, b: f64) -> f64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(uabdl))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
     let c: uint8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
     let d: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
@@ -169,6 +174,7 @@ pub unsafe fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(uabdl))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
     let c: uint16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
     let d: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
@@ -179,6 +185,7 @@ pub unsafe fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(uabdl))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
     let c: uint32x2_t = simd_shuffle2!(a, a, [2, 3]);
     let d: uint32x2_t = simd_shuffle2!(b, b, [2, 3]);
@@ -189,6 +196,7 @@ pub unsafe fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sabdl))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
     let c: int8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
     let d: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
@@ -200,6 +208,7 @@ pub unsafe fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sabdl))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
     let c: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
     let d: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
@@ -211,6 +220,7 @@ pub unsafe fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sabdl))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
     let c: int32x2_t = simd_shuffle2!(a, a, [2, 3]);
     let d: int32x2_t = simd_shuffle2!(b, b, [2, 3]);
@@ -222,6 +232,7 @@ pub unsafe fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
     simd_eq(a, b)
 }
@@ -230,6 +241,7 @@ pub unsafe fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     simd_eq(a, b)
 }
@@ -238,6 +250,7 @@ pub unsafe fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
     simd_eq(a, b)
 }
@@ -246,6 +259,7 @@ pub unsafe fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
     simd_eq(a, b)
 }
@@ -254,6 +268,7 @@ pub unsafe fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
     simd_eq(a, b)
 }
@@ -262,6 +277,7 @@ pub unsafe fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
     simd_eq(a, b)
 }
@@ -270,6 +286,7 @@ pub unsafe fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
     simd_eq(a, b)
 }
@@ -278,6 +295,7 @@ pub unsafe fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
     simd_eq(a, b)
 }
@@ -286,6 +304,7 @@ pub unsafe fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqd_s64(a: i64, b: i64) -> u64 {
     transmute(vceq_s64(transmute(a), transmute(b)))
 }
@@ -294,6 +313,7 @@ pub unsafe fn vceqd_s64(a: i64, b: i64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqd_u64(a: u64, b: u64) -> u64 {
     transmute(vceq_u64(transmute(a), transmute(b)))
 }
@@ -302,6 +322,7 @@ pub unsafe fn vceqd_u64(a: u64, b: u64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqs_f32(a: f32, b: f32) -> u32 {
     simd_extract(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
 }
@@ -310,6 +331,7 @@ pub unsafe fn vceqs_f32(a: f32, b: f32) -> u32 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqd_f64(a: f64, b: f64) -> u64 {
     simd_extract(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
 }
@@ -318,6 +340,7 @@ pub unsafe fn vceqd_f64(a: f64, b: f64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
     let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
     simd_eq(a, transmute(b))
@@ -327,6 +350,7 @@ pub unsafe fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
     let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
     simd_eq(a, transmute(b))
@@ -336,6 +360,7 @@ pub unsafe fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
     let b: i16x4 = i16x4::new(0, 0, 0, 0);
     simd_eq(a, transmute(b))
@@ -345,6 +370,7 @@ pub unsafe fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
     let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
     simd_eq(a, transmute(b))
@@ -354,6 +380,7 @@ pub unsafe fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
     let b: i32x2 = i32x2::new(0, 0);
     simd_eq(a, transmute(b))
@@ -363,6 +390,7 @@ pub unsafe fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
     let b: i32x4 = i32x4::new(0, 0, 0, 0);
     simd_eq(a, transmute(b))
@@ -372,6 +400,7 @@ pub unsafe fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
     let b: i64x1 = i64x1::new(0);
     simd_eq(a, transmute(b))
@@ -381,6 +410,7 @@ pub unsafe fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
     let b: i64x2 = i64x2::new(0, 0);
     simd_eq(a, transmute(b))
@@ -390,6 +420,7 @@ pub unsafe fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
     let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
     simd_eq(a, transmute(b))
@@ -399,6 +430,7 @@ pub unsafe fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
     let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
     simd_eq(a, transmute(b))
@@ -408,6 +440,7 @@ pub unsafe fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
     let b: i64x1 = i64x1::new(0);
     simd_eq(a, transmute(b))
@@ -417,6 +450,7 @@ pub unsafe fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
     let b: i64x2 = i64x2::new(0, 0);
     simd_eq(a, transmute(b))
@@ -426,6 +460,7 @@ pub unsafe fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
     let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
     simd_eq(a, transmute(b))
@@ -435,6 +470,7 @@ pub unsafe fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
     let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
     simd_eq(a, transmute(b))
@@ -444,6 +480,7 @@ pub unsafe fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
     let b: u16x4 = u16x4::new(0, 0, 0, 0);
     simd_eq(a, transmute(b))
@@ -453,6 +490,7 @@ pub unsafe fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
     let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
     simd_eq(a, transmute(b))
@@ -462,6 +500,7 @@ pub unsafe fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
     let b: u32x2 = u32x2::new(0, 0);
     simd_eq(a, transmute(b))
@@ -471,6 +510,7 @@ pub unsafe fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
     let b: u32x4 = u32x4::new(0, 0, 0, 0);
     simd_eq(a, transmute(b))
@@ -480,6 +520,7 @@ pub unsafe fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
     let b: u64x1 = u64x1::new(0);
     simd_eq(a, transmute(b))
@@ -489,6 +530,7 @@ pub unsafe fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
     let b: u64x2 = u64x2::new(0, 0);
     simd_eq(a, transmute(b))
@@ -498,6 +540,7 @@ pub unsafe fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
     let b: f32x2 = f32x2::new(0.0, 0.0);
     simd_eq(a, transmute(b))
@@ -507,6 +550,7 @@ pub unsafe fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
     let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
     simd_eq(a, transmute(b))
@@ -516,6 +560,7 @@ pub unsafe fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
     let b: f64 = 0.0;
     simd_eq(a, transmute(b))
@@ -525,6 +570,7 @@ pub unsafe fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmeq))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
     let b: f64x2 = f64x2::new(0.0, 0.0);
     simd_eq(a, transmute(b))
@@ -534,6 +580,7 @@ pub unsafe fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqzd_s64(a: i64) -> u64 {
     transmute(vceqz_s64(transmute(a)))
 }
@@ -542,6 +589,7 @@ pub unsafe fn vceqzd_s64(a: i64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqzd_u64(a: u64) -> u64 {
     transmute(vceqz_u64(transmute(a)))
 }
@@ -550,6 +598,7 @@ pub unsafe fn vceqzd_u64(a: u64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqzs_f32(a: f32) -> u32 {
     simd_extract(vceqz_f32(vdup_n_f32(a)), 0)
 }
@@ -558,6 +607,7 @@ pub unsafe fn vceqzs_f32(a: f32) -> u32 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vceqzd_f64(a: f64) -> u64 {
     simd_extract(vceqz_f64(vdup_n_f64(a)), 0)
 }
@@ -566,6 +616,7 @@ pub unsafe fn vceqzd_f64(a: f64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmtst))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
     let c: int64x1_t = simd_and(a, b);
     let d: i64x1 = i64x1::new(0);
@@ -576,6 +627,7 @@ pub unsafe fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmtst))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
     let c: int64x2_t = simd_and(a, b);
     let d: i64x2 = i64x2::new(0, 0);
@@ -586,6 +638,7 @@ pub unsafe fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmtst))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
     let c: poly64x1_t = simd_and(a, b);
     let d: i64x1 = i64x1::new(0);
@@ -596,6 +649,7 @@ pub unsafe fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmtst))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
     let c: poly64x2_t = simd_and(a, b);
     let d: i64x2 = i64x2::new(0, 0);
@@ -606,6 +660,7 @@ pub unsafe fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmtst))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
     let c: uint64x1_t = simd_and(a, b);
     let d: u64x1 = u64x1::new(0);
@@ -616,6 +671,7 @@ pub unsafe fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmtst))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     let c: uint64x2_t = simd_and(a, b);
     let d: u64x2 = u64x2::new(0, 0);
@@ -626,6 +682,7 @@ pub unsafe fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(tst))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vtstd_s64(a: i64, b: i64) -> u64 {
     transmute(vtst_s64(transmute(a), transmute(b)))
 }
@@ -634,6 +691,7 @@ pub unsafe fn vtstd_s64(a: i64, b: i64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(tst))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vtstd_u64(a: u64, b: u64) -> u64 {
     transmute(vtst_u64(transmute(a), transmute(b)))
 }
@@ -642,6 +700,7 @@ pub unsafe fn vtstd_u64(a: u64, b: u64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(suqadd))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vuqadds_s32(a: i32, b: u32) -> i32 {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -655,6 +714,7 @@ pub unsafe fn vuqadds_s32(a: i32, b: u32) -> i32 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(suqadd))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vuqaddd_s64(a: i64, b: u64) -> i64 {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -668,6 +728,7 @@ pub unsafe fn vuqaddd_s64(a: i64, b: u64) -> i64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(suqadd))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vuqaddb_s8(a: i8, b: u8) -> i8 {
     simd_extract(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0)
 }
@@ -676,6 +737,7 @@ pub unsafe fn vuqaddb_s8(a: i8, b: u8) -> i8 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(suqadd))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vuqaddh_s16(a: i16, b: u16) -> i16 {
     simd_extract(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0)
 }
@@ -684,6 +746,7 @@ pub unsafe fn vuqaddh_s16(a: i16, b: u16) -> i16 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fabs))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vabs_f64(a: float64x1_t) -> float64x1_t {
     simd_fabs(a)
 }
@@ -692,6 +755,7 @@ pub unsafe fn vabs_f64(a: float64x1_t) -> float64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fabs))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vabsq_f64(a: float64x2_t) -> float64x2_t {
     simd_fabs(a)
 }
@@ -700,6 +764,7 @@ pub unsafe fn vabsq_f64(a: float64x2_t) -> float64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
     simd_gt(a, b)
 }
@@ -708,6 +773,7 @@ pub unsafe fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
     simd_gt(a, b)
 }
@@ -716,6 +782,7 @@ pub unsafe fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmhi))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
     simd_gt(a, b)
 }
@@ -724,6 +791,7 @@ pub unsafe fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmhi))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     simd_gt(a, b)
 }
@@ -732,6 +800,7 @@ pub unsafe fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
     simd_gt(a, b)
 }
@@ -740,6 +809,7 @@ pub unsafe fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
     simd_gt(a, b)
 }
@@ -748,6 +818,7 @@ pub unsafe fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtd_s64(a: i64, b: i64) -> u64 {
     transmute(vcgt_s64(transmute(a), transmute(b)))
 }
@@ -756,6 +827,7 @@ pub unsafe fn vcgtd_s64(a: i64, b: i64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtd_u64(a: u64, b: u64) -> u64 {
     transmute(vcgt_u64(transmute(a), transmute(b)))
 }
@@ -764,6 +836,7 @@ pub unsafe fn vcgtd_u64(a: u64, b: u64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgts_f32(a: f32, b: f32) -> u32 {
     simd_extract(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
 }
@@ -772,6 +845,7 @@ pub unsafe fn vcgts_f32(a: f32, b: f32) -> u32 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtd_f64(a: f64, b: f64) -> u64 {
     simd_extract(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
 }
@@ -780,6 +854,7 @@ pub unsafe fn vcgtd_f64(a: f64, b: f64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
     simd_lt(a, b)
 }
@@ -788,6 +863,7 @@ pub unsafe fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
     simd_lt(a, b)
 }
@@ -796,6 +872,7 @@ pub unsafe fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmhi))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
     simd_lt(a, b)
 }
@@ -804,6 +881,7 @@ pub unsafe fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmhi))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     simd_lt(a, b)
 }
@@ -812,6 +890,7 @@ pub unsafe fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
     simd_lt(a, b)
 }
@@ -820,6 +899,7 @@ pub unsafe fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
     simd_lt(a, b)
 }
@@ -828,6 +908,7 @@ pub unsafe fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcltd_s64(a: i64, b: i64) -> u64 {
     transmute(vclt_s64(transmute(a), transmute(b)))
 }
@@ -836,6 +917,7 @@ pub unsafe fn vcltd_s64(a: i64, b: i64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcltd_u64(a: u64, b: u64) -> u64 {
     transmute(vclt_u64(transmute(a), transmute(b)))
 }
@@ -844,6 +926,7 @@ pub unsafe fn vcltd_u64(a: u64, b: u64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclts_f32(a: f32, b: f32) -> u32 {
     simd_extract(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
 }
@@ -852,6 +935,7 @@ pub unsafe fn vclts_f32(a: f32, b: f32) -> u32 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcltd_f64(a: f64, b: f64) -> u64 {
     simd_extract(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
 }
@@ -860,6 +944,7 @@ pub unsafe fn vcltd_f64(a: f64, b: f64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
     simd_le(a, b)
 }
@@ -868,6 +953,7 @@ pub unsafe fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
     simd_le(a, b)
 }
@@ -876,6 +962,7 @@ pub unsafe fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcged_s64(a: i64, b: i64) -> u64 {
     transmute(vcge_s64(transmute(a), transmute(b)))
 }
@@ -884,6 +971,7 @@ pub unsafe fn vcged_s64(a: i64, b: i64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcged_u64(a: u64, b: u64) -> u64 {
     transmute(vcge_u64(transmute(a), transmute(b)))
 }
@@ -892,6 +980,7 @@ pub unsafe fn vcged_u64(a: u64, b: u64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcges_f32(a: f32, b: f32) -> u32 {
     simd_extract(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
 }
@@ -900,6 +989,7 @@ pub unsafe fn vcges_f32(a: f32, b: f32) -> u32 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcged_f64(a: f64, b: f64) -> u64 {
     simd_extract(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
 }
@@ -908,6 +998,7 @@ pub unsafe fn vcged_f64(a: f64, b: f64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmhs))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
     simd_le(a, b)
 }
@@ -916,6 +1007,7 @@ pub unsafe fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmhs))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     simd_le(a, b)
 }
@@ -924,6 +1016,7 @@ pub unsafe fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
     simd_le(a, b)
 }
@@ -932,6 +1025,7 @@ pub unsafe fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
     simd_le(a, b)
 }
@@ -940,6 +1034,7 @@ pub unsafe fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcled_s64(a: i64, b: i64) -> u64 {
     transmute(vcle_s64(transmute(a), transmute(b)))
 }
@@ -948,6 +1043,7 @@ pub unsafe fn vcled_s64(a: i64, b: i64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcled_u64(a: u64, b: u64) -> u64 {
     transmute(vcle_u64(transmute(a), transmute(b)))
 }
@@ -956,6 +1052,7 @@ pub unsafe fn vcled_u64(a: u64, b: u64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcles_f32(a: f32, b: f32) -> u32 {
     simd_extract(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
 }
@@ -964,6 +1061,7 @@ pub unsafe fn vcles_f32(a: f32, b: f32) -> u32 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcled_f64(a: f64, b: f64) -> u64 {
     simd_extract(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
 }
@@ -972,6 +1070,7 @@ pub unsafe fn vcled_f64(a: f64, b: f64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
     simd_ge(a, b)
 }
@@ -980,6 +1079,7 @@ pub unsafe fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
     simd_ge(a, b)
 }
@@ -988,6 +1088,7 @@ pub unsafe fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmhs))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
     simd_ge(a, b)
 }
@@ -996,6 +1097,7 @@ pub unsafe fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmhs))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     simd_ge(a, b)
 }
@@ -1004,6 +1106,7 @@ pub unsafe fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
     simd_ge(a, b)
 }
@@ -1012,6 +1115,7 @@ pub unsafe fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
     simd_ge(a, b)
 }
@@ -1020,6 +1124,7 @@ pub unsafe fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
     let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
     simd_ge(a, transmute(b))
@@ -1029,6 +1134,7 @@ pub unsafe fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
     let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
     simd_ge(a, transmute(b))
@@ -1038,6 +1144,7 @@ pub unsafe fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
     let b: i16x4 = i16x4::new(0, 0, 0, 0);
     simd_ge(a, transmute(b))
@@ -1047,6 +1154,7 @@ pub unsafe fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
     let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
     simd_ge(a, transmute(b))
@@ -1056,6 +1164,7 @@ pub unsafe fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
     let b: i32x2 = i32x2::new(0, 0);
     simd_ge(a, transmute(b))
@@ -1065,6 +1174,7 @@ pub unsafe fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
     let b: i32x4 = i32x4::new(0, 0, 0, 0);
     simd_ge(a, transmute(b))
@@ -1074,6 +1184,7 @@ pub unsafe fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
     let b: i64x1 = i64x1::new(0);
     simd_ge(a, transmute(b))
@@ -1083,6 +1194,7 @@ pub unsafe fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
     let b: i64x2 = i64x2::new(0, 0);
     simd_ge(a, transmute(b))
@@ -1092,6 +1204,7 @@ pub unsafe fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
     let b: f32x2 = f32x2::new(0.0, 0.0);
     simd_ge(a, transmute(b))
@@ -1101,6 +1214,7 @@ pub unsafe fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
     let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
     simd_ge(a, transmute(b))
@@ -1110,6 +1224,7 @@ pub unsafe fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
     let b: f64 = 0.0;
     simd_ge(a, transmute(b))
@@ -1119,6 +1234,7 @@ pub unsafe fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmge))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
     let b: f64x2 = f64x2::new(0.0, 0.0);
     simd_ge(a, transmute(b))
@@ -1128,6 +1244,7 @@ pub unsafe fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(eor))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgezd_s64(a: i64) -> u64 {
     transmute(vcgez_s64(transmute(a)))
 }
@@ -1136,6 +1253,7 @@ pub unsafe fn vcgezd_s64(a: i64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgezs_f32(a: f32) -> u32 {
     simd_extract(vcgez_f32(vdup_n_f32(a)), 0)
 }
@@ -1144,6 +1262,7 @@ pub unsafe fn vcgezs_f32(a: f32) -> u32 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgezd_f64(a: f64) -> u64 {
     simd_extract(vcgez_f64(vdup_n_f64(a)), 0)
 }
@@ -1152,6 +1271,7 @@ pub unsafe fn vcgezd_f64(a: f64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
     let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
     simd_gt(a, transmute(b))
@@ -1161,6 +1281,7 @@ pub unsafe fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
     let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
     simd_gt(a, transmute(b))
@@ -1170,6 +1291,7 @@ pub unsafe fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
     let b: i16x4 = i16x4::new(0, 0, 0, 0);
     simd_gt(a, transmute(b))
@@ -1179,6 +1301,7 @@ pub unsafe fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
     let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
     simd_gt(a, transmute(b))
@@ -1188,6 +1311,7 @@ pub unsafe fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
     let b: i32x2 = i32x2::new(0, 0);
     simd_gt(a, transmute(b))
@@ -1197,6 +1321,7 @@ pub unsafe fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
     let b: i32x4 = i32x4::new(0, 0, 0, 0);
     simd_gt(a, transmute(b))
@@ -1206,6 +1331,7 @@ pub unsafe fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
     let b: i64x1 = i64x1::new(0);
     simd_gt(a, transmute(b))
@@ -1215,6 +1341,7 @@ pub unsafe fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
     let b: i64x2 = i64x2::new(0, 0);
     simd_gt(a, transmute(b))
@@ -1224,6 +1351,7 @@ pub unsafe fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
     let b: f32x2 = f32x2::new(0.0, 0.0);
     simd_gt(a, transmute(b))
@@ -1233,6 +1361,7 @@ pub unsafe fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
     let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
     simd_gt(a, transmute(b))
@@ -1242,6 +1371,7 @@ pub unsafe fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
     let b: f64 = 0.0;
     simd_gt(a, transmute(b))
@@ -1251,6 +1381,7 @@ pub unsafe fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
     let b: f64x2 = f64x2::new(0.0, 0.0);
     simd_gt(a, transmute(b))
@@ -1260,6 +1391,7 @@ pub unsafe fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtzd_s64(a: i64) -> u64 {
     transmute(vcgtz_s64(transmute(a)))
 }
@@ -1268,6 +1400,7 @@ pub unsafe fn vcgtzd_s64(a: i64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtzs_f32(a: f32) -> u32 {
     simd_extract(vcgtz_f32(vdup_n_f32(a)), 0)
 }
@@ -1276,6 +1409,7 @@ pub unsafe fn vcgtzs_f32(a: f32) -> u32 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcgtzd_f64(a: f64) -> u64 {
     simd_extract(vcgtz_f64(vdup_n_f64(a)), 0)
 }
@@ -1284,6 +1418,7 @@ pub unsafe fn vcgtzd_f64(a: f64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclez_s8(a: int8x8_t) -> uint8x8_t {
     let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
     simd_le(a, transmute(b))
@@ -1293,6 +1428,7 @@ pub unsafe fn vclez_s8(a: int8x8_t) -> uint8x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
     let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
     simd_le(a, transmute(b))
@@ -1302,6 +1438,7 @@ pub unsafe fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclez_s16(a: int16x4_t) -> uint16x4_t {
     let b: i16x4 = i16x4::new(0, 0, 0, 0);
     simd_le(a, transmute(b))
@@ -1311,6 +1448,7 @@ pub unsafe fn vclez_s16(a: int16x4_t) -> uint16x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
     let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
     simd_le(a, transmute(b))
@@ -1320,6 +1458,7 @@ pub unsafe fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclez_s32(a: int32x2_t) -> uint32x2_t {
     let b: i32x2 = i32x2::new(0, 0);
     simd_le(a, transmute(b))
@@ -1329,6 +1468,7 @@ pub unsafe fn vclez_s32(a: int32x2_t) -> uint32x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
     let b: i32x4 = i32x4::new(0, 0, 0, 0);
     simd_le(a, transmute(b))
@@ -1338,6 +1478,7 @@ pub unsafe fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclez_s64(a: int64x1_t) -> uint64x1_t {
     let b: i64x1 = i64x1::new(0);
     simd_le(a, transmute(b))
@@ -1347,6 +1488,7 @@ pub unsafe fn vclez_s64(a: int64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmgt))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
     let b: i64x2 = i64x2::new(0, 0);
     simd_le(a, transmute(b))
@@ -1356,6 +1498,7 @@ pub unsafe fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmle))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclez_f32(a: float32x2_t) -> uint32x2_t {
     let b: f32x2 = f32x2::new(0.0, 0.0);
     simd_le(a, transmute(b))
@@ -1365,6 +1508,7 @@ pub unsafe fn vclez_f32(a: float32x2_t) -> uint32x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmle))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
     let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
     simd_le(a, transmute(b))
@@ -1374,6 +1518,7 @@ pub unsafe fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmle))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclez_f64(a: float64x1_t) -> uint64x1_t {
     let b: f64 = 0.0;
     simd_le(a, transmute(b))
@@ -1383,6 +1528,7 @@ pub unsafe fn vclez_f64(a: float64x1_t) -> uint64x1_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmle))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
     let b: f64x2 = f64x2::new(0.0, 0.0);
     simd_le(a, transmute(b))
@@ -1392,6 +1538,7 @@ pub unsafe fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(cmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclezd_s64(a: i64) -> u64 {
     transmute(vclez_s64(transmute(a)))
 }
@@ -1400,6 +1547,7 @@ pub unsafe fn vclezd_s64(a: i64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclezs_f32(a: f32) -> u32 {
     simd_extract(vclez_f32(vdup_n_f32(a)), 0)
 }
@@ -1408,6 +1556,7 @@ pub unsafe fn vclezs_f32(a: f32) -> u32 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fcmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vclezd_f64(a: f64) -> u64 {
     simd_extract(vclez_f64(vdup_n_f64(a)), 0)
 }
@@ -1416,6 +1565,7 @@ pub unsafe fn vclezd_f64(a: f64) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sshr))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
     let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
     simd_lt(a, transmute(b))
@@ -1425,6 +1575,7 @@ pub unsafe fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
 #[inline]
#[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sshr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltzq_s8(a: int8x16_t) -> uint8x16_t { let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); simd_lt(a, transmute(b)) @@ -1434,6 +1585,7 @@ pub unsafe fn vcltzq_s8(a: int8x16_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sshr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltz_s16(a: int16x4_t) -> uint16x4_t { let b: i16x4 = i16x4::new(0, 0, 0, 0); simd_lt(a, transmute(b)) @@ -1443,6 +1595,7 @@ pub unsafe fn vcltz_s16(a: int16x4_t) -> uint16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sshr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltzq_s16(a: int16x8_t) -> uint16x8_t { let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_lt(a, transmute(b)) @@ -1452,6 +1605,7 @@ pub unsafe fn vcltzq_s16(a: int16x8_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sshr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltz_s32(a: int32x2_t) -> uint32x2_t { let b: i32x2 = i32x2::new(0, 0); simd_lt(a, transmute(b)) @@ -1461,6 +1615,7 @@ pub unsafe fn vcltz_s32(a: int32x2_t) -> uint32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sshr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltzq_s32(a: int32x4_t) -> uint32x4_t { let b: i32x4 = i32x4::new(0, 0, 0, 0); simd_lt(a, transmute(b)) @@ -1470,6 +1625,7 @@ pub unsafe fn vcltzq_s32(a: int32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sshr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltz_s64(a: int64x1_t) -> uint64x1_t { let b: i64x1 = i64x1::new(0); simd_lt(a, transmute(b)) @@ -1479,6 +1635,7 @@ pub unsafe fn vcltz_s64(a: int64x1_t) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sshr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltzq_s64(a: int64x2_t) -> uint64x2_t { let b: i64x2 = i64x2::new(0, 0); simd_lt(a, transmute(b)) @@ -1488,6 +1645,7 @@ pub unsafe fn vcltzq_s64(a: int64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltz_f32(a: float32x2_t) -> uint32x2_t { let b: f32x2 = f32x2::new(0.0, 0.0); simd_lt(a, transmute(b)) @@ -1497,6 +1655,7 @@ pub unsafe fn vcltz_f32(a: float32x2_t) -> uint32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltzq_f32(a: float32x4_t) -> uint32x4_t { let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); simd_lt(a, transmute(b)) @@ -1506,6 +1665,7 @@ pub unsafe fn vcltzq_f32(a: float32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltz_f64(a: float64x1_t) -> uint64x1_t { let b: f64 = 0.0; simd_lt(a, transmute(b)) @@ -1515,6 +1675,7 @@ pub unsafe fn vcltz_f64(a: float64x1_t) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltzq_f64(a: 
float64x2_t) -> uint64x2_t { let b: f64x2 = f64x2::new(0.0, 0.0); simd_lt(a, transmute(b)) @@ -1524,6 +1685,7 @@ pub unsafe fn vcltzq_f64(a: float64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(asr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltzd_s64(a: i64) -> u64 { transmute(vcltz_s64(transmute(a))) } @@ -1532,6 +1694,7 @@ pub unsafe fn vcltzd_s64(a: i64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltzs_f32(a: f32) -> u32 { simd_extract(vcltz_f32(vdup_n_f32(a)), 0) } @@ -1540,6 +1703,7 @@ pub unsafe fn vcltzs_f32(a: f32) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltzd_f64(a: f64) -> u64 { simd_extract(vcltz_f64(vdup_n_f64(a)), 0) } @@ -1548,6 +1712,7 @@ pub unsafe fn vcltzd_f64(a: f64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -1561,6 +1726,7 @@ pub unsafe fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -1574,6 +1740,7 @@ pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcagts_f32(a: f32, b: f32) -> u32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -1587,6 +1754,7 @@ pub unsafe fn vcagts_f32(a: f32, b: f32) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcagtd_f64(a: f64, b: f64) -> u64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -1600,6 +1768,7 @@ pub unsafe fn vcagtd_f64(a: f64, b: f64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -1613,6 +1782,7 @@ pub unsafe fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -1626,6 +1796,7 @@ pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcages_f32(a: f32, b: f32) -> u32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -1639,6 +1810,7 @@ pub unsafe fn vcages_f32(a: f32, b: f32) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn 
vcaged_f64(a: f64, b: f64) -> u64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -1652,6 +1824,7 @@ pub unsafe fn vcaged_f64(a: f64, b: f64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { vcagt_f64(b, a) } @@ -1660,6 +1833,7 @@ pub unsafe fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { vcagtq_f64(b, a) } @@ -1668,6 +1842,7 @@ pub unsafe fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcalts_f32(a: f32, b: f32) -> u32 { vcagts_f32(b, a) } @@ -1676,6 +1851,7 @@ pub unsafe fn vcalts_f32(a: f32, b: f32) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcaltd_f64(a: f64, b: f64) -> u64 { vcagtd_f64(b, a) } @@ -1684,6 +1860,7 @@ pub unsafe fn vcaltd_f64(a: f64, b: f64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { vcage_f64(b, a) } @@ -1692,6 +1869,7 @@ pub unsafe fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { vcageq_f64(b, a) } @@ -1700,6 +1878,7 @@ pub unsafe fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcales_f32(a: f32, b: f32) -> u32 { vcages_f32(b, a) } @@ -1708,6 +1887,7 @@ pub unsafe fn vcales_f32(a: f32, b: f32) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcaled_f64(a: f64, b: f64) -> u64 { vcaged_f64(b, a) } @@ -1717,6 +1897,7 @@ pub unsafe fn vcaled_f64(a: f64, b: f64) -> u64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert_imm3!(LANE1); static_assert_imm3!(LANE2); @@ -1738,6 +1919,7 @@ pub unsafe fn vcopy_lane_s8(a: int8x8_t, b: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert_imm4!(LANE1); static_assert_imm4!(LANE2); @@ -1767,6 +1949,7 @@ pub unsafe fn vcopyq_laneq_s8(a: int8x16_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub 
@@ -1767,6 +1949,7 @@ pub unsafe fn vcopyq_laneq_s8(a: int8x16_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE1); static_assert_imm2!(LANE2); @@ -1784,6 +1967,7 @@ pub unsafe fn vcopy_lane_s16(a: int16x4_t, b #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE1); static_assert_imm3!(LANE2); @@ -1805,6 +1989,7 @@ pub unsafe fn vcopyq_laneq_s16(a: int16x8_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE1); static_assert_imm1!(LANE2); @@ -1820,6 +2005,7 @@ pub unsafe fn vcopy_lane_s32(a: int32x2_t, b #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE1); static_assert_imm2!(LANE2); @@ -1837,6 +2023,7 @@ pub unsafe fn vcopyq_laneq_s32(a: int32x4_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert_imm1!(LANE1); static_assert_imm1!(LANE2); @@ -1852,6 +2039,7 @@ pub unsafe fn vcopyq_laneq_s64(a: int64x2_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { static_assert_imm3!(LANE1); static_assert_imm3!(LANE2); @@ -1873,6 +2061,7 @@ pub unsafe fn vcopy_lane_u8(a: uint8x8_t, b: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { static_assert_imm4!(LANE1); static_assert_imm4!(LANE2); @@ -1902,6 +2091,7 @@ pub unsafe fn vcopyq_laneq_u8(a: uint8x16_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert_imm2!(LANE1); static_assert_imm2!(LANE2); @@ -1919,6 +2109,7 @@ pub unsafe fn vcopy_lane_u16(a: uint16x4_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { static_assert_imm3!(LANE1); static_assert_imm3!(LANE2); @@ -1940,6 +2131,7 @@ pub unsafe fn vcopyq_laneq_u16(a: uint16x8_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { 
static_assert_imm1!(LANE1); static_assert_imm1!(LANE2); @@ -1955,6 +2147,7 @@ pub unsafe fn vcopy_lane_u32(a: uint32x2_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { static_assert_imm2!(LANE1); static_assert_imm2!(LANE2); @@ -1972,6 +2165,7 @@ pub unsafe fn vcopyq_laneq_u32(a: uint32x4_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { static_assert_imm1!(LANE1); static_assert_imm1!(LANE2); @@ -1987,6 +2181,7 @@ pub unsafe fn vcopyq_laneq_u64(a: uint64x2_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { static_assert_imm3!(LANE1); static_assert_imm3!(LANE2); @@ -2008,6 +2203,7 @@ pub unsafe fn vcopy_lane_p8(a: poly8x8_t, b: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { static_assert_imm4!(LANE1); static_assert_imm4!(LANE2); @@ -2037,6 +2233,7 @@ pub unsafe fn vcopyq_laneq_p8(a: poly8x16_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { static_assert_imm2!(LANE1); static_assert_imm2!(LANE2); @@ -2054,6 +2251,7 @@ pub unsafe fn vcopy_lane_p16(a: poly16x4_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { static_assert_imm3!(LANE1); static_assert_imm3!(LANE2); @@ -2075,6 +2273,7 @@ pub unsafe fn vcopyq_laneq_p16(a: poly16x8_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { static_assert_imm1!(LANE1); static_assert_imm1!(LANE2); @@ -2090,6 +2289,7 @@ pub unsafe fn vcopyq_laneq_p64(a: poly64x2_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t { static_assert_imm1!(LANE1); static_assert_imm1!(LANE2); @@ -2105,6 +2305,7 @@ pub unsafe fn vcopy_lane_f32(a: float32x2_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t { static_assert_imm2!(LANE1); static_assert_imm2!(LANE2); @@ 
-2122,6 +2323,7 @@ pub unsafe fn vcopyq_laneq_f32(a: float32x4_ #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t { static_assert_imm1!(LANE1); static_assert_imm1!(LANE2); @@ -2137,6 +2339,7 @@ pub unsafe fn vcopyq_laneq_f64(a: float64x2_ #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x16_t) -> int8x8_t { static_assert_imm3!(LANE1); static_assert_imm4!(LANE2); @@ -2159,6 +2362,7 @@ pub unsafe fn vcopy_laneq_s8(a: int8x8_t, b: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t { static_assert_imm2!(LANE1); static_assert_imm3!(LANE2); @@ -2177,6 +2381,7 @@ pub unsafe fn vcopy_laneq_s16(a: int16x4_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t { static_assert_imm1!(LANE1); static_assert_imm2!(LANE2); @@ -2193,6 +2398,7 @@ pub unsafe fn vcopy_laneq_s32(a: int32x2_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x16_t) -> uint8x8_t { static_assert_imm3!(LANE1); static_assert_imm4!(LANE2); @@ -2215,6 +2421,7 @@ pub unsafe fn vcopy_laneq_u8(a: uint8x8_t, b #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t { static_assert_imm2!(LANE1); static_assert_imm3!(LANE2); @@ -2233,6 +2440,7 @@ pub unsafe fn vcopy_laneq_u16(a: uint16x4_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t { static_assert_imm1!(LANE1); static_assert_imm2!(LANE2); @@ -2249,6 +2457,7 @@ pub unsafe fn vcopy_laneq_u32(a: uint32x2_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x16_t) -> poly8x8_t { static_assert_imm3!(LANE1); static_assert_imm4!(LANE2); @@ -2271,6 +2480,7 @@ pub unsafe fn vcopy_laneq_p8(a: poly8x8_t, b #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(a: poly16x4_t, b: poly16x8_t) -> poly16x4_t { static_assert_imm2!(LANE1); static_assert_imm3!(LANE2); @@ -2289,6 +2499,7 @@ pub unsafe fn vcopy_laneq_p16(a: poly16x4_t, 
#[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t { static_assert_imm1!(LANE1); static_assert_imm2!(LANE2); @@ -2305,6 +2516,7 @@ pub unsafe fn vcopy_laneq_f32(a: float32x2_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x8_t) -> int8x16_t { static_assert_imm4!(LANE1); static_assert_imm3!(LANE2); @@ -2335,6 +2547,7 @@ pub unsafe fn vcopyq_lane_s8(a: int8x16_t, b #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t { static_assert_imm3!(LANE1); static_assert_imm2!(LANE2); @@ -2357,6 +2570,7 @@ pub unsafe fn vcopyq_lane_s16(a: int16x8_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t { static_assert_imm2!(LANE1); static_assert_imm1!(LANE2); @@ -2375,6 +2589,7 @@ pub unsafe fn vcopyq_lane_s32(a: int32x4_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t { static_assert_imm4!(LANE1); static_assert_imm3!(LANE2); @@ -2405,6 +2620,7 @@ pub unsafe fn vcopyq_lane_u8(a: uint8x16_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t { static_assert_imm3!(LANE1); static_assert_imm2!(LANE2); @@ -2427,6 +2643,7 @@ pub unsafe fn vcopyq_lane_u16(a: uint16x8_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t { static_assert_imm2!(LANE1); static_assert_imm1!(LANE2); @@ -2445,6 +2662,7 @@ pub unsafe fn vcopyq_lane_u32(a: uint32x4_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x16_t, b: poly8x8_t) -> poly8x16_t { static_assert_imm4!(LANE1); static_assert_imm3!(LANE2); @@ -2475,6 +2693,7 @@ pub unsafe fn vcopyq_lane_p8(a: poly8x16_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(a: poly16x8_t, b: poly16x4_t) -> poly16x8_t { static_assert_imm3!(LANE1); static_assert_imm2!(LANE2); @@ -2497,6 +2716,7 @@ pub unsafe fn vcopyq_lane_p16(a: poly16x8_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1, LANE1 = 
1, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(a: int64x2_t, b: int64x1_t) -> int64x2_t { static_assert_imm1!(LANE1); static_assert!(LANE2 : i32 where LANE2 == 0); @@ -2513,6 +2733,7 @@ pub unsafe fn vcopyq_lane_s64(a: int64x2_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1, LANE1 = 1, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(a: uint64x2_t, b: uint64x1_t) -> uint64x2_t { static_assert_imm1!(LANE1); static_assert!(LANE2 : i32 where LANE2 == 0); @@ -2529,6 +2750,7 @@ pub unsafe fn vcopyq_lane_u64(a: uint64x2_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1, LANE1 = 1, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(a: poly64x2_t, b: poly64x1_t) -> poly64x2_t { static_assert_imm1!(LANE1); static_assert!(LANE2 : i32 where LANE2 == 0); @@ -2545,6 +2767,7 @@ pub unsafe fn vcopyq_lane_p64(a: poly64x2_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t { static_assert_imm2!(LANE1); static_assert_imm1!(LANE2); @@ -2563,6 +2786,7 @@ pub unsafe fn vcopyq_lane_f32(a: float32x4_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1, LANE1 = 1, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t { static_assert_imm1!(LANE1); static_assert!(LANE2 : i32 where LANE2 == 0); @@ -2578,6 +2802,7 @@ pub unsafe fn vcopyq_lane_f64(a: float64x2_t #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcreate_f64(a: u64) -> float64x1_t { transmute(a) } @@ -2586,6 +2811,7 @@ pub unsafe fn vcreate_f64(a: u64) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(scvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t { simd_cast(a) } @@ -2594,6 +2820,7 @@ pub unsafe fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(scvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t { simd_cast(a) } @@ -2602,6 +2829,7 @@ pub unsafe fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ucvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t { simd_cast(a) } @@ -2610,6 +2838,7 @@ pub unsafe fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ucvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t { simd_cast(a) }
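Note (illustrative, not part of the patch): the vcvt*_f64_[su]64 intrinsics above are plain per-lane numeric casts, lowered to scvtf/ucvtf. A minimal sketch, assuming an aarch64 target with NEON:

    // Hypothetical demo, not taken from the patch.
    #[cfg(target_arch = "aarch64")]
    unsafe fn cvt_demo() {
        use core::arch::aarch64::*;
        let x = vdup_n_s64(-3);  // int64x1_t: [-3]
        let f = vcvt_f64_s64(x); // float64x1_t: [-3.0]
        assert_eq!(vget_lane_f64::<0>(f), -3.0);
    }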
@@ -2618,6 +2847,7 @@ pub unsafe fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t { simd_cast(a) } @@ -2626,6 +2856,7 @@ pub unsafe fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t { let b: float32x2_t = simd_shuffle2!(a, a, [2, 3]); simd_cast(b) @@ -2635,6 +2866,7 @@ pub unsafe fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t { simd_cast(a) } @@ -2643,6 +2875,7 @@ pub unsafe fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { simd_shuffle4!(a, simd_cast(b), [0, 1, 2, 3]) } @@ -2651,6 +2884,7 @@ pub unsafe fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtxn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2664,6 +2898,7 @@ pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtxn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtxd_f32_f64(a: f64) -> f32 { simd_extract(vcvtx_f32_f64(vdupq_n_f64(a)), 0) } @@ -2672,6 +2907,7 @@ pub unsafe fn vcvtxd_f32_f64(a: f64) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtxn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { simd_shuffle4!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) } @@ -2681,6 +2917,7 @@ pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(scvtf, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); #[allow(improper_ctypes)] @@ -2696,6 +2933,7 @@ pub unsafe fn vcvt_n_f64_s64(a: int64x1_t) -> float64x1_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(scvtf, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); #[allow(improper_ctypes)] @@ -2711,6 +2949,7 @@ pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(scvtf, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -2726,6 +2965,7 @@ pub unsafe fn vcvts_n_f32_s32(a: i32) -> f32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(scvtf, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn 
vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 { static_assert!(N : i32 where N >= 1 && N <= 64); #[allow(improper_ctypes)] @@ -2741,6 +2981,7 @@ pub unsafe fn vcvtd_n_f64_s64(a: i64) -> f64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ucvtf, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); #[allow(improper_ctypes)] @@ -2756,6 +2997,7 @@ pub unsafe fn vcvt_n_f64_u64(a: uint64x1_t) -> float64x1_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ucvtf, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); #[allow(improper_ctypes)] @@ -2771,6 +3013,7 @@ pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ucvtf, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -2786,6 +3029,7 @@ pub unsafe fn vcvts_n_f32_u32(a: u32) -> f32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ucvtf, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 { static_assert!(N : i32 where N >= 1 && N <= 64); #[allow(improper_ctypes)] @@ -2801,6 +3045,7 @@ pub unsafe fn vcvtd_n_f64_u64(a: u64) -> f64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzs, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); #[allow(improper_ctypes)] @@ -2816,6 +3061,7 @@ pub unsafe fn vcvt_n_s64_f64(a: float64x1_t) -> int64x1_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzs, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); #[allow(improper_ctypes)] @@ -2831,6 +3077,7 @@ pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzs, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -2846,6 +3093,7 @@ pub unsafe fn vcvts_n_s32_f32(a: f32) -> i32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzs, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 { static_assert!(N : i32 where N >= 1 && N <= 64); #[allow(improper_ctypes)] @@ -2861,6 +3109,7 @@ pub unsafe fn vcvtd_n_s64_f64(a: f64) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); #[allow(improper_ctypes)]
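Note (illustrative, not part of the patch): the _n_ conversions treat the integer side as fixed-point with N fractional bits, so int-to-float yields a / 2^N, and float-to-int multiplies by 2^N before truncating toward zero. A small sketch on an aarch64 target:

    // Hypothetical demo, not taken from the patch.
    #[cfg(target_arch = "aarch64")]
    unsafe fn cvt_n_demo() {
        use core::arch::aarch64::*;
        // 6 read with 2 fractional bits: 6 / 4 = 1.5.
        assert_eq!(vcvtd_n_f64_s64::<2>(6), 1.5);
        // 1.5 written with 2 fractional bits: 1.5 * 4 = 6.
        assert_eq!(vcvtd_n_s64_f64::<2>(1.5), 6);
    }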
@@ -2876,6 +3125,7 @@ pub unsafe fn vcvt_n_u64_f64(a: float64x1_t) -> uint64x1_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); #[allow(improper_ctypes)] @@ -2891,6 +3141,7 @@ pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -2906,6 +3157,7 @@ pub unsafe fn vcvts_n_u32_f32(a: f32) -> u32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 { static_assert!(N : i32 where N >= 1 && N <= 64); #[allow(improper_ctypes)] @@ -2920,6 +3172,7 @@ pub unsafe fn vcvtd_n_u64_f64(a: f64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(scvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_f32_s32(a: i32) -> f32 { a as f32 } @@ -2928,6 +3181,7 @@ pub unsafe fn vcvts_f32_s32(a: i32) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(scvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_f64_s64(a: i64) -> f64 { a as f64 } @@ -2936,6 +3190,7 @@ pub unsafe fn vcvtd_f64_s64(a: i64) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ucvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_f32_u32(a: u32) -> f32 { a as f32 } @@ -2944,6 +3199,7 @@ pub unsafe fn vcvts_f32_u32(a: u32) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ucvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_f64_u64(a: u64) -> f64 { a as f64 } @@ -2952,6 +3208,7 @@ pub unsafe fn vcvtd_f64_u64(a: u64) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_s32_f32(a: f32) -> i32 { a as i32 } @@ -2960,6 +3217,7 @@ pub unsafe fn vcvts_s32_f32(a: f32) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_s64_f64(a: f64) -> i64 { a as i64 } @@ -2968,6 +3226,7 @@ pub unsafe fn vcvtd_s64_f64(a: f64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_u32_f32(a: f32) -> u32 { a as u32 } @@ -2976,6 +3235,7 @@ pub unsafe fn vcvts_u32_f32(a: f32) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_u64_f64(a: f64) -> u64 { a as u64 } @@ -2984,6 +3244,7 @@ pub unsafe fn vcvtd_u64_f64(a: f64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ 
-2997,6 +3258,7 @@ pub unsafe fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3010,6 +3272,7 @@ pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3023,6 +3286,7 @@ pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3036,6 +3300,7 @@ pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtas))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3049,6 +3314,7 @@ pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtas))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3062,6 +3328,7 @@ pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtas))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3075,6 +3342,7 @@ pub unsafe fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtas))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3088,6 +3356,7 @@ pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtas))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtas_s32_f32(a: f32) -> i32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3101,6 +3370,7 @@ pub unsafe fn vcvtas_s32_f32(a: f32) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtas))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtad_s64_f64(a: f64) -> i64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3114,6 +3384,7 @@ pub unsafe fn vcvtad_s64_f64(a: f64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtau))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3127,6 +3398,7 @@ pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtau))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtad_u64_f64(a: f64) -> u64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ 
-3140,6 +3412,7 @@ pub unsafe fn vcvtad_u64_f64(a: f64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtns))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3153,6 +3426,7 @@ pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtns))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3166,6 +3440,7 @@ pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtns))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3179,6 +3454,7 @@ pub unsafe fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtns))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3192,6 +3468,7 @@ pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtns))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtns_s32_f32(a: f32) -> i32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3205,6 +3482,7 @@ pub unsafe fn vcvtns_s32_f32(a: f32) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtns))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnd_s64_f64(a: f64) -> i64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3218,6 +3496,7 @@ pub unsafe fn vcvtnd_s64_f64(a: f64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtms))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3231,6 +3510,7 @@ pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtms))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3244,6 +3524,7 @@ pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtms))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3257,6 +3538,7 @@ pub unsafe fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtms))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3270,6 +3552,7 @@ pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtms))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtms_s32_f32(a: f32) -> i32 { #[allow(improper_ctypes)] extern 
"unadjusted" { @@ -3283,6 +3566,7 @@ pub unsafe fn vcvtms_s32_f32(a: f32) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtms))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmd_s64_f64(a: f64) -> i64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3296,6 +3580,7 @@ pub unsafe fn vcvtmd_s64_f64(a: f64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3309,6 +3594,7 @@ pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3322,6 +3608,7 @@ pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3335,6 +3622,7 @@ pub unsafe fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3348,6 +3636,7 @@ pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtps_s32_f32(a: f32) -> i32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3361,6 +3650,7 @@ pub unsafe fn vcvtps_s32_f32(a: f32) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpd_s64_f64(a: f64) -> i64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3374,6 +3664,7 @@ pub unsafe fn vcvtpd_s64_f64(a: f64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtau))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3387,6 +3678,7 @@ pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtau))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3400,6 +3692,7 @@ pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtau))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3413,6 +3706,7 @@ pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtau))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { 
#[allow(improper_ctypes)] extern "unadjusted" { @@ -3426,6 +3720,7 @@ pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtnu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3439,6 +3734,7 @@ pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtnu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3452,6 +3748,7 @@ pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtnu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3465,6 +3762,7 @@ pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtnu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3478,6 +3776,7 @@ pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtnu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3491,6 +3790,7 @@ pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtnu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3504,6 +3804,7 @@ pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtmu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3517,6 +3818,7 @@ pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtmu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3530,6 +3832,7 @@ pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtmu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3543,6 +3846,7 @@ pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtmu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3556,6 +3860,7 @@ pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtmu))] +#[stable(feature = "neon_intrinsics", since = 
"1.59.0")] pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3569,6 +3874,7 @@ pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtmu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3582,6 +3888,7 @@ pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtpu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3595,6 +3902,7 @@ pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtpu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3608,6 +3916,7 @@ pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtpu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3621,6 +3930,7 @@ pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtpu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3634,6 +3944,7 @@ pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtpu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3647,6 +3958,7 @@ pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtpu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3661,6 +3973,7 @@ pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupq_laneq_p64(a: poly64x2_t) -> poly64x2_t { static_assert_imm1!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3671,6 +3984,7 @@ pub unsafe fn vdupq_laneq_p64(a: poly64x2_t) -> poly64x2_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(dup, N = 0))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupq_lane_p64(a: poly64x1_t) -> poly64x2_t { static_assert!(N : i32 where N == 0); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3681,6 +3995,7 @@ pub unsafe fn vdupq_lane_p64(a: poly64x1_t) -> poly64x2_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupq_laneq_f64(a: float64x2_t) -> float64x2_t { static_assert_imm1!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3691,6 +4006,7 @@ pub unsafe fn 
@@ -3691,6 +4006,7 @@ pub unsafe fn vdupq_laneq_f64(a: float64x2_t) -> float64x2_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(dup, N = 0))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t { static_assert!(N : i32 where N == 0); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3701,6 +4017,7 @@ pub unsafe fn vdupq_lane_f64(a: float64x1_t) -> float64x2_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 0))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t { static_assert!(N : i32 where N == 0); a @@ -3711,6 +4028,7 @@ pub unsafe fn vdup_lane_p64(a: poly64x1_t) -> poly64x1_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 0))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t { static_assert!(N : i32 where N == 0); a @@ -3721,6 +4039,7 @@ pub unsafe fn vdup_lane_f64(a: float64x1_t) -> float64x1_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t { static_assert_imm1!(N); transmute::<u64, _>(simd_extract(a, N as u32)) @@ -3731,6 +4050,7 @@ pub unsafe fn vdup_laneq_p64(a: poly64x2_t) -> poly64x1_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t { static_assert_imm1!(N); transmute::<f64, _>(simd_extract(a, N as u32)) @@ -3741,6 +4061,7 @@ pub unsafe fn vdup_laneq_f64(a: float64x2_t) -> float64x1_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 4))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 { static_assert_imm3!(N); simd_extract(a, N as u32) @@ -3751,6 +4072,7 @@ pub unsafe fn vdupb_lane_s8(a: int8x8_t) -> i8 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 8))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 { static_assert_imm4!(N); simd_extract(a, N as u32) @@ -3761,6 +4083,7 @@ pub unsafe fn vdupb_laneq_s8(a: int8x16_t) -> i8 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 { static_assert_imm2!(N); simd_extract(a, N as u32) @@ -3771,6 +4094,7 @@ pub unsafe fn vduph_lane_s16(a: int16x4_t) -> i16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 4))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 { static_assert_imm3!(N); simd_extract(a, N as u32) @@ -3781,6 +4105,7 @@ pub unsafe fn vduph_laneq_s16(a: int16x8_t) -> i16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 { static_assert_imm1!(N); simd_extract(a, 
N as u32) @@ -3791,6 +4116,7 @@ pub unsafe fn vdups_lane_s32(a: int32x2_t) -> i32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 { static_assert_imm2!(N); simd_extract(a, N as u32) @@ -3801,6 +4127,7 @@ pub unsafe fn vdups_laneq_s32(a: int32x4_t) -> i32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 0))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 { static_assert!(N : i32 where N == 0); simd_extract(a, N as u32) @@ -3811,6 +4138,7 @@ pub unsafe fn vdupd_lane_s64(a: int64x1_t) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 { static_assert_imm1!(N); simd_extract(a, N as u32) @@ -3821,6 +4149,7 @@ pub unsafe fn vdupd_laneq_s64(a: int64x2_t) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 4))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 { static_assert_imm3!(N); simd_extract(a, N as u32) @@ -3831,6 +4160,7 @@ pub unsafe fn vdupb_lane_u8(a: uint8x8_t) -> u8 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 8))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 { static_assert_imm4!(N); simd_extract(a, N as u32) @@ -3841,6 +4171,7 @@ pub unsafe fn vdupb_laneq_u8(a: uint8x16_t) -> u8 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 { static_assert_imm2!(N); simd_extract(a, N as u32) @@ -3851,6 +4182,7 @@ pub unsafe fn vduph_lane_u16(a: uint16x4_t) -> u16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 4))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 { static_assert_imm3!(N); simd_extract(a, N as u32) @@ -3861,6 +4193,7 @@ pub unsafe fn vduph_laneq_u16(a: uint16x8_t) -> u16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 { static_assert_imm1!(N); simd_extract(a, N as u32) @@ -3871,6 +4204,7 @@ pub unsafe fn vdups_lane_u32(a: uint32x2_t) -> u32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 { static_assert_imm2!(N); simd_extract(a, N as u32) @@ -3881,6 +4215,7 @@ pub unsafe fn vdups_laneq_u32(a: uint32x4_t) -> u32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 0))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 { static_assert!(N : i32 where N == 0); simd_extract(a, N as u32) @@ -3891,6 +4226,7 @@ pub unsafe 
fn vdupd_lane_u64(a: uint64x1_t) -> u64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 { static_assert_imm1!(N); simd_extract(a, N as u32) @@ -3901,6 +4237,7 @@ pub unsafe fn vdupd_laneq_u64(a: uint64x2_t) -> u64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 4))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 { static_assert_imm3!(N); simd_extract(a, N as u32) @@ -3911,6 +4248,7 @@ pub unsafe fn vdupb_lane_p8(a: poly8x8_t) -> p8 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 8))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 { static_assert_imm4!(N); simd_extract(a, N as u32) @@ -3921,6 +4259,7 @@ pub unsafe fn vdupb_laneq_p8(a: poly8x16_t) -> p8 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 { static_assert_imm2!(N); simd_extract(a, N as u32) @@ -3931,6 +4270,7 @@ pub unsafe fn vduph_lane_p16(a: poly16x4_t) -> p16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 4))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 { static_assert_imm3!(N); simd_extract(a, N as u32) @@ -3941,6 +4281,7 @@ pub unsafe fn vduph_laneq_p16(a: poly16x8_t) -> p16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 { static_assert_imm1!(N); simd_extract(a, N as u32) @@ -3951,6 +4292,7 @@ pub unsafe fn vdups_lane_f32(a: float32x2_t) -> f32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 { static_assert_imm2!(N); simd_extract(a, N as u32) @@ -3961,6 +4303,7 @@ pub unsafe fn vdups_laneq_f32(a: float32x4_t) -> f32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 0))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 { static_assert!(N : i32 where N == 0); simd_extract(a, N as u32) @@ -3971,6 +4314,7 @@ pub unsafe fn vdupd_lane_f64(a: float64x1_t) -> f64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 { static_assert_imm1!(N); simd_extract(a, N as u32) @@ -3981,6 +4325,7 @@ pub unsafe fn vdupd_laneq_f64(a: float64x2_t) -> f64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { static_assert_imm1!(N); match N & 0b1 {
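Note (illustrative, not part of the patch): the vdup*_lane* scalar forms above compile down to a plain lane read (hence assert_instr(nop)), and vext concatenates two vectors and takes a window starting at lane N. Sketch on an aarch64 target:

    // Hypothetical demo, not taken from the patch.
    #[cfg(target_arch = "aarch64")]
    unsafe fn dup_ext_demo() {
        use core::arch::aarch64::*;
        let v = vcombine_f64(vdup_n_f64(1.0), vdup_n_f64(2.0)); // [1.0, 2.0]
        assert_eq!(vdupd_laneq_f64::<1>(v), 2.0); // read lane 1 as a scalar
        let w = vcombine_f64(vdup_n_f64(3.0), vdup_n_f64(4.0)); // [3.0, 4.0]
        let e = vextq_f64::<1>(v, w); // window of [1.0, 2.0, 3.0, 4.0] at 1
        assert_eq!(vgetq_lane_f64::<0>(e), 2.0);
        assert_eq!(vgetq_lane_f64::<1>(e), 3.0);
    }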
@@ -3995,6 +4340,7 @@ pub unsafe fn vextq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_ #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t { static_assert_imm1!(N); match N & 0b1 { @@ -4008,6 +4354,7 @@ pub unsafe fn vextq_f64(a: float64x2_t, b: float64x2_t) -> float64 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { simd_add(a, simd_mul(b, c)) } @@ -4016,6 +4363,7 @@ pub unsafe fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float6 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { simd_add(a, simd_mul(b, c)) } @@ -4024,6 +4372,7 @@ pub unsafe fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { let b: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); let c: int8x8_t = simd_shuffle8!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -4034,6 +4383,7 @@ pub unsafe fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { let b: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]); let c: int16x4_t = simd_shuffle4!(c, c, [4, 5, 6, 7]); @@ -4044,6 +4394,7 @@ pub unsafe fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { let b: int32x2_t = simd_shuffle2!(b, b, [2, 3]); let c: int32x2_t = simd_shuffle2!(c, c, [2, 3]); @@ -4054,6 +4405,7 @@ pub unsafe fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { let b: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); let c: uint8x8_t = simd_shuffle8!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -4064,6 +4416,7 @@ pub unsafe fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { let b: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]); let c: uint16x4_t = simd_shuffle4!(c, c, [4, 5, 6, 7]); @@ -4074,6 +4427,7 @@ pub unsafe fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe 
fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { let b: uint32x2_t = simd_shuffle2!(b, b, [2, 3]); let c: uint32x2_t = simd_shuffle2!(c, c, [2, 3]); @@ -4084,6 +4438,7 @@ pub unsafe fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { vmlal_high_s16(a, b, vdupq_n_s16(c)) } @@ -4092,6 +4447,7 @@ pub unsafe fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { vmlal_high_s32(a, b, vdupq_n_s32(c)) } @@ -4100,6 +4456,7 @@ pub unsafe fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { vmlal_high_u16(a, b, vdupq_n_u16(c)) } @@ -4108,6 +4465,7 @@ pub unsafe fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { vmlal_high_u32(a, b, vdupq_n_u32(c)) } @@ -4117,6 +4475,7 @@ pub unsafe fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_lane_s16(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmlal_high_s16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4127,6 +4486,7 @@ pub unsafe fn vmlal_high_lane_s16(a: int32x4_t, b: int16x8_t, c #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_laneq_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { static_assert_imm3!(LANE); vmlal_high_s16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4137,6 +4497,7 @@ pub unsafe fn vmlal_high_laneq_s16(a: int32x4_t, b: int16x8_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_lane_s32(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t { static_assert_imm1!(LANE); vmlal_high_s32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4147,6 +4508,7 @@ pub unsafe fn vmlal_high_lane_s32(a: int64x2_t, b: int32x4_t, c #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_laneq_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> 
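// Illustrative sketch of the vmlal_high_* family above (not part of the
// diff; assumes AArch64 + NEON, values are hand-picked). These widen the
// *upper* halves of their inputs and multiply-accumulate, so a register pair
// covers in one step what the non-high form needs an extra extract for:
#[cfg(target_arch = "aarch64")]
unsafe fn demo_mlal_high() {
    use core::arch::aarch64::*;
    let acc = vdupq_n_s32(10);
    let b = vld1q_s16([0, 0, 0, 0, 1, 2, 3, 4].as_ptr());
    let c = vdupq_n_s16(3);
    // acc + widen(b[4..8]) * widen(c[4..8]) = [13, 16, 19, 22]
    let r = vmlal_high_s16(acc, b, c);
    assert_eq!(vgetq_lane_s32::<3>(r), 22);
}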
int64x2_t { static_assert_imm2!(LANE); vmlal_high_s32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4157,6 +4519,7 @@ pub unsafe fn vmlal_high_laneq_s32(a: int64x2_t, b: int32x4_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_lane_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmlal_high_u16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4167,6 +4530,7 @@ pub unsafe fn vmlal_high_lane_u16(a: uint32x4_t, b: uint16x8_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_laneq_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { static_assert_imm3!(LANE); vmlal_high_u16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4177,6 +4541,7 @@ pub unsafe fn vmlal_high_laneq_u16(a: uint32x4_t, b: uint16x8_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_lane_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x2_t) -> uint64x2_t { static_assert_imm1!(LANE); vmlal_high_u32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4187,6 +4552,7 @@ pub unsafe fn vmlal_high_lane_u32(a: uint64x2_t, b: uint32x4_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_laneq_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { static_assert_imm2!(LANE); vmlal_high_u32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4196,6 +4562,7 @@ pub unsafe fn vmlal_high_laneq_u32(a: uint64x2_t, b: uint32x4_t #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { simd_sub(a, simd_mul(b, c)) } @@ -4204,6 +4571,7 @@ pub unsafe fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float6 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { simd_sub(a, simd_mul(b, c)) } @@ -4212,6 +4580,7 @@ pub unsafe fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { let b: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); let c: int8x8_t = simd_shuffle8!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -4222,6 +4591,7 @@ pub unsafe fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2))] 
+#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { let b: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]); let c: int16x4_t = simd_shuffle4!(c, c, [4, 5, 6, 7]); @@ -4232,6 +4602,7 @@ pub unsafe fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { let b: int32x2_t = simd_shuffle2!(b, b, [2, 3]); let c: int32x2_t = simd_shuffle2!(c, c, [2, 3]); @@ -4242,6 +4613,7 @@ pub unsafe fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { let b: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); let c: uint8x8_t = simd_shuffle8!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -4252,6 +4624,7 @@ pub unsafe fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { let b: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]); let c: uint16x4_t = simd_shuffle4!(c, c, [4, 5, 6, 7]); @@ -4262,6 +4635,7 @@ pub unsafe fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { let b: uint32x2_t = simd_shuffle2!(b, b, [2, 3]); let c: uint32x2_t = simd_shuffle2!(c, c, [2, 3]); @@ -4272,6 +4646,7 @@ pub unsafe fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { vmlsl_high_s16(a, b, vdupq_n_s16(c)) } @@ -4280,6 +4655,7 @@ pub unsafe fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { vmlsl_high_s32(a, b, vdupq_n_s32(c)) } @@ -4288,6 +4664,7 @@ pub unsafe fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { vmlsl_high_u16(a, b, vdupq_n_u16(c)) } @@ -4296,6 +4673,7 @@ pub unsafe fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { vmlsl_high_u32(a, b, vdupq_n_u32(c)) } @@ -4305,6 +4683,7 @@ pub unsafe 
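// Illustrative sketch of vmlsl_high_n_* above (not part of the diff; assumes
// AArch64 + NEON). The _n forms broadcast a scalar, widen the upper halves,
// and *subtract* the products from the accumulator:
#[cfg(target_arch = "aarch64")]
unsafe fn demo_mlsl_high_n() {
    use core::arch::aarch64::*;
    let acc = vdupq_n_s32(100);
    let b = vld1q_s16([0, 0, 0, 0, 5, 6, 7, 8].as_ptr());
    // 100 - [50, 60, 70, 80] = [50, 40, 30, 20]
    let r = vmlsl_high_n_s16(acc, b, 10);
    assert_eq!(vgetq_lane_s32::<0>(r), 50);
}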
fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_lane_s16(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmlsl_high_s16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4315,6 +4694,7 @@ pub unsafe fn vmlsl_high_lane_s16(a: int32x4_t, b: int16x8_t, c #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_laneq_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { static_assert_imm3!(LANE); vmlsl_high_s16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4325,6 +4705,7 @@ pub unsafe fn vmlsl_high_laneq_s16(a: int32x4_t, b: int16x8_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_lane_s32(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t { static_assert_imm1!(LANE); vmlsl_high_s32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4335,6 +4716,7 @@ pub unsafe fn vmlsl_high_lane_s32(a: int64x2_t, b: int32x4_t, c #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_laneq_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { static_assert_imm2!(LANE); vmlsl_high_s32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4345,6 +4727,7 @@ pub unsafe fn vmlsl_high_laneq_s32(a: int64x2_t, b: int32x4_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_lane_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmlsl_high_u16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4355,6 +4738,7 @@ pub unsafe fn vmlsl_high_lane_u16(a: uint32x4_t, b: uint16x8_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_laneq_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { static_assert_imm3!(LANE); vmlsl_high_u16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4365,6 +4749,7 @@ pub unsafe fn vmlsl_high_laneq_u16(a: uint32x4_t, b: uint16x8_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_lane_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x2_t) -> uint64x2_t { static_assert_imm1!(LANE); vmlsl_high_u32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ 
-4375,6 +4760,7 @@ pub unsafe fn vmlsl_high_lane_u32(a: uint64x2_t, b: uint32x4_t, #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_laneq_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { static_assert_imm2!(LANE); vmlsl_high_u32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4384,6 +4770,7 @@ pub unsafe fn vmlsl_high_laneq_u32(a: uint64x2_t, b: uint32x4_t #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(xtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { let c: int8x8_t = simd_cast(b); simd_shuffle16!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -4393,6 +4780,7 @@ pub unsafe fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(xtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { let c: int16x4_t = simd_cast(b); simd_shuffle8!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -4402,6 +4790,7 @@ pub unsafe fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(xtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { let c: int32x2_t = simd_cast(b); simd_shuffle4!(a, c, [0, 1, 2, 3]) @@ -4411,6 +4800,7 @@ pub unsafe fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(xtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { let c: uint8x8_t = simd_cast(b); simd_shuffle16!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -4420,6 +4810,7 @@ pub unsafe fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(xtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { let c: uint16x4_t = simd_cast(b); simd_shuffle8!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -4429,6 +4820,7 @@ pub unsafe fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(xtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { let c: uint32x2_t = simd_cast(b); simd_shuffle4!(a, c, [0, 1, 2, 3]) @@ -4438,6 +4830,7 @@ pub unsafe fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(neg))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vneg_s64(a: int64x1_t) -> int64x1_t { simd_neg(a) } @@ -4446,6 +4839,7 @@ pub unsafe fn vneg_s64(a: int64x1_t) -> int64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(neg))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vnegq_s64(a: int64x2_t) -> int64x2_t { simd_neg(a) } @@ -4454,6 +4848,7 @@ pub unsafe fn vnegq_s64(a: int64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] 
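// Illustrative sketch of vmovn_high_* above (not part of the diff; assumes
// AArch64 + NEON). It truncates the wide input into the upper half of the
// destination while keeping `a` in the lower half, the usual way to narrow
// two vectors into one without a separate combine:
#[cfg(target_arch = "aarch64")]
unsafe fn demo_movn_high() {
    use core::arch::aarch64::*;
    let low: int8x8_t = vdup_n_s8(1);
    let wide: int16x8_t = vdupq_n_s16(0x1234);
    let r = vmovn_high_s16(low, wide); // 0x1234 truncates to 0x34 per lane
    assert_eq!(vgetq_lane_s8::<0>(r), 1);
    assert_eq!(vgetq_lane_s8::<8>(r), 0x34);
}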
#[cfg_attr(test, assert_instr(neg))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vnegd_s64(a: i64) -> i64 { a.wrapping_neg() } @@ -4462,6 +4857,7 @@ pub unsafe fn vnegd_s64(a: i64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fneg))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vneg_f64(a: float64x1_t) -> float64x1_t { simd_neg(a) } @@ -4470,6 +4866,7 @@ pub unsafe fn vneg_f64(a: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fneg))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vnegq_f64(a: float64x2_t) -> float64x2_t { simd_neg(a) } @@ -4478,6 +4875,7 @@ pub unsafe fn vnegq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqneg))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqneg_s64(a: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4491,6 +4889,7 @@ pub unsafe fn vqneg_s64(a: int64x1_t) -> int64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqneg))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4504,6 +4903,7 @@ pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqneg))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqnegb_s8(a: i8) -> i8 { simd_extract(vqneg_s8(vdup_n_s8(a)), 0) } @@ -4512,6 +4912,7 @@ pub unsafe fn vqnegb_s8(a: i8) -> i8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqneg))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqnegh_s16(a: i16) -> i16 { simd_extract(vqneg_s16(vdup_n_s16(a)), 0) } @@ -4520,6 +4921,7 @@ pub unsafe fn vqnegh_s16(a: i16) -> i16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqneg))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqnegs_s32(a: i32) -> i32 { simd_extract(vqneg_s32(vdup_n_s32(a)), 0) } @@ -4528,6 +4930,7 @@ pub unsafe fn vqnegs_s32(a: i32) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqneg))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqnegd_s64(a: i64) -> i64 { simd_extract(vqneg_s64(vdup_n_s64(a)), 0) } @@ -4536,6 +4939,7 @@ pub unsafe fn vqnegd_s64(a: i64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqsub))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqsubb_s8(a: i8, b: i8) -> i8 { let a: int8x8_t = vdup_n_s8(a); let b: int8x8_t = vdup_n_s8(b); @@ -4546,6 +4950,7 @@ pub unsafe fn vqsubb_s8(a: i8, b: i8) -> i8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqsub))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqsubh_s16(a: i16, b: i16) -> i16 { let a: int16x4_t = vdup_n_s16(a); let b: int16x4_t = vdup_n_s16(b); @@ -4556,6 +4961,7 @@ pub unsafe fn vqsubh_s16(a: i16, b: i16) -> i16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqsub))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqsubb_u8(a: u8, b: u8) -> u8 { let a: uint8x8_t = vdup_n_u8(a); let b: uint8x8_t = vdup_n_u8(b); @@ -4566,6 +4972,7 @@ pub unsafe fn vqsubb_u8(a: u8, 
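// Illustrative sketch of the scalar saturating ops above (not part of the
// diff; assumes AArch64 + NEON). SQNEG/SQSUB/UQSUB clamp at the type bounds
// instead of wrapping:
#[cfg(target_arch = "aarch64")]
unsafe fn demo_saturating() {
    use core::arch::aarch64::*;
    assert_eq!(vqnegb_s8(i8::MIN), i8::MAX); // -(-128) is unrepresentable
    assert_eq!(vqsubb_u8(1, 3), 0);          // would underflow; clamps to 0
    assert_eq!(vqsubh_s16(i16::MIN, 1), i16::MIN); // clamps at the low end
}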
b: u8) -> u8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqsub))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqsubh_u16(a: u16, b: u16) -> u16 { let a: uint16x4_t = vdup_n_u16(a); let b: uint16x4_t = vdup_n_u16(b); @@ -4576,6 +4983,7 @@ pub unsafe fn vqsubh_u16(a: u16, b: u16) -> u16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqsub))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4589,6 +4997,7 @@ pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqsub))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4602,6 +5011,7 @@ pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqsub))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqsubs_s32(a: i32, b: i32) -> i32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4615,6 +5025,7 @@ pub unsafe fn vqsubs_s32(a: i32, b: i32) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqsub))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqsubd_s64(a: i64, b: i64) -> i64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4628,6 +5039,7 @@ pub unsafe fn vqsubd_s64(a: i64, b: i64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rbit))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4641,6 +5053,7 @@ pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rbit))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4654,6 +5067,7 @@ pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rbit))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrbit_u8(a: uint8x8_t) -> uint8x8_t { transmute(vrbit_s8(transmute(a))) } @@ -4662,6 +5076,7 @@ pub unsafe fn vrbit_u8(a: uint8x8_t) -> uint8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rbit))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t { transmute(vrbitq_s8(transmute(a))) } @@ -4670,6 +5085,7 @@ pub unsafe fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rbit))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrbit_p8(a: poly8x8_t) -> poly8x8_t { transmute(vrbit_s8(transmute(a))) } @@ -4678,6 +5094,7 @@ pub unsafe fn vrbit_p8(a: poly8x8_t) -> poly8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rbit))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { transmute(vrbitq_s8(transmute(a))) } @@ -4686,6 +5103,7 @@ pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintx))] +#[stable(feature 
= "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4699,6 +5117,7 @@ pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4712,6 +5131,7 @@ pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndx_f64(a: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4725,6 +5145,7 @@ pub unsafe fn vrndx_f64(a: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4738,6 +5159,7 @@ pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frinta))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4751,6 +5173,7 @@ pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frinta))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4764,6 +5187,7 @@ pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frinta))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrnda_f64(a: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4777,6 +5201,7 @@ pub unsafe fn vrnda_f64(a: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frinta))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4790,6 +5215,7 @@ pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndn_f64(a: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4803,6 +5229,7 @@ pub unsafe fn vrndn_f64(a: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4816,6 +5243,7 @@ pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndns_f32(a: f32) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4829,6 +5257,7 @@ pub unsafe fn vrndns_f32(a: f32) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, 
assert_instr(frintm))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4842,6 +5271,7 @@ pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintm))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4855,6 +5285,7 @@ pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintm))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndm_f64(a: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4868,6 +5299,7 @@ pub unsafe fn vrndm_f64(a: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintm))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4881,6 +5313,7 @@ pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4894,6 +5327,7 @@ pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4907,6 +5341,7 @@ pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndp_f64(a: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4920,6 +5355,7 @@ pub unsafe fn vrndp_f64(a: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4933,6 +5369,7 @@ pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintz))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4946,6 +5383,7 @@ pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintz))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4959,6 +5397,7 @@ pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintz))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrnd_f64(a: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4972,6 +5411,7 @@ pub unsafe fn vrnd_f64(a: float64x1_t) -> float64x1_t { #[inline] 
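// Rounding-mode map for the vrnd* family above (editorial note, not part of
// the diff): vrndn = ties-to-even (frintn), vrnda = ties-away (frinta),
// vrndm = floor (frintm), vrndp = ceil (frintp), vrnd = trunc (frintz),
// vrndx/vrndi = current FPCR mode (frintx/frinti). Sketch, assuming AArch64:
#[cfg(target_arch = "aarch64")]
unsafe fn demo_round() {
    use core::arch::aarch64::*;
    assert_eq!(vrndns_f32(2.5), 2.0); // ties-to-even rounds 2.5 down
    assert_eq!(vrndns_f32(3.5), 4.0); // ...and 3.5 up
}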
#[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frintz))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4985,6 +5425,7 @@ pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frinti))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4998,6 +5439,7 @@ pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frinti))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5011,6 +5453,7 @@ pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frinti))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndi_f64(a: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5024,6 +5467,7 @@ pub unsafe fn vrndi_f64(a: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frinti))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5037,6 +5481,7 @@ pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqaddb_s8(a: i8, b: i8) -> i8 { let a: int8x8_t = vdup_n_s8(a); let b: int8x8_t = vdup_n_s8(b); @@ -5047,6 +5492,7 @@ pub unsafe fn vqaddb_s8(a: i8, b: i8) -> i8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqaddh_s16(a: i16, b: i16) -> i16 { let a: int16x4_t = vdup_n_s16(a); let b: int16x4_t = vdup_n_s16(b); @@ -5057,6 +5503,7 @@ pub unsafe fn vqaddh_s16(a: i16, b: i16) -> i16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqaddb_u8(a: u8, b: u8) -> u8 { let a: uint8x8_t = vdup_n_u8(a); let b: uint8x8_t = vdup_n_u8(b); @@ -5067,6 +5514,7 @@ pub unsafe fn vqaddb_u8(a: u8, b: u8) -> u8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqaddh_u16(a: u16, b: u16) -> u16 { let a: uint16x4_t = vdup_n_u16(a); let b: uint16x4_t = vdup_n_u16(b); @@ -5077,6 +5525,7 @@ pub unsafe fn vqaddh_u16(a: u16, b: u16) -> u16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5090,6 +5539,7 @@ pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqaddd_u64(a: u64, b: u64) -> u64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5103,6 +5553,7 @@ pub unsafe fn vqaddd_u64(a: u64, b: u64) -> 
u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqadds_s32(a: i32, b: i32) -> i32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5116,6 +5567,7 @@ pub unsafe fn vqadds_s32(a: i32, b: i32) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqaddd_s64(a: i64, b: i64) -> i64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5129,6 +5581,7 @@ pub unsafe fn vqaddd_s64(a: i64, b: i64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5142,6 +5595,7 @@ pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5155,6 +5609,7 @@ pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5168,6 +5623,7 @@ pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5181,6 +5637,7 @@ pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5194,6 +5651,7 @@ pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5207,6 +5665,7 @@ pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5220,6 +5679,7 @@ pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t { transmute(vld2q_s64(transmute(a))) } @@ -5228,6 +5688,7 @@ pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t { #[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t { transmute(vld2q_s64(transmute(a))) } @@ -5236,6 +5697,7 @@ pub unsafe fn vld2q_p64(a: *const p64) -> 
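// Illustrative sketch contrasting vld1*_x2 and vld2* above (not part of the
// diff; assumes AArch64 + NEON). vld1_*_x2 is a plain multi-register load,
// while ld2 de-interleaves: element 0 goes to vector 0, element 1 to
// vector 1, and so on:
#[cfg(target_arch = "aarch64")]
unsafe fn demo_ld2() {
    use core::arch::aarch64::*;
    let data: [i64; 4] = [1, 2, 3, 4];
    let pair: int64x2x2_t = vld2q_s64(data.as_ptr());
    assert_eq!(vgetq_lane_s64::<0>(pair.0), 1); // evens: [1, 3]
    assert_eq!(vgetq_lane_s64::<0>(pair.1), 2); // odds:  [2, 4]
}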
poly64x2x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5249,6 +5711,7 @@ pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5262,6 +5725,7 @@ pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5275,6 +5739,7 @@ pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { transmute(vld2q_dup_s64(transmute(a))) } @@ -5283,6 +5748,7 @@ pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { #[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld2r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t { transmute(vld2q_dup_s64(transmute(a))) } @@ -5291,6 +5757,7 @@ pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5304,6 +5771,7 @@ pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5318,6 +5786,7 @@ pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> int8x16x2_t { static_assert_imm4!(LANE); #[allow(improper_ctypes)] @@ -5333,6 +5802,7 @@ pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> in #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s64(a: *const i64, b: int64x1x2_t) -> int64x1x2_t { static_assert!(LANE : i32 where LANE == 0); #[allow(improper_ctypes)] @@ -5348,6 +5818,7 @@ pub unsafe fn vld2_lane_s64(a: *const i64, b: int64x1x2_t) -> i #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> int64x2x2_t { static_assert_imm1!(LANE); #[allow(improper_ctypes)] @@ -5363,6 +5834,7 @@ pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> #[target_feature(enable = "neon,aes")] 
#[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t { static_assert!(LANE : i32 where LANE == 0); transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -5373,6 +5845,7 @@ pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t { static_assert_imm1!(LANE); transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -5383,6 +5856,7 @@ pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t { static_assert_imm4!(LANE); transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b))) @@ -5393,6 +5867,7 @@ pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> u #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t { static_assert!(LANE : i32 where LANE == 0); transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -5403,6 +5878,7 @@ pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t { static_assert_imm1!(LANE); transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -5413,6 +5889,7 @@ pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t { static_assert_imm4!(LANE); transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b))) @@ -5423,6 +5900,7 @@ pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> p #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> float64x1x2_t { static_assert!(LANE : i32 where LANE == 0); #[allow(improper_ctypes)] @@ -5438,6 +5916,7 @@ pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -> float64x2x2_t { static_assert_imm1!(LANE); #[allow(improper_ctypes)] @@ -5452,6 +5931,7 @@ pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) - #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { #[allow(improper_ctypes)] 
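// Illustrative sketch of the *_lane loads above (not part of the diff;
// assumes AArch64 + NEON; the tuple-struct construction of int8x16x2_t
// relies on its public fields). They read one interleaved element group from
// memory and insert it at lane LANE of each vector in `b`, leaving every
// other lane untouched:
#[cfg(target_arch = "aarch64")]
unsafe fn demo_ld2_lane() {
    use core::arch::aarch64::*;
    let bytes: [i8; 2] = [42, 43];
    let init = int8x16x2_t(vdupq_n_s8(0), vdupq_n_s8(0));
    let r = vld2q_lane_s8::<5>(bytes.as_ptr(), init);
    assert_eq!(vgetq_lane_s8::<5>(r.0), 42);
    assert_eq!(vgetq_lane_s8::<5>(r.1), 43);
    assert_eq!(vgetq_lane_s8::<0>(r.0), 0); // other lanes pass through
}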
extern "unadjusted" { @@ -5465,6 +5945,7 @@ pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t { transmute(vld3q_s64(transmute(a))) } @@ -5473,6 +5954,7 @@ pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t { #[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld3))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { transmute(vld3q_s64(transmute(a))) } @@ -5481,6 +5963,7 @@ pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5494,6 +5977,7 @@ pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5507,6 +5991,7 @@ pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5520,6 +6005,7 @@ pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { transmute(vld3q_dup_s64(transmute(a))) } @@ -5528,6 +6014,7 @@ pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { #[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld3r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t { transmute(vld3q_dup_s64(transmute(a))) } @@ -5536,6 +6023,7 @@ pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5549,6 +6037,7 @@ pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5563,6 +6052,7 @@ pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> int8x16x3_t { static_assert_imm4!(LANE); #[allow(improper_ctypes)] @@ -5578,6 +6068,7 @@ pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> in #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] 
+#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> int64x1x3_t { static_assert!(LANE : i32 where LANE == 0); #[allow(improper_ctypes)] @@ -5593,6 +6084,7 @@ pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> i #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> int64x2x3_t { static_assert_imm1!(LANE); #[allow(improper_ctypes)] @@ -5608,6 +6100,7 @@ pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t { static_assert!(LANE : i32 where LANE == 0); transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -5618,6 +6111,7 @@ pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t { static_assert_imm1!(LANE); transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -5628,6 +6122,7 @@ pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t { static_assert_imm4!(LANE); transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b))) @@ -5638,6 +6133,7 @@ pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> p #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t { static_assert_imm4!(LANE); transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b))) @@ -5648,6 +6144,7 @@ pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> u #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t { static_assert!(LANE : i32 where LANE == 0); transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -5658,6 +6155,7 @@ pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t { static_assert_imm1!(LANE); transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -5668,6 +6166,7 @@ pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) -> float64x1x3_t { static_assert!(LANE : i32 where LANE == 0); 
#[allow(improper_ctypes)] @@ -5683,6 +6182,7 @@ pub unsafe fn vld3_lane_f64(a: *const f64, b: float64x1x3_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) -> float64x2x3_t { static_assert_imm1!(LANE); #[allow(improper_ctypes)] @@ -5697,6 +6197,7 @@ pub unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) - #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5710,6 +6211,7 @@ pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t { transmute(vld4q_s64(transmute(a))) } @@ -5718,6 +6220,7 @@ pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t { #[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { transmute(vld4q_s64(transmute(a))) } @@ -5726,6 +6229,7 @@ pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5739,6 +6243,7 @@ pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5752,6 +6257,7 @@ pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5765,6 +6271,7 @@ pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { transmute(vld4q_dup_s64(transmute(a))) } @@ -5773,6 +6280,7 @@ pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { #[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t { transmute(vld4q_dup_s64(transmute(a))) } @@ -5781,6 +6289,7 @@ pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5794,6 +6303,7 @@ pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", 
since = "1.59.0")] pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5808,6 +6318,7 @@ pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> int8x16x4_t { static_assert_imm4!(LANE); #[allow(improper_ctypes)] @@ -5823,6 +6334,7 @@ pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> in #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_s64(a: *const i64, b: int64x1x4_t) -> int64x1x4_t { static_assert!(LANE : i32 where LANE == 0); #[allow(improper_ctypes)] @@ -5838,6 +6350,7 @@ pub unsafe fn vld4_lane_s64(a: *const i64, b: int64x1x4_t) -> i #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> int64x2x4_t { static_assert_imm1!(LANE); #[allow(improper_ctypes)] @@ -5853,6 +6366,7 @@ pub unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_p64(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t { static_assert!(LANE : i32 where LANE == 0); transmute(vld4_lane_s64::(transmute(a), transmute(b))) @@ -5863,6 +6377,7 @@ pub unsafe fn vld4_lane_p64(a: *const p64, b: poly64x1x4_t) -> #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_p64(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t { static_assert_imm1!(LANE); transmute(vld4q_lane_s64::(transmute(a), transmute(b))) @@ -5873,6 +6388,7 @@ pub unsafe fn vld4q_lane_p64(a: *const p64, b: poly64x2x4_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_p8(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t { static_assert_imm4!(LANE); transmute(vld4q_lane_s8::(transmute(a), transmute(b))) @@ -5883,6 +6399,7 @@ pub unsafe fn vld4q_lane_p8(a: *const p8, b: poly8x16x4_t) -> p #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_u8(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t { static_assert_imm4!(LANE); transmute(vld4q_lane_s8::(transmute(a), transmute(b))) @@ -5893,6 +6410,7 @@ pub unsafe fn vld4q_lane_u8(a: *const u8, b: uint8x16x4_t) -> u #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_u64(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t { static_assert!(LANE : i32 where LANE == 0); transmute(vld4_lane_s64::(transmute(a), transmute(b))) @@ -5903,6 +6421,7 @@ pub unsafe fn vld4_lane_u64(a: *const u64, b: uint64x1x4_t) -> 
#[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_u64(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t { static_assert_imm1!(LANE); transmute(vld4q_lane_s64::(transmute(a), transmute(b))) @@ -5913,6 +6432,7 @@ pub unsafe fn vld4q_lane_u64(a: *const u64, b: uint64x2x4_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_f64(a: *const f64, b: float64x1x4_t) -> float64x1x4_t { static_assert!(LANE : i32 where LANE == 0); #[allow(improper_ctypes)] @@ -5928,6 +6448,7 @@ pub unsafe fn vld4_lane_f64(a: *const f64, b: float64x1x4_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) -> float64x2x4_t { static_assert_imm1!(LANE); #[allow(improper_ctypes)] @@ -5943,6 +6464,7 @@ pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) - #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_lane_f64(a: *mut f64, b: float64x1_t) { static_assert!(LANE : i32 where LANE == 0); *a = simd_extract(b, LANE as u32); @@ -5953,6 +6475,7 @@ pub unsafe fn vst1_lane_f64(a: *mut f64, b: float64x1_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_lane_f64(a: *mut f64, b: float64x2_t) { static_assert_imm1!(LANE); *a = simd_extract(b, LANE as u32); @@ -5962,6 +6485,7 @@ pub unsafe fn vst1q_lane_f64(a: *mut f64, b: float64x2_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5975,6 +6499,7 @@ pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5988,6 +6513,7 @@ pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6001,6 +6527,7 @@ pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6014,6 +6541,7 @@ pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) { #[allow(improper_ctypes)] extern 
"unadjusted" { @@ -6027,6 +6555,7 @@ pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6040,6 +6569,7 @@ pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6053,6 +6583,7 @@ pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) { transmute(vst2q_s64(transmute(a), transmute(b))) } @@ -6061,6 +6592,7 @@ pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) { #[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) { transmute(vst2q_s64(transmute(a), transmute(b))) } @@ -6069,6 +6601,7 @@ pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6082,6 +6615,7 @@ pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6096,6 +6630,7 @@ pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { static_assert_imm4!(LANE); #[allow(improper_ctypes)] @@ -6111,6 +6646,7 @@ pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s64(a: *mut i64, b: int64x1x2_t) { static_assert!(LANE : i32 where LANE == 0); #[allow(improper_ctypes)] @@ -6126,6 +6662,7 @@ pub unsafe fn vst2_lane_s64(a: *mut i64, b: int64x1x2_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { static_assert_imm1!(LANE); #[allow(improper_ctypes)] @@ -6141,6 +6678,7 @@ pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_u8(a: *mut u8, b: uint8x16x2_t) { static_assert_imm4!(LANE); transmute(vst2q_lane_s8::(transmute(a), transmute(b))) @@ -6151,6 +6689,7 @@ pub unsafe fn vst2q_lane_u8(a: *mut u8, b: 
@@ -6151,6 +6689,7 @@ pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) { static_assert!(LANE : i32 where LANE == 0); transmute(vst2_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -6161,6 +6700,7 @@ pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) { static_assert_imm1!(LANE); transmute(vst2q_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -6171,6 +6711,7 @@ pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) { static_assert_imm4!(LANE); transmute(vst2q_lane_s8::<LANE>(transmute(a), transmute(b))) @@ -6181,6 +6722,7 @@ pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) { #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) { static_assert!(LANE : i32 where LANE == 0); transmute(vst2_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -6191,6 +6733,7 @@ pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) { #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) { static_assert_imm1!(LANE); transmute(vst2q_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -6201,6 +6744,7 @@ pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) { static_assert!(LANE : i32 where LANE == 0); #[allow(improper_ctypes)] @@ -6216,6 +6760,7 @@ pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) { static_assert_imm1!(LANE); #[allow(improper_ctypes)] @@ -6230,6 +6775,7 @@ pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6243,6 +6789,7 @@ pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) { transmute(vst3q_s64(transmute(a), transmute(b))) } @@ -6251,6 +6798,7 @@ pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) { #[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st3))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) { transmute(vst3q_s64(transmute(a), transmute(b))) } @@ -6259,6 +6807,7 @@ pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6272,6 +6821,7 @@ pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6286,6 +6836,7 @@ pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) { static_assert_imm4!(LANE); #[allow(improper_ctypes)] @@ -6301,6 +6852,7 @@ pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) { static_assert!(LANE : i32 where LANE == 0); #[allow(improper_ctypes)] @@ -6316,6 +6868,7 @@ pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) { static_assert_imm1!(LANE); #[allow(improper_ctypes)] @@ -6331,6 +6884,7 @@ pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) { static_assert_imm4!(LANE); transmute(vst3q_lane_s8::<LANE>(transmute(a), transmute(b))) @@ -6341,6 +6895,7 @@ pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) { static_assert!(LANE : i32 where LANE == 0); transmute(vst3_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -6351,6 +6906,7 @@ pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) { static_assert_imm1!(LANE); transmute(vst3q_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -6361,6 +6917,7 @@ pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) { static_assert_imm4!(LANE); transmute(vst3q_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -6371,6 +6928,7 @@ pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) { #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) { static_assert!(LANE : i32 where LANE == 0); transmute(vst3_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -6381,6 +6939,7 @@ pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) { #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) { static_assert_imm1!(LANE); transmute(vst3q_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -6391,6 +6950,7 @@ pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) { static_assert!(LANE : i32 where LANE == 0); #[allow(improper_ctypes)] @@ -6406,6 +6966,7 @@ pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) { static_assert_imm1!(LANE); #[allow(improper_ctypes)] @@ -6420,6 +6981,7 @@ pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6433,6 +6995,7 @@ pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) { transmute(vst4q_s64(transmute(a), transmute(b))) } @@ -6441,6 +7004,7 @@ pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) { #[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) { transmute(vst4q_s64(transmute(a), transmute(b))) } @@ -6449,6 +7013,7 @@ pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6462,6 +7027,7 @@ pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6476,6 +7042,7 @@ pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) { static_assert_imm4!(LANE); #[allow(improper_ctypes)] @@ -6491,6 +7058,7 @@ pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) { static_assert!(LANE : i32 where LANE == 0); #[allow(improper_ctypes)] @@ -6506,6 +7074,7 @@ pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) { static_assert_imm1!(LANE); #[allow(improper_ctypes)] @@ -6521,6 +7090,7 @@ pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) { static_assert_imm4!(LANE); transmute(vst4q_lane_s8::<LANE>(transmute(a), transmute(b))) @@ -6531,6 +7101,7 @@ pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) { static_assert!(LANE : i32 where LANE == 0); transmute(vst4_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -6541,6 +7112,7 @@ pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) { static_assert_imm1!(LANE); transmute(vst4q_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -6551,6 +7123,7 @@ pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) { static_assert_imm4!(LANE); transmute(vst4q_lane_s8::<LANE>(transmute(a), transmute(b))) @@ -6561,6 +7134,7 @@ pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) { #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) { static_assert!(LANE : i32 where LANE == 0); transmute(vst4_lane_s64::<LANE>(transmute(a), transmute(b))) @@ -6571,6 +7145,7 @@ pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) { #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) { static_assert_imm1!(LANE); transmute(vst4q_lane_s64::<LANE>(transmute(a), transmute(b)))
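Review note, not part of the patch: the st4 lane stores write one element from each of the four registers to four consecutive memory slots. Sketch:

#[cfg(target_arch = "aarch64")]
unsafe fn store_element_1(dst: *mut u64, quad: core::arch::aarch64::uint64x2x4_t) {
    use core::arch::aarch64::vst4q_lane_u64;
    // Writes lane 1 of quad.0 .. quad.3 to dst[0..4]; static_assert_imm1!
    // restricts LANE to 0 or 1 for these 2-lane vectors.
    vst4q_lane_u64::<1>(dst, quad);
}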
@@ -6581,6 +7156,7 @@ pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) { static_assert!(LANE : i32 where LANE == 0); #[allow(improper_ctypes)] @@ -6596,6 +7172,7 @@ pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) { static_assert_imm1!(LANE); #[allow(improper_ctypes)] @@ -6610,6 +7187,7 @@ pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { simd_mul(a, b) } @@ -6618,6 +7196,7 @@ pub unsafe fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_mul(a, b) } @@ -6626,6 +7205,7 @@ pub unsafe fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t { simd_mul(a, vdup_n_f64(b)) } @@ -6634,6 +7214,7 @@ pub unsafe fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t { simd_mul(a, vdupq_n_f64(b)) } @@ -6643,6 +7224,7 @@ pub unsafe fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_mul(a, transmute::<f64, _>(simd_extract(b, LANE as u32))) @@ -6653,6 +7235,7 @@ pub unsafe fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t { static_assert_imm1!(LANE); simd_mul(a, transmute::<f64, _>(simd_extract(b, LANE as u32))) @@ -6663,6 +7246,7 @@ pub unsafe fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t { static_assert!(LANE : i32 where LANE == 0); simd_mul(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -6673,6 +7257,7 @@ pub unsafe fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t { static_assert_imm1!(LANE); simd_mul(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -6683,6 +7268,7 @@ pub unsafe fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) - #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 { static_assert_imm1!(LANE); let b: f32 = simd_extract(b, LANE as u32); @@ -6694,6 +7280,7 @@ pub unsafe fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 { static_assert_imm2!(LANE); let b: f32 = simd_extract(b, LANE as u32); @@ -6705,6 +7292,7 @@ pub unsafe fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 { static_assert!(LANE : i32 where LANE == 0); let b: f64 = simd_extract(b, LANE as u32); @@ -6716,6 +7304,7 @@ pub unsafe fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 { static_assert_imm1!(LANE); let b: f64 = simd_extract(b, LANE as u32); @@ -6726,6 +7315,7 @@ pub unsafe fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { let a: int8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); let b: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -6736,6 +7326,7 @@ pub unsafe fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { let a: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]); let b: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]); @@ -6746,6 +7337,7 @@ pub unsafe fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { let a: int32x2_t = simd_shuffle2!(a, a, [2, 3]); let b: int32x2_t = simd_shuffle2!(b, b, [2, 3]); @@ -6756,6 +7348,7 @@ pub unsafe fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { let a: uint8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); let b: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
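Review note, not part of the patch: the fmul *_lane_* forms stabilized above broadcast one lane of b across the vector before an ordinary multiply. Sketch:

#[cfg(target_arch = "aarch64")]
unsafe fn scale_by_lane_1(a: core::arch::aarch64::float64x2_t, b: core::arch::aarch64::float64x2_t) -> core::arch::aarch64::float64x2_t {
    use core::arch::aarch64::vmulq_laneq_f64;
    vmulq_laneq_f64::<1>(a, b) // [a0 * b1, a1 * b1]
}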
@@ -6766,6 +7359,7 @@ pub unsafe fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { let a: uint16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]); let b: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]); @@ -6776,6 +7370,7 @@ pub unsafe fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { let a: uint32x2_t = simd_shuffle2!(a, a, [2, 3]); let b: uint32x2_t = simd_shuffle2!(b, b, [2, 3]); @@ -6786,6 +7381,7 @@ pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(pmull))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_p64(a: p64, b: p64) -> p128 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6799,6 +7395,7 @@ pub unsafe fn vmull_p64(a: p64, b: p64) -> p128 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(pmull))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t { let a: poly8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); let b: poly8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -6809,6 +7406,7 @@ pub unsafe fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t { #[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(pmull))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 { vmull_p64(simd_extract(a, 1), simd_extract(b, 1)) } @@ -6817,6 +7415,7 @@ pub unsafe fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { vmull_high_s16(a, vdupq_n_s16(b)) } @@ -6825,6 +7424,7 @@ pub unsafe fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { vmull_high_s32(a, vdupq_n_s32(b)) } @@ -6833,6 +7433,7 @@ pub unsafe fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t { vmull_high_u16(a, vdupq_n_u16(b)) } @@ -6841,6 +7442,7 @@ pub unsafe fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t { vmull_high_u32(a, vdupq_n_u32(b)) } @@ -6850,6 +7452,7 @@ pub unsafe fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smull2, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmull_high_s16(a, simd_shuffle8!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -6860,6 +7463,7 @@ pub unsafe fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) - #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smull2, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t { static_assert_imm3!(LANE); vmull_high_s16(a, simd_shuffle8!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -6870,6 +7474,7 @@ pub unsafe fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smull2, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t { static_assert_imm1!(LANE); vmull_high_s32(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -6880,6 +7485,7 @@ pub unsafe fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) - #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smull2, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t { static_assert_imm2!(LANE); vmull_high_s32(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -6890,6 +7496,7 @@ pub unsafe fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umull2, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmull_high_u16(a, simd_shuffle8!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -6900,6 +7507,7 @@ pub unsafe fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umull2, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { static_assert_imm3!(LANE); vmull_high_u16(a, simd_shuffle8!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -6910,6 +7518,7 @@ pub unsafe fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umull2, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t { static_assert_imm1!(LANE); vmull_high_u32(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -6920,6 +7529,7 @@ pub unsafe fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umull2, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { static_assert_imm2!(LANE); vmull_high_u32(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
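Review note, not part of the patch: vmull_high_* are widening multiplies over the upper halves of the inputs (smull2/umull2 read the high half directly, so no separate extract instruction is needed). Sketch:

#[cfg(target_arch = "aarch64")]
unsafe fn widening_mul_high(a: core::arch::aarch64::uint16x8_t, b: core::arch::aarch64::uint16x8_t) -> core::arch::aarch64::uint32x4_t {
    use core::arch::aarch64::vmull_high_u16;
    // Result lane i is a[4 + i] * b[4 + i] computed in u32, so a u16-by-u16
    // product can never overflow.
    vmull_high_u16(a, b)
}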
"1.59.0")] pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6942,6 +7553,7 @@ pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6955,6 +7567,7 @@ pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6968,6 +7581,7 @@ pub unsafe fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6982,6 +7596,7 @@ pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulx_lane_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { static_assert!(LANE : i32 where LANE == 0); vmulx_f64(a, transmute::(simd_extract(b, LANE as u32))) @@ -6992,6 +7607,7 @@ pub unsafe fn vmulx_lane_f64(a: float64x1_t, b: float64x1_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t { static_assert_imm1!(LANE); vmulx_f64(a, transmute::(simd_extract(b, LANE as u32))) @@ -7002,6 +7618,7 @@ pub unsafe fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) - #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulx_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { static_assert_imm1!(LANE); vmulx_f32(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -7012,6 +7629,7 @@ pub unsafe fn vmulx_lane_f32(a: float32x2_t, b: float32x2_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulx_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { static_assert_imm2!(LANE); vmulx_f32(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -7022,6 +7640,7 @@ pub unsafe fn vmulx_laneq_f32(a: float32x2_t, b: float32x4_t) - #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { static_assert_imm1!(LANE); vmulxq_f32(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -7032,6 +7651,7 @@ pub unsafe fn vmulxq_lane_f32(a: float32x4_t, b: float32x2_t) - #[target_feature(enable = "neon")] #[cfg_attr(test, 
assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { static_assert_imm2!(LANE); vmulxq_f32(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -7042,6 +7662,7 @@ pub unsafe fn vmulxq_laneq_f32(a: float32x4_t, b: float32x4_t) #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t { static_assert!(LANE : i32 where LANE == 0); vmulxq_f64(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -7052,6 +7673,7 @@ pub unsafe fn vmulxq_lane_f64(a: float64x2_t, b: float64x1_t) - #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { static_assert_imm1!(LANE); vmulxq_f64(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -7061,6 +7683,7 @@ pub unsafe fn vmulxq_laneq_f64(a: float64x2_t, b: float64x2_t) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxs_f32(a: f32, b: f32) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7074,6 +7697,7 @@ pub unsafe fn vmulxs_f32(a: f32, b: f32) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7088,6 +7712,7 @@ pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxs_lane_f32(a: f32, b: float32x2_t) -> f32 { static_assert_imm1!(LANE); vmulxs_f32(a, simd_extract(b, LANE as u32)) @@ -7098,6 +7723,7 @@ pub unsafe fn vmulxs_lane_f32(a: f32, b: float32x2_t) -> f32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxs_laneq_f32(a: f32, b: float32x4_t) -> f32 { static_assert_imm2!(LANE); vmulxs_f32(a, simd_extract(b, LANE as u32)) @@ -7108,6 +7734,7 @@ pub unsafe fn vmulxs_laneq_f32(a: f32, b: float32x4_t) -> f32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxd_lane_f64(a: f64, b: float64x1_t) -> f64 { static_assert!(LANE : i32 where LANE == 0); vmulxd_f64(a, simd_extract(b, LANE as u32)) @@ -7118,6 +7745,7 @@ pub unsafe fn vmulxd_lane_f64(a: f64, b: float64x1_t) -> f64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxd_laneq_f64(a: f64, b: float64x2_t) -> f64 { static_assert_imm1!(LANE); vmulxd_f64(a, simd_extract(b, LANE as u32)) @@ -7127,6 +7755,7 @@ pub unsafe fn vmulxd_laneq_f64(a: f64, b: float64x2_t) -> f64 { #[inline] #[target_feature(enable = 
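Review note, not part of the patch: FMULX behaves like an ordinary multiply except that (±0) × (±∞) yields ±2.0 instead of NaN, the value the FRECPS/FRSQRTS Newton-Raphson steps expect. Scalar sketch:

#[cfg(target_arch = "aarch64")]
unsafe fn mulx_special_case() -> f64 {
    use core::arch::aarch64::vmulxd_f64;
    vmulxd_f64(0.0, f64::INFINITY) // 2.0, where 0.0 * f64::INFINITY is NaN
}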
"neon")] #[cfg_attr(test, assert_instr(fmadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7140,6 +7769,7 @@ pub unsafe fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float6 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7153,6 +7783,7 @@ pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { vfma_f64(a, b, vdup_n_f64(c)) } @@ -7161,6 +7792,7 @@ pub unsafe fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { vfmaq_f64(a, b, vdupq_n_f64(c)) } @@ -7170,6 +7802,7 @@ pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfma_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { static_assert_imm1!(LANE); vfma_f32(a, b, vdup_n_f32(simd_extract(c, LANE as u32))) @@ -7180,6 +7813,7 @@ pub unsafe fn vfma_lane_f32(a: float32x2_t, b: float32x2_t, c: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfma_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { static_assert_imm2!(LANE); vfma_f32(a, b, vdup_n_f32(simd_extract(c, LANE as u32))) @@ -7190,6 +7824,7 @@ pub unsafe fn vfma_laneq_f32(a: float32x2_t, b: float32x2_t, c: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmaq_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { static_assert_imm1!(LANE); vfmaq_f32(a, b, vdupq_n_f32(simd_extract(c, LANE as u32))) @@ -7200,6 +7835,7 @@ pub unsafe fn vfmaq_lane_f32(a: float32x4_t, b: float32x4_t, c: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmaq_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { static_assert_imm2!(LANE); vfmaq_f32(a, b, vdupq_n_f32(simd_extract(c, LANE as u32))) @@ -7210,6 +7846,7 @@ pub unsafe fn vfmaq_laneq_f32(a: float32x4_t, b: float32x4_t, c #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmadd, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfma_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { static_assert!(LANE : i32 where LANE == 0); vfma_f64(a, b, 
vdup_n_f64(simd_extract(c, LANE as u32))) @@ -7220,6 +7857,7 @@ pub unsafe fn vfma_lane_f64(a: float64x1_t, b: float64x1_t, c: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfma_laneq_f64(a: float64x1_t, b: float64x1_t, c: float64x2_t) -> float64x1_t { static_assert_imm1!(LANE); vfma_f64(a, b, vdup_n_f64(simd_extract(c, LANE as u32))) @@ -7230,6 +7868,7 @@ pub unsafe fn vfma_laneq_f64(a: float64x1_t, b: float64x1_t, c: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmaq_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x1_t) -> float64x2_t { static_assert!(LANE : i32 where LANE == 0); vfmaq_f64(a, b, vdupq_n_f64(simd_extract(c, LANE as u32))) @@ -7240,6 +7879,7 @@ pub unsafe fn vfmaq_lane_f64(a: float64x2_t, b: float64x2_t, c: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmaq_laneq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { static_assert_imm1!(LANE); vfmaq_f64(a, b, vdupq_n_f64(simd_extract(c, LANE as u32))) @@ -7250,6 +7890,7 @@ pub unsafe fn vfmaq_laneq_f64(a: float64x2_t, b: float64x2_t, c #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7266,6 +7907,7 @@ pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7282,6 +7924,7 @@ pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) - #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmadd, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmad_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7298,6 +7941,7 @@ pub unsafe fn vfmad_lane_f64(a: f64, b: f64, c: float64x1_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7313,6 +7957,7 @@ pub unsafe fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) - #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmsub))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { let b: float64x1_t = simd_neg(b); vfma_f64(a, b, c) @@ -7322,6 +7967,7 @@ pub unsafe fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float6 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmsq_f64(a: float64x2_t, b: 
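Review note, not part of the patch: the FMLA lane forms fuse a lane broadcast with a multiply-add, rounding once per lane. Sketch:

#[cfg(target_arch = "aarch64")]
unsafe fn fused_axpy(acc: core::arch::aarch64::float64x2_t, x: core::arch::aarch64::float64x2_t, coeffs: core::arch::aarch64::float64x2_t) -> core::arch::aarch64::float64x2_t {
    use core::arch::aarch64::vfmaq_laneq_f64;
    // Per lane: acc + x * coeffs[0], with a single rounding step.
    vfmaq_laneq_f64::<0>(acc, x, coeffs)
}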
@@ -7322,6 +7967,7 @@ pub unsafe fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float6 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { let b: float64x2_t = simd_neg(b); vfmaq_f64(a, b, c) @@ -7331,6 +7977,7 @@ pub unsafe fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmsub))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { vfms_f64(a, b, vdup_n_f64(c)) } @@ -7339,6 +7986,7 @@ pub unsafe fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { vfmsq_f64(a, b, vdupq_n_f64(c)) } @@ -7348,6 +7996,7 @@ pub unsafe fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfms_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { static_assert_imm1!(LANE); vfms_f32(a, b, vdup_n_f32(simd_extract(c, LANE as u32))) @@ -7358,6 +8007,7 @@ pub unsafe fn vfms_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfms_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { static_assert_imm2!(LANE); vfms_f32(a, b, vdup_n_f32(simd_extract(c, LANE as u32))) @@ -7368,6 +8018,7 @@ pub unsafe fn vfms_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmsq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { static_assert_imm1!(LANE); vfmsq_f32(a, b, vdupq_n_f32(simd_extract(c, LANE as u32))) @@ -7378,6 +8029,7 @@ pub unsafe fn vfmsq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmsq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { static_assert_imm2!(LANE); vfmsq_f32(a, b, vdupq_n_f32(simd_extract(c, LANE as u32))) @@ -7388,6 +8040,7 @@ pub unsafe fn vfmsq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmsub, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfms_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { static_assert!(LANE : i32 where LANE == 0); vfms_f64(a, b, vdup_n_f64(simd_extract(c, LANE as u32))) @@ -7398,6 +8051,7 @@ pub unsafe fn vfms_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfms_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c: float64x2_t) -> float64x1_t { static_assert_imm1!(LANE); vfms_f64(a, b, vdup_n_f64(simd_extract(c, LANE as u32)))
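Review note, not part of the patch: as the bodies above show, vfms is vfma with b negated, i.e. it computes a - b * c with fused rounding. Sketch of the equivalence:

#[cfg(target_arch = "aarch64")]
unsafe fn fms_demo(a: core::arch::aarch64::float64x1_t, b: core::arch::aarch64::float64x1_t, c: core::arch::aarch64::float64x1_t) -> core::arch::aarch64::float64x1_t {
    use core::arch::aarch64::{vfma_f64, vfms_f64, vneg_f64};
    let _same_result = vfma_f64(a, vneg_f64(b), c); // identical to the line below
    vfms_f64(a, b, c)
}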
@@ -7408,6 +8062,7 @@ pub unsafe fn vfms_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmsq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c: float64x1_t) -> float64x2_t { static_assert!(LANE : i32 where LANE == 0); vfmsq_f64(a, b, vdupq_n_f64(simd_extract(c, LANE as u32))) @@ -7418,6 +8073,7 @@ pub unsafe fn vfmsq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmsq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { static_assert_imm1!(LANE); vfmsq_f64(a, b, vdupq_n_f64(simd_extract(c, LANE as u32))) @@ -7428,6 +8084,7 @@ pub unsafe fn vfmsq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 { vfmas_lane_f32::<LANE>(a, -b, c) } @@ -7437,6 +8094,7 @@ pub unsafe fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 { vfmas_laneq_f32::<LANE>(a, -b, c) } @@ -7446,6 +8104,7 @@ pub unsafe fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) - #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmsub, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 { vfmad_lane_f64::<LANE>(a, -b, c) } @@ -7455,6 +8114,7 @@ pub unsafe fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 { vfmad_laneq_f64::<LANE>(a, -b, c) } @@ -7463,6 +8123,7 @@ pub unsafe fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) - #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fdiv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_div(a, b) } @@ -7471,6 +8132,7 @@ pub unsafe fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fdiv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_div(a, b) } @@ -7479,6 +8141,7 @@ pub unsafe fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fdiv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { simd_div(a, b) } @@ -7487,6 +8150,7 @@ pub unsafe fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fdiv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { 
simd_div(a, b) } @@ -7495,6 +8159,7 @@ pub unsafe fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fsub))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { simd_sub(a, b) } @@ -7503,6 +8168,7 @@ pub unsafe fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fsub))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_sub(a, b) } @@ -7511,6 +8177,7 @@ pub unsafe fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsubd_s64(a: i64, b: i64) -> i64 { a.wrapping_sub(b) } @@ -7519,6 +8186,7 @@ pub unsafe fn vsubd_s64(a: i64, b: i64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsubd_u64(a: u64, b: u64) -> u64 { a.wrapping_sub(b) } @@ -7527,6 +8195,7 @@ pub unsafe fn vsubd_u64(a: u64, b: u64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddd_s64(a: i64, b: i64) -> i64 { a.wrapping_add(b) } @@ -7535,6 +8204,7 @@ pub unsafe fn vaddd_s64(a: i64, b: i64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddd_u64(a: u64, b: u64) -> u64 { a.wrapping_add(b) } @@ -7543,6 +8213,7 @@ pub unsafe fn vaddd_u64(a: u64, b: u64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(faddp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7556,6 +8227,7 @@ pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(faddp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7569,6 +8241,7 @@ pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(faddp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7582,6 +8255,7 @@ pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(saddlv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7595,6 +8269,7 @@ pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(saddlv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7608,6 +8283,7 @@ pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(saddlp))] +#[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7621,6 +8297,7 @@ pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(saddlv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7634,6 +8311,7 @@ pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uaddlv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7647,6 +8325,7 @@ pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uaddlv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7660,6 +8339,7 @@ pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uaddlp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7673,6 +8353,7 @@ pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uaddlv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7686,6 +8367,7 @@ pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ssubw))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { let c: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); simd_sub(a, simd_cast(c)) @@ -7695,6 +8377,7 @@ pub unsafe fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ssubw))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { let c: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]); simd_sub(a, simd_cast(c)) @@ -7704,6 +8387,7 @@ pub unsafe fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ssubw))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { let c: int32x2_t = simd_shuffle2!(b, b, [2, 3]); simd_sub(a, simd_cast(c)) @@ -7713,6 +8397,7 @@ pub unsafe fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usubw))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { let c: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); simd_sub(a, simd_cast(c)) @@ -7722,6 +8407,7 @@ pub unsafe fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usubw))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> 
uint32x4_t { let c: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]); simd_sub(a, simd_cast(c)) @@ -7731,6 +8417,7 @@ pub unsafe fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usubw))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { let c: uint32x2_t = simd_shuffle2!(b, b, [2, 3]); simd_sub(a, simd_cast(c)) @@ -7740,6 +8427,7 @@ pub unsafe fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ssubl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { let c: int8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); let d: int16x8_t = simd_cast(c); @@ -7752,6 +8440,7 @@ pub unsafe fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ssubl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { let c: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]); let d: int32x4_t = simd_cast(c); @@ -7764,6 +8453,7 @@ pub unsafe fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ssubl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { let c: int32x2_t = simd_shuffle2!(a, a, [2, 3]); let d: int64x2_t = simd_cast(c); @@ -7776,6 +8466,7 @@ pub unsafe fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usubl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { let c: uint8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); let d: uint16x8_t = simd_cast(c); @@ -7788,6 +8479,7 @@ pub unsafe fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usubl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { let c: uint16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]); let d: uint32x4_t = simd_cast(c); @@ -7800,6 +8492,7 @@ pub unsafe fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usubl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { let c: uint32x2_t = simd_shuffle2!(a, a, [2, 3]); let d: uint64x2_t = simd_cast(c); @@ -8466,6 +9159,7 @@ pub unsafe fn vdotq_laneq_u32(a: uint32x4_t, b: uint8x16_t, c: #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmax))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8479,6 +9173,7 @@ pub unsafe fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmax))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t 
{ #[allow(improper_ctypes)] extern "unadjusted" { @@ -8492,6 +9187,7 @@ pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxnm))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8505,6 +9201,7 @@ pub unsafe fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxnm))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8518,6 +9215,7 @@ pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8531,6 +9229,7 @@ pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8544,6 +9243,7 @@ pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxnmv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8557,6 +9257,7 @@ pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8570,6 +9271,7 @@ pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8583,6 +9285,7 @@ pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8596,6 +9299,7 @@ pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8609,6 +9313,7 @@ pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8622,6 +9327,7 @@ pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { 
#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8635,6 +9341,7 @@ pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8648,6 +9355,7 @@ pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmin))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8661,6 +9369,7 @@ pub unsafe fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmin))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8674,6 +9383,7 @@ pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnm))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8687,6 +9397,7 @@ pub unsafe fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnm))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8700,6 +9411,7 @@ pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8713,6 +9425,7 @@ pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8726,6 +9439,7 @@ pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8739,6 +9453,7 @@ pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sxtl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmovl_high_s8(a: int8x16_t) -> int16x8_t { let a: int8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); vmovl_s8(a) @@ -8748,6 +9463,7 @@ pub unsafe fn vmovl_high_s8(a: int8x16_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sxtl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmovl_high_s16(a: 
int16x8_t) -> int32x4_t { let a: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]); vmovl_s16(a) @@ -8757,6 +9473,7 @@ pub unsafe fn vmovl_high_s16(a: int16x8_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sxtl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmovl_high_s32(a: int32x4_t) -> int64x2_t { let a: int32x2_t = simd_shuffle2!(a, a, [2, 3]); vmovl_s32(a) @@ -8766,6 +9483,7 @@ pub unsafe fn vmovl_high_s32(a: int32x4_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uxtl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t { let a: uint8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); vmovl_u8(a) @@ -8775,6 +9493,7 @@ pub unsafe fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uxtl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t { let a: uint16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]); vmovl_u16(a) @@ -8784,6 +9503,7 @@ pub unsafe fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uxtl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t { let a: uint32x2_t = simd_shuffle2!(a, a, [2, 3]); vmovl_u32(a) @@ -8793,6 +9513,7 @@ pub unsafe fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(faddp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8806,6 +9527,7 @@ pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(faddp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8819,6 +9541,7 @@ pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpadds_f32(a: float32x2_t) -> f32 { let a1: f32 = simd_extract(a, 0); let a2: f32 = simd_extract(a, 1); @@ -8829,6 +9552,7 @@ pub unsafe fn vpadds_f32(a: float32x2_t) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpaddd_f64(a: float64x2_t) -> f64 { let a1: f64 = simd_extract(a, 0); let a2: f64 = simd_extract(a, 1); @@ -8839,6 +9563,7 @@ pub unsafe fn vpaddd_f64(a: float64x2_t) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8852,6 +9577,7 @@ pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { 
#[allow(improper_ctypes)] extern "unadjusted" { @@ -8865,6 +9591,7 @@ pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8878,6 +9605,7 @@ pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8891,6 +9619,7 @@ pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8904,6 +9633,7 @@ pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8917,6 +9647,7 @@ pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8930,6 +9661,7 @@ pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmullh_s16(a: i16, b: i16) -> i32 { let a: int16x4_t = vdup_n_s16(a); let b: int16x4_t = vdup_n_s16(b); @@ -8940,6 +9672,7 @@ pub unsafe fn vqdmullh_s16(a: i16, b: i16) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulls_s32(a: i32, b: i32) -> i64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8953,6 +9686,7 @@ pub unsafe fn vqdmulls_s32(a: i32, b: i32) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { let a: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]); let b: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]); @@ -8963,6 +9697,7 @@ pub unsafe fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { let a: int32x2_t = simd_shuffle2!(a, a, [2, 3]); let b: int32x2_t = simd_shuffle2!(b, b, [2, 3]); @@ -8973,6 +9708,7 @@ pub unsafe fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { let a: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]); let b: 
int16x4_t = vdup_n_s16(b); @@ -8983,6 +9719,7 @@ pub unsafe fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { let a: int32x2_t = simd_shuffle2!(a, a, [2, 3]); let b: int32x2_t = vdup_n_s32(b); @@ -8994,6 +9731,7 @@ pub unsafe fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull, N = 4))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t { static_assert_imm3!(N); let b: int16x4_t = simd_shuffle4!(b, b, [N as u32, N as u32, N as u32, N as u32]); @@ -9005,6 +9743,7 @@ pub unsafe fn vqdmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t { static_assert_imm2!(N); let b: int32x2_t = simd_shuffle2!(b, b, [N as u32, N as u32]); @@ -9016,6 +9755,7 @@ pub unsafe fn vqdmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 { static_assert_imm2!(N); let b: i16 = simd_extract(b, N as u32); @@ -9027,6 +9767,7 @@ pub unsafe fn vqdmullh_lane_s16(a: i16, b: int16x4_t) -> i32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull, N = 4))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 { static_assert_imm3!(N); let b: i16 = simd_extract(b, N as u32); @@ -9038,6 +9779,7 @@ pub unsafe fn vqdmullh_laneq_s16(a: i16, b: int16x8_t) -> i32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 { static_assert_imm1!(N); let b: i32 = simd_extract(b, N as u32); @@ -9049,6 +9791,7 @@ pub unsafe fn vqdmulls_lane_s32(a: i32, b: int32x2_t) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 { static_assert_imm2!(N); let b: i32 = simd_extract(b, N as u32); @@ -9060,6 +9803,7 @@ pub unsafe fn vqdmulls_laneq_s32(a: i32, b: int32x4_t) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t { static_assert_imm2!(N); let a: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]); @@ -9072,6 +9816,7 @@ pub unsafe fn vqdmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull2, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn 
vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t { static_assert_imm1!(N); let a: int32x2_t = simd_shuffle2!(a, a, [2, 3]); @@ -9084,6 +9829,7 @@ pub unsafe fn vqdmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull2, N = 4))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t { static_assert_imm3!(N); let a: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]); @@ -9096,6 +9842,7 @@ pub unsafe fn vqdmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) - #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t { static_assert_imm2!(N); let a: int32x2_t = simd_shuffle2!(a, a, [2, 3]); @@ -9107,6 +9854,7 @@ pub unsafe fn vqdmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) - #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { vqaddq_s32(a, vqdmull_high_s16(b, c)) } @@ -9115,6 +9863,7 @@ pub unsafe fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int3 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { vqaddq_s64(a, vqdmull_high_s32(b, c)) } @@ -9123,6 +9872,7 @@ pub unsafe fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int6 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { vqaddq_s32(a, vqdmull_high_n_s16(b, c)) } @@ -9131,6 +9881,7 @@ pub unsafe fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_ #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { vqaddq_s64(a, vqdmull_high_n_s32(b, c)) } @@ -9140,6 +9891,7 @@ pub unsafe fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_ #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal, N = 2))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { static_assert_imm3!(N); vqaddq_s32(a, vqdmull_laneq_s16::<N>(b, c)) @@ -9150,6 +9902,7 @@ pub unsafe fn vqdmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: int #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal, N = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { static_assert_imm2!(N); vqaddq_s64(a, vqdmull_laneq_s32::<N>(b, c)) @@ -9160,6 +9913,7 @@ pub unsafe fn vqdmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: int #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t { static_assert_imm2!(N); vqaddq_s32(a, vqdmull_high_lane_s16::<N>(b, c)) @@ -9170,6 +9924,7 @@ pub unsafe fn vqdmlal_high_lane_s16(a: int32x4_t, b: int16x8_t, c: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { static_assert_imm3!(N); vqaddq_s32(a, vqdmull_high_laneq_s16::<N>(b, c)) @@ -9180,6 +9935,7 @@ pub unsafe fn vqdmlal_high_laneq_s16(a: int32x4_t, b: int16x8_t, c #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t { static_assert_imm1!(N); vqaddq_s64(a, vqdmull_high_lane_s32::<N>(b, c)) @@ -9190,6 +9946,7 @@ pub unsafe fn vqdmlal_high_lane_s32(a: int64x2_t, b: int32x4_t, c: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { static_assert_imm2!(N); vqaddq_s64(a, vqdmull_high_laneq_s32::<N>(b, c)) @@ -9199,6 +9956,7 @@ pub unsafe fn vqdmlal_high_laneq_s32(a: int64x2_t, b: int32x4_t, c #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 { let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c)); vqadds_s32(a, simd_extract(x, 0)) @@ -9208,6 +9966,7 @@ pub unsafe fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 { let x: int64x2_t = vqdmull_s32(vdup_n_s32(b), vdup_n_s32(c)); vqaddd_s64(a, simd_extract(x, 0)) @@ -9218,6 +9977,7 @@ pub unsafe fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 { static_assert_imm2!(LANE); vqdmlalh_s16(a, b, simd_extract(c, LANE as u32)) @@ -9228,6 +9988,7 @@ pub unsafe fn vqdmlalh_lane_s16(a: i32, b: i16, c: int16x4_t) - #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 { static_assert_imm3!(LANE); vqdmlalh_s16(a, b, simd_extract(c, LANE as u32)) @@ -9238,6 +9999,7 @@ pub unsafe fn vqdmlalh_laneq_s16(a: i32, b: i16, c: int16x8_t) #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 { static_assert_imm1!(LANE); vqdmlals_s32(a, b, simd_extract(c, LANE as u32)) @@ -9248,6 +10010,7 @@ pub unsafe fn vqdmlals_lane_s32(a: i64, b: 
i32, c: int32x2_t) - #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 { static_assert_imm2!(LANE); vqdmlals_s32(a, b, simd_extract(c, LANE as u32)) @@ -9257,6 +10020,7 @@ pub unsafe fn vqdmlals_laneq_s32(a: i64, b: i32, c: int32x4_t) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { vqsubq_s32(a, vqdmull_high_s16(b, c)) } @@ -9265,6 +10029,7 @@ pub unsafe fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int3 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { vqsubq_s64(a, vqdmull_high_s32(b, c)) } @@ -9273,6 +10038,7 @@ pub unsafe fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int6 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { vqsubq_s32(a, vqdmull_high_n_s16(b, c)) } @@ -9281,6 +10047,7 @@ pub unsafe fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_ #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { vqsubq_s64(a, vqdmull_high_n_s32(b, c)) } @@ -9290,6 +10057,7 @@ pub unsafe fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_ #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl, N = 2))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { static_assert_imm3!(N); vqsubq_s32(a, vqdmull_laneq_s16::<N>(b, c)) @@ -9300,6 +10068,7 @@ pub unsafe fn vqdmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: int #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl, N = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { static_assert_imm2!(N); vqsubq_s64(a, vqdmull_laneq_s32::<N>(b, c)) @@ -9310,6 +10079,7 @@ pub unsafe fn vqdmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t { static_assert_imm2!(N); vqsubq_s32(a, vqdmull_high_lane_s16::<N>(b, c)) @@ -9320,6 +10090,7 @@ pub unsafe fn vqdmlsl_high_lane_s16(a: int32x4_t, b: int16x8_t, c: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { static_assert_imm3!(N); vqsubq_s32(a, vqdmull_high_laneq_s16::<N>(b, c)) @@ -9330,6 
+10101,7 @@ pub unsafe fn vqdmlsl_high_laneq_s16(a: int32x4_t, b: int16x8_t, c #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t { static_assert_imm1!(N); vqsubq_s64(a, vqdmull_high_lane_s32::<N>(b, c)) @@ -9340,6 +10112,7 @@ pub unsafe fn vqdmlsl_high_lane_s32(a: int64x2_t, b: int32x4_t, c: #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { static_assert_imm2!(N); vqsubq_s64(a, vqdmull_high_laneq_s32::<N>(b, c)) @@ -9349,6 +10122,7 @@ pub unsafe fn vqdmlsl_high_laneq_s32(a: int64x2_t, b: int32x4_t, c #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 { let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c)); vqsubs_s32(a, simd_extract(x, 0)) @@ -9358,6 +10132,7 @@ pub unsafe fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 { let x: int64x2_t = vqdmull_s32(vdup_n_s32(b), vdup_n_s32(c)); vqsubd_s64(a, simd_extract(x, 0)) @@ -9368,6 +10143,7 @@ pub unsafe fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 { static_assert_imm2!(LANE); vqdmlslh_s16(a, b, simd_extract(c, LANE as u32)) @@ -9378,6 +10154,7 @@ pub unsafe fn vqdmlslh_lane_s16(a: i32, b: i16, c: int16x4_t) - #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 { static_assert_imm3!(LANE); vqdmlslh_s16(a, b, simd_extract(c, LANE as u32)) @@ -9388,6 +10165,7 @@ pub unsafe fn vqdmlslh_laneq_s16(a: i32, b: i16, c: int16x8_t) #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 { static_assert_imm1!(LANE); vqdmlsls_s32(a, b, simd_extract(c, LANE as u32)) @@ -9398,6 +10176,7 @@ pub unsafe fn vqdmlsls_lane_s32(a: i64, b: i32, c: int32x2_t) - #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 { static_assert_imm2!(LANE); vqdmlsls_s32(a, b, simd_extract(c, LANE as u32)) @@ -9407,6 +10186,7 @@ pub unsafe fn vqdmlsls_laneq_s32(a: i64, b: i32, c: int32x4_t) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulhh_s16(a: i16, b: i16) -> i16 { let a: 
int16x4_t = vdup_n_s16(a); let b: int16x4_t = vdup_n_s16(b); @@ -9417,6 +10197,7 @@ pub unsafe fn vqdmulhh_s16(a: i16, b: i16) -> i16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulhs_s32(a: i32, b: i32) -> i32 { let a: int32x2_t = vdup_n_s32(a); let b: int32x2_t = vdup_n_s32(b); @@ -9428,6 +10209,7 @@ pub unsafe fn vqdmulhs_s32(a: i32, b: i32) -> i32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 { static_assert_imm2!(N); let b: i16 = simd_extract(b, N as u32); @@ -9439,6 +10221,7 @@ pub unsafe fn vqdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 { static_assert_imm3!(N); let b: i16 = simd_extract(b, N as u32); @@ -9450,6 +10233,7 @@ pub unsafe fn vqdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 { static_assert_imm1!(N); let b: i32 = simd_extract(b, N as u32); @@ -9461,6 +10245,7 @@ pub unsafe fn vqdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 { static_assert_imm2!(N); let b: i32 = simd_extract(b, N as u32); @@ -9472,6 +10257,7 @@ pub unsafe fn vqdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); vqdmulh_s16(a, vdup_n_s16(simd_extract(b, LANE as u32))) @@ -9482,6 +10268,7 @@ pub unsafe fn vqdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> i #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t { static_assert_imm2!(LANE); vqdmulhq_s16(a, vdupq_n_s16(simd_extract(b, LANE as u32))) @@ -9492,6 +10279,7 @@ pub unsafe fn vqdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); vqdmulh_s32(a, vdup_n_s32(simd_extract(b, LANE as u32))) @@ -9502,6 +10290,7 @@ pub unsafe fn vqdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> i #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t { 
static_assert_imm1!(LANE); vqdmulhq_s32(a, vdupq_n_s32(simd_extract(b, LANE as u32))) @@ -9511,6 +10300,7 @@ pub unsafe fn vqdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovnh_s16(a: i16) -> i8 { simd_extract(vqmovn_s16(vdupq_n_s16(a)), 0) } @@ -9519,6 +10309,7 @@ pub unsafe fn vqmovnh_s16(a: i16) -> i8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovns_s32(a: i32) -> i16 { simd_extract(vqmovn_s32(vdupq_n_s32(a)), 0) } @@ -9527,6 +10318,7 @@ pub unsafe fn vqmovns_s32(a: i32) -> i16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqxtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovnh_u16(a: u16) -> u8 { simd_extract(vqmovn_u16(vdupq_n_u16(a)), 0) } @@ -9535,6 +10327,7 @@ pub unsafe fn vqmovnh_u16(a: u16) -> u8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqxtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovns_u32(a: u32) -> u16 { simd_extract(vqmovn_u32(vdupq_n_u32(a)), 0) } @@ -9543,6 +10336,7 @@ pub unsafe fn vqmovns_u32(a: u32) -> u16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovnd_s64(a: i64) -> i32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9556,6 +10350,7 @@ pub unsafe fn vqmovnd_s64(a: i64) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqxtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovnd_u64(a: u64) -> u32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9569,6 +10364,7 @@ pub unsafe fn vqmovnd_u64(a: u64) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { simd_shuffle16!(a, vqmovn_s16(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } @@ -9577,6 +10373,7 @@ pub unsafe fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { simd_shuffle8!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -9585,6 +10382,7 @@ pub unsafe fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { simd_shuffle4!(a, vqmovn_s64(b), [0, 1, 2, 3]) } @@ -9593,6 +10391,7 @@ pub unsafe fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { simd_shuffle16!(a, vqmovn_u16(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } @@ -9601,6 +10400,7 @@ pub unsafe fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(test, assert_instr(uqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { simd_shuffle8!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -9609,6 +10409,7 @@ pub unsafe fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { simd_shuffle4!(a, vqmovn_u64(b), [0, 1, 2, 3]) } @@ -9617,6 +10418,7 @@ pub unsafe fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtun))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovunh_s16(a: i16) -> u8 { simd_extract(vqmovun_s16(vdupq_n_s16(a)), 0) } @@ -9625,6 +10427,7 @@ pub unsafe fn vqmovunh_s16(a: i16) -> u8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtun))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovuns_s32(a: i32) -> u16 { simd_extract(vqmovun_s32(vdupq_n_s32(a)), 0) } @@ -9633,6 +10436,7 @@ pub unsafe fn vqmovuns_s32(a: i32) -> u16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtun))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovund_s64(a: i64) -> u32 { simd_extract(vqmovun_s64(vdupq_n_s64(a)), 0) } @@ -9641,6 +10445,7 @@ pub unsafe fn vqmovund_s64(a: i64) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtun2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { simd_shuffle16!(a, vqmovun_s16(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } @@ -9649,6 +10454,7 @@ pub unsafe fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtun2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { simd_shuffle8!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -9657,6 +10463,7 @@ pub unsafe fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtun2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { simd_shuffle4!(a, vqmovun_s64(b), [0, 1, 2, 3]) } @@ -9665,6 +10472,7 @@ pub unsafe fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrdmulhh_s16(a: i16, b: i16) -> i16 { simd_extract(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) } @@ -9673,6 +10481,7 @@ pub unsafe fn vqrdmulhh_s16(a: i16, b: i16) -> i16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrdmulhs_s32(a: i32, b: i32) -> i32 { simd_extract(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) } @@ -9682,6 +10491,7 @@ pub unsafe fn vqrdmulhs_s32(a: i32, b: i32) -> i32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] 
+#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { static_assert_imm2!(LANE); vqrdmulhh_s16(a, simd_extract(b, LANE as u32)) @@ -9692,6 +10502,7 @@ pub unsafe fn vqrdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { static_assert_imm3!(LANE); vqrdmulhh_s16(a, simd_extract(b, LANE as u32)) @@ -9702,6 +10513,7 @@ pub unsafe fn vqrdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { static_assert_imm1!(LANE); vqrdmulhs_s32(a, simd_extract(b, LANE as u32)) @@ -9712,6 +10524,7 @@ pub unsafe fn vqrdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { static_assert_imm2!(LANE); vqrdmulhs_s32(a, simd_extract(b, LANE as u32)) @@ -9895,6 +10708,7 @@ pub unsafe fn vqrdmlahs_laneq_s32(a: i32, b: i32, c: int32x4_t) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 { vqsubh_s16(a, vqrdmulhh_s16(b, c)) } @@ -9903,6 +10717,7 @@ pub unsafe fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 { vqsubs_s32(a, vqrdmulhs_s32(b, c)) } @@ -9912,6 +10727,7 @@ pub unsafe fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrdmlshh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 { static_assert_imm2!(LANE); vqsubh_s16(a, vqrdmulhh_lane_s16::(b, c)) @@ -9922,6 +10738,7 @@ pub unsafe fn vqrdmlshh_lane_s16(a: i16, b: i16, c: int16x4_t) #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrdmlshh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16 { static_assert_imm3!(LANE); vqsubh_s16(a, vqrdmulhh_laneq_s16::(b, c)) @@ -9932,6 +10749,7 @@ pub unsafe fn vqrdmlshh_laneq_s16(a: i16, b: i16, c: int16x8_t) #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrdmlshs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 { static_assert_imm1!(LANE); vqsubs_s32(a, vqrdmulhs_lane_s32::(b, c)) @@ -9942,6 +10760,7 @@ pub unsafe fn vqrdmlshs_lane_s32(a: i32, b: i32, c: int32x2_t) #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub 
unsafe fn vqrdmlshs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 { static_assert_imm2!(LANE); vqsubs_s32(a, vqrdmulhs_laneq_s32::(b, c)) @@ -9951,6 +10770,7 @@ pub unsafe fn vqrdmlshs_laneq_s32(a: i32, b: i32, c: int32x4_t) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshls_s32(a: i32, b: i32) -> i32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9964,6 +10784,7 @@ pub unsafe fn vqrshls_s32(a: i32, b: i32) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshld_s64(a: i64, b: i64) -> i64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9977,6 +10798,7 @@ pub unsafe fn vqrshld_s64(a: i64, b: i64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshlb_s8(a: i8, b: i8) -> i8 { let a: int8x8_t = vdup_n_s8(a); let b: int8x8_t = vdup_n_s8(b); @@ -9987,6 +10809,7 @@ pub unsafe fn vqrshlb_s8(a: i8, b: i8) -> i8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshlh_s16(a: i16, b: i16) -> i16 { let a: int16x4_t = vdup_n_s16(a); let b: int16x4_t = vdup_n_s16(b); @@ -9997,6 +10820,7 @@ pub unsafe fn vqrshlh_s16(a: i16, b: i16) -> i16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10010,6 +10834,7 @@ pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshld_u64(a: u64, b: i64) -> u64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10023,6 +10848,7 @@ pub unsafe fn vqrshld_u64(a: u64, b: i64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshlb_u8(a: u8, b: i8) -> u8 { let a: uint8x8_t = vdup_n_u8(a); let b: int8x8_t = vdup_n_s8(b); @@ -10033,6 +10859,7 @@ pub unsafe fn vqrshlb_u8(a: u8, b: i8) -> u8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshlh_u16(a: u16, b: i16) -> u16 { let a: uint16x4_t = vdup_n_u16(a); let b: int16x4_t = vdup_n_s16(b); @@ -10044,6 +10871,7 @@ pub unsafe fn vqrshlh_u16(a: u16, b: i16) -> u16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrnh_n_s16(a: i16) -> i8 { static_assert!(N : i32 where N >= 1 && N <= 8); let a: int16x8_t = vdupq_n_s16(a); @@ -10055,6 +10883,7 @@ pub unsafe fn vqrshrnh_n_s16(a: i16) -> i8 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrns_n_s32(a: i32) -> i16 { static_assert!(N : i32 where N >= 1 && N <= 16); let a: int32x4_t = vdupq_n_s32(a); @@ -10066,6 +10895,7 @@ pub unsafe fn vqrshrns_n_s32(a: i32) 
-> i16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrnd_n_s64(a: i64) -> i32 { static_assert!(N : i32 where N >= 1 && N <= 32); let a: int64x2_t = vdupq_n_s64(a); @@ -10077,6 +10907,7 @@ pub unsafe fn vqrshrnd_n_s64(a: i64) -> i32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_shuffle16!(a, vqrshrn_n_s16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -10087,6 +10918,7 @@ pub unsafe fn vqrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_shuffle8!(a, vqrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) @@ -10097,6 +10929,7 @@ pub unsafe fn vqrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> in #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_shuffle4!(a, vqrshrn_n_s64::(b), [0, 1, 2, 3]) @@ -10107,6 +10940,7 @@ pub unsafe fn vqrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> in #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrnh_n_u16(a: u16) -> u8 { static_assert!(N : i32 where N >= 1 && N <= 8); let a: uint16x8_t = vdupq_n_u16(a); @@ -10118,6 +10952,7 @@ pub unsafe fn vqrshrnh_n_u16(a: u16) -> u8 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrns_n_u32(a: u32) -> u16 { static_assert!(N : i32 where N >= 1 && N <= 16); let a: uint32x4_t = vdupq_n_u32(a); @@ -10129,6 +10964,7 @@ pub unsafe fn vqrshrns_n_u32(a: u32) -> u16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrnd_n_u64(a: u64) -> u32 { static_assert!(N : i32 where N >= 1 && N <= 32); let a: uint64x2_t = vdupq_n_u64(a); @@ -10140,6 +10976,7 @@ pub unsafe fn vqrshrnd_n_u64(a: u64) -> u32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_shuffle16!(a, vqrshrn_n_u16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -10150,6 +10987,7 @@ pub unsafe fn vqrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> u #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = 
"1.59.0")] pub unsafe fn vqrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_shuffle8!(a, vqrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) @@ -10160,6 +10998,7 @@ pub unsafe fn vqrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_shuffle4!(a, vqrshrn_n_u64::(b), [0, 1, 2, 3]) @@ -10170,6 +11009,7 @@ pub unsafe fn vqrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrunh_n_s16(a: i16) -> u8 { static_assert!(N : i32 where N >= 1 && N <= 8); let a: int16x8_t = vdupq_n_s16(a); @@ -10181,6 +11021,7 @@ pub unsafe fn vqrshrunh_n_s16(a: i16) -> u8 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshruns_n_s32(a: i32) -> u16 { static_assert!(N : i32 where N >= 1 && N <= 16); let a: int32x4_t = vdupq_n_s32(a); @@ -10192,6 +11033,7 @@ pub unsafe fn vqrshruns_n_s32(a: i32) -> u16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrund_n_s64(a: i64) -> u32 { static_assert!(N : i32 where N >= 1 && N <= 32); let a: int64x2_t = vdupq_n_s64(a); @@ -10203,6 +11045,7 @@ pub unsafe fn vqrshrund_n_s64(a: i64) -> u32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_shuffle16!(a, vqrshrun_n_s16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -10213,6 +11056,7 @@ pub unsafe fn vqrshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> u #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_shuffle8!(a, vqrshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) @@ -10223,6 +11067,7 @@ pub unsafe fn vqrshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_shuffle4!(a, vqrshrun_n_s64::(b), [0, 1, 2, 3]) @@ -10232,6 +11077,7 @@ pub unsafe fn vqrshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshld_s64(a: i64, b: i64) -> i64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10245,6 +11091,7 @@ pub unsafe fn 
vqshld_s64(a: i64, b: i64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlb_s8(a: i8, b: i8) -> i8 { let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b)); simd_extract(c, 0) @@ -10254,6 +11101,7 @@ pub unsafe fn vqshlb_s8(a: i8, b: i8) -> i8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlh_s16(a: i16, b: i16) -> i16 { let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b)); simd_extract(c, 0) @@ -10263,6 +11111,7 @@ pub unsafe fn vqshlh_s16(a: i16, b: i16) -> i16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshls_s32(a: i32, b: i32) -> i32 { let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b)); simd_extract(c, 0) @@ -10272,6 +11121,7 @@ pub unsafe fn vqshls_s32(a: i32, b: i32) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshld_u64(a: u64, b: i64) -> u64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10285,6 +11135,7 @@ pub unsafe fn vqshld_u64(a: u64, b: i64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlb_u8(a: u8, b: i8) -> u8 { let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b)); simd_extract(c, 0) @@ -10294,6 +11145,7 @@ pub unsafe fn vqshlb_u8(a: u8, b: i8) -> u8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlh_u16(a: u16, b: i16) -> u16 { let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b)); simd_extract(c, 0) @@ -10303,6 +11155,7 @@ pub unsafe fn vqshlh_u16(a: u16, b: i16) -> u16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshls_u32(a: u32, b: i32) -> u32 { let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b)); simd_extract(c, 0) @@ -10313,6 +11166,7 @@ pub unsafe fn vqshls_u32(a: u32, b: i32) -> u32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlb_n_s8(a: i8) -> i8 { static_assert_imm3!(N); simd_extract(vqshl_n_s8::(vdup_n_s8(a)), 0) @@ -10323,6 +11177,7 @@ pub unsafe fn vqshlb_n_s8(a: i8) -> i8 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlh_n_s16(a: i16) -> i16 { static_assert_imm4!(N); simd_extract(vqshl_n_s16::(vdup_n_s16(a)), 0) @@ -10333,6 +11188,7 @@ pub unsafe fn vqshlh_n_s16(a: i16) -> i16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshls_n_s32(a: i32) -> i32 { static_assert_imm5!(N); simd_extract(vqshl_n_s32::(vdup_n_s32(a)), 0) @@ -10343,6 +11199,7 @@ pub unsafe fn vqshls_n_s32(a: i32) -> i32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshl, N = 2))] 
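// --- Illustrative note, not part of the patch: a minimal sketch of the scalar
// saturating-shift forms stabilized above, assuming an aarch64 target (where
// NEON is enabled by default). The function name `qshl_scalar_demo` is ours.
#[cfg(target_arch = "aarch64")]
fn qshl_scalar_demo() {
    use std::arch::aarch64::{vqshlb_n_s8, vqshlb_s8, vqshlub_n_s8};
    unsafe {
        // Register form: 100 << 1 == 200 overflows i8, so it saturates to 127.
        assert_eq!(vqshlb_s8(100, 1), i8::MAX);
        // Immediate form: the shift is a const generic, so static_assert_imm3!
        // rejects out-of-range shift amounts at compile time.
        assert_eq!(vqshlb_n_s8::<1>(100), i8::MAX);
        // Signed-to-unsigned form (also stabilized in this patch): negative
        // inputs clamp to zero, and 100 << 1 == 200 fits the u8 result.
        assert_eq!(vqshlub_n_s8::<1>(-5), 0);
        assert_eq!(vqshlub_n_s8::<1>(100), 200);
    }
}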
#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshld_n_s64(a: i64) -> i64 { static_assert_imm6!(N); simd_extract(vqshl_n_s64::(vdup_n_s64(a)), 0) @@ -10353,6 +11210,7 @@ pub unsafe fn vqshld_n_s64(a: i64) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlb_n_u8(a: u8) -> u8 { static_assert_imm3!(N); simd_extract(vqshl_n_u8::(vdup_n_u8(a)), 0) @@ -10363,6 +11221,7 @@ pub unsafe fn vqshlb_n_u8(a: u8) -> u8 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlh_n_u16(a: u16) -> u16 { static_assert_imm4!(N); simd_extract(vqshl_n_u16::(vdup_n_u16(a)), 0) @@ -10373,6 +11232,7 @@ pub unsafe fn vqshlh_n_u16(a: u16) -> u16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshls_n_u32(a: u32) -> u32 { static_assert_imm5!(N); simd_extract(vqshl_n_u32::(vdup_n_u32(a)), 0) @@ -10383,6 +11243,7 @@ pub unsafe fn vqshls_n_u32(a: u32) -> u32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshld_n_u64(a: u64) -> u64 { static_assert_imm6!(N); simd_extract(vqshl_n_u64::(vdup_n_u64(a)), 0) @@ -10393,6 +11254,7 @@ pub unsafe fn vqshld_n_u64(a: u64) -> u64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlub_n_s8(a: i8) -> u8 { static_assert_imm3!(N); simd_extract(vqshlu_n_s8::(vdup_n_s8(a)), 0) @@ -10403,6 +11265,7 @@ pub unsafe fn vqshlub_n_s8(a: i8) -> u8 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluh_n_s16(a: i16) -> u16 { static_assert_imm4!(N); simd_extract(vqshlu_n_s16::(vdup_n_s16(a)), 0) @@ -10413,6 +11276,7 @@ pub unsafe fn vqshluh_n_s16(a: i16) -> u16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlus_n_s32(a: i32) -> u32 { static_assert_imm5!(N); simd_extract(vqshlu_n_s32::(vdup_n_s32(a)), 0) @@ -10423,6 +11287,7 @@ pub unsafe fn vqshlus_n_s32(a: i32) -> u32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlud_n_s64(a: i64) -> u64 { static_assert_imm6!(N); simd_extract(vqshlu_n_s64::(vdup_n_s64(a)), 0) @@ -10433,6 +11298,7 @@ pub unsafe fn vqshlud_n_s64(a: i64) -> u64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrnd_n_s64(a: i64) -> i32 { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -10448,6 +11314,7 @@ pub unsafe fn vqshrnd_n_s64(a: i64) -> i32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrn, N = 
2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrnh_n_s16(a: i16) -> i8 { static_assert!(N : i32 where N >= 1 && N <= 8); simd_extract(vqshrn_n_s16::(vdupq_n_s16(a)), 0) @@ -10458,6 +11325,7 @@ pub unsafe fn vqshrnh_n_s16(a: i16) -> i8 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrns_n_s32(a: i32) -> i16 { static_assert!(N : i32 where N >= 1 && N <= 16); simd_extract(vqshrn_n_s32::(vdupq_n_s32(a)), 0) @@ -10468,6 +11336,7 @@ pub unsafe fn vqshrns_n_s32(a: i32) -> i16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_shuffle16!(a, vqshrn_n_s16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -10478,6 +11347,7 @@ pub unsafe fn vqshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_shuffle8!(a, vqshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) @@ -10488,6 +11358,7 @@ pub unsafe fn vqshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_shuffle4!(a, vqshrn_n_s64::(b), [0, 1, 2, 3]) @@ -10498,6 +11369,7 @@ pub unsafe fn vqshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrnd_n_u64(a: u64) -> u32 { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -10513,6 +11385,7 @@ pub unsafe fn vqshrnd_n_u64(a: u64) -> u32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrnh_n_u16(a: u16) -> u8 { static_assert!(N : i32 where N >= 1 && N <= 8); simd_extract(vqshrn_n_u16::(vdupq_n_u16(a)), 0) @@ -10523,6 +11396,7 @@ pub unsafe fn vqshrnh_n_u16(a: u16) -> u8 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrns_n_u32(a: u32) -> u16 { static_assert!(N : i32 where N >= 1 && N <= 16); simd_extract(vqshrn_n_u32::(vdupq_n_u32(a)), 0) @@ -10533,6 +11407,7 @@ pub unsafe fn vqshrns_n_u32(a: u32) -> u16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_shuffle16!(a, vqshrn_n_u16::(b), [0, 1, 2, 3, 4, 5, 
6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -10543,6 +11418,7 @@ pub unsafe fn vqshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> ui #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_shuffle8!(a, vqshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) @@ -10553,6 +11429,7 @@ pub unsafe fn vqshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> u #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_shuffle4!(a, vqshrn_n_u64::(b), [0, 1, 2, 3]) @@ -10563,6 +11440,7 @@ pub unsafe fn vqshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> u #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrunh_n_s16(a: i16) -> u8 { static_assert!(N : i32 where N >= 1 && N <= 8); simd_extract(vqshrun_n_s16::(vdupq_n_s16(a)), 0) @@ -10573,6 +11451,7 @@ pub unsafe fn vqshrunh_n_s16(a: i16) -> u8 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshruns_n_s32(a: i32) -> u16 { static_assert!(N : i32 where N >= 1 && N <= 16); simd_extract(vqshrun_n_s32::(vdupq_n_s32(a)), 0) @@ -10583,6 +11462,7 @@ pub unsafe fn vqshruns_n_s32(a: i32) -> u16 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrund_n_s64(a: i64) -> u32 { static_assert!(N : i32 where N >= 1 && N <= 32); simd_extract(vqshrun_n_s64::(vdupq_n_s64(a)), 0) @@ -10593,6 +11473,7 @@ pub unsafe fn vqshrund_n_s64(a: i64) -> u32 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrun2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_shuffle16!(a, vqshrun_n_s16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -10603,6 +11484,7 @@ pub unsafe fn vqshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> ui #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrun2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_shuffle8!(a, vqshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) @@ -10613,6 +11495,7 @@ pub unsafe fn vqshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> u #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrun2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_shuffle4!(a, vqshrun_n_s64::(b), [0, 1, 2, 3]) @@ -10622,6 +11505,7 @@ pub unsafe fn vqshrun_high_n_s64(a: 
uint32x2_t, b: int64x2_t) -> u #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqaddb_u8(a: u8, b: i8) -> u8 { simd_extract(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) } @@ -10630,6 +11514,7 @@ pub unsafe fn vsqaddb_u8(a: u8, b: i8) -> u8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqaddh_u16(a: u16, b: i16) -> u16 { simd_extract(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) } @@ -10638,6 +11523,7 @@ pub unsafe fn vsqaddh_u16(a: u16, b: i16) -> u16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqadds_u32(a: u32, b: i32) -> u32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10651,6 +11537,7 @@ pub unsafe fn vsqadds_u32(a: u32, b: i32) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqaddd_u64(a: u64, b: i64) -> u64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10664,6 +11551,7 @@ pub unsafe fn vsqaddd_u64(a: u64, b: i64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fsqrt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqrt_f32(a: float32x2_t) -> float32x2_t { simd_fsqrt(a) } @@ -10672,6 +11560,7 @@ pub unsafe fn vsqrt_f32(a: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fsqrt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqrtq_f32(a: float32x4_t) -> float32x4_t { simd_fsqrt(a) } @@ -10680,6 +11569,7 @@ pub unsafe fn vsqrtq_f32(a: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fsqrt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqrt_f64(a: float64x1_t) -> float64x1_t { simd_fsqrt(a) } @@ -10688,6 +11578,7 @@ pub unsafe fn vsqrt_f64(a: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fsqrt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqrtq_f64(a: float64x2_t) -> float64x2_t { simd_fsqrt(a) } @@ -10696,6 +11587,7 @@ pub unsafe fn vsqrtq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frsqrte))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrte_f64(a: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10709,6 +11601,7 @@ pub unsafe fn vrsqrte_f64(a: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frsqrte))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10722,6 +11615,7 @@ pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frsqrte))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrtes_f32(a: f32) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10735,6 +11629,7 @@ pub unsafe fn vrsqrtes_f32(a: f32) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, 
assert_instr(frsqrte))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrted_f64(a: f64) -> f64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10748,6 +11643,7 @@ pub unsafe fn vrsqrted_f64(a: f64) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frsqrts))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10761,6 +11657,7 @@ pub unsafe fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frsqrts))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10774,6 +11671,7 @@ pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frsqrts))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrtss_f32(a: f32, b: f32) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10787,6 +11685,7 @@ pub unsafe fn vrsqrtss_f32(a: f32, b: f32) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frsqrts))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrtsd_f64(a: f64, b: f64) -> f64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10800,6 +11699,7 @@ pub unsafe fn vrsqrtsd_f64(a: f64, b: f64) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecpe))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpe_f64(a: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10813,6 +11713,7 @@ pub unsafe fn vrecpe_f64(a: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecpe))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10826,6 +11727,7 @@ pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecpe))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpes_f32(a: f32) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10839,6 +11741,7 @@ pub unsafe fn vrecpes_f32(a: f32) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecpe))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecped_f64(a: f64) -> f64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10852,6 +11755,7 @@ pub unsafe fn vrecped_f64(a: f64) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10865,6 +11769,7 @@ pub unsafe fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10878,6 +11783,7 @@ pub unsafe fn 
vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpss_f32(a: f32, b: f32) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10891,6 +11797,7 @@ pub unsafe fn vrecpss_f32(a: f32, b: f32) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpsd_f64(a: f64, b: f64) -> f64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10904,6 +11811,7 @@ pub unsafe fn vrecpsd_f64(a: f64, b: f64) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecpx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpxs_f32(a: f32) -> f32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10917,6 +11825,7 @@ pub unsafe fn vrecpxs_f32(a: f32) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecpx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpxd_f64(a: f64) -> f64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10930,6 +11839,7 @@ pub unsafe fn vrecpxd_f64(a: f64) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t { transmute(a) } @@ -10938,6 +11848,7 @@ pub unsafe fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t { transmute(a) } @@ -10946,6 +11857,7 @@ pub unsafe fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t { transmute(a) } @@ -10954,6 +11866,7 @@ pub unsafe fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t { transmute(a) } @@ -10962,6 +11875,7 @@ pub unsafe fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { transmute(a) } @@ -10970,6 +11884,7 @@ pub unsafe fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { transmute(a) } @@ -10978,6 +11893,7 @@ pub unsafe fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { transmute(a) } @@ -10986,6 +11902,7 @@ pub unsafe fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] 
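// --- Illustrative note, not part of the patch: FRECPE/FRSQRTE above return
// low-precision estimates meant to be refined with FRECPS/FRSQRTS
// Newton-Raphson steps (frecps computes 2 - a*b; frsqrts computes (3 - a*b)/2).
// A hedged sketch, assuming an aarch64 target; `refine_demo` is our own name.
#[cfg(target_arch = "aarch64")]
fn refine_demo(x: f32) -> (f32, f32) {
    use std::arch::aarch64::{vrecpes_f32, vrecpss_f32, vrsqrtes_f32, vrsqrtss_f32};
    unsafe {
        // Reciprocal: e' = e * (2 - x*e); each step roughly doubles the
        // number of correct bits in the estimate.
        let mut r = vrecpes_f32(x);
        r *= vrecpss_f32(x, r);
        r *= vrecpss_f32(x, r);
        // Reciprocal square root: e' = e * (3 - (x*e)*e) / 2.
        let mut s = vrsqrtes_f32(x);
        s *= vrsqrtss_f32(x * s, s);
        s *= vrsqrtss_f32(x * s, s);
        (r, s) // ~1/x and ~1/sqrt(x) after two refinement steps
    }
}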
+#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { transmute(a) } @@ -10994,6 +11911,7 @@ pub unsafe fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t { transmute(a) } @@ -11002,6 +11920,7 @@ pub unsafe fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t { transmute(a) } @@ -11010,6 +11929,7 @@ pub unsafe fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t { transmute(a) } @@ -11018,6 +11938,7 @@ pub unsafe fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t { transmute(a) } @@ -11026,6 +11947,7 @@ pub unsafe fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { transmute(a) } @@ -11034,6 +11956,7 @@ pub unsafe fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { transmute(a) } @@ -11042,6 +11965,7 @@ pub unsafe fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { transmute(a) } @@ -11050,6 +11974,7 @@ pub unsafe fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { transmute(a) } @@ -11058,6 +11983,7 @@ pub unsafe fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t { transmute(a) } @@ -11066,6 +11992,7 @@ pub unsafe fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t { transmute(a) } @@ -11074,6 +12001,7 @@ pub unsafe fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t { 
transmute(a) } @@ -11082,6 +12010,7 @@ pub unsafe fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t { transmute(a) } @@ -11090,6 +12019,7 @@ pub unsafe fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { transmute(a) } @@ -11098,6 +12028,7 @@ pub unsafe fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { transmute(a) } @@ -11106,6 +12037,7 @@ pub unsafe fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { transmute(a) } @@ -11114,6 +12046,7 @@ pub unsafe fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { transmute(a) } @@ -11122,6 +12055,7 @@ pub unsafe fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t { transmute(a) } @@ -11130,6 +12064,7 @@ pub unsafe fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t { transmute(a) } @@ -11138,6 +12073,7 @@ pub unsafe fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { transmute(a) } @@ -11146,6 +12082,7 @@ pub unsafe fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t { transmute(a) } @@ -11154,6 +12091,7 @@ pub unsafe fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { transmute(a) } @@ -11162,6 +12100,7 @@ pub unsafe fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { transmute(a) } @@ -11170,6 +12109,7 @@ pub unsafe fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { 
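// --- Illustrative note, not part of the patch: every vreinterpret* above is a
// bit-for-bit reinterpretation (a transmute that lowers to no instruction,
// hence assert_instr(nop)). A small sketch, assuming aarch64; names are ours.
#[cfg(target_arch = "aarch64")]
fn reinterpret_demo() {
    use std::arch::aarch64::{vdupq_n_f64, vgetq_lane_u64, vreinterpretq_u64_f64};
    unsafe {
        // Reinterpreting f64 lanes as u64 exposes the IEEE-754 encoding:
        // 1.0f64 is 0x3FF0_0000_0000_0000.
        let v = vreinterpretq_u64_f64(vdupq_n_f64(1.0));
        assert_eq!(vgetq_lane_u64::<0>(v), 0x3FF0_0000_0000_0000);
    }
}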
#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { transmute(a) } @@ -11178,6 +12118,7 @@ pub unsafe fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { transmute(a) } @@ -11186,6 +12127,7 @@ pub unsafe fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { transmute(a) } @@ -11194,6 +12136,7 @@ pub unsafe fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { transmute(a) } @@ -11202,6 +12145,7 @@ pub unsafe fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { transmute(a) } @@ -11210,6 +12154,7 @@ pub unsafe fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { transmute(a) } @@ -11218,6 +12163,7 @@ pub unsafe fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t { transmute(a) } @@ -11226,6 +12172,7 @@ pub unsafe fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { transmute(a) } @@ -11234,6 +12181,7 @@ pub unsafe fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { transmute(a) } @@ -11242,6 +12190,7 @@ pub unsafe fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { transmute(a) } @@ -11250,6 +12199,7 @@ pub unsafe fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { transmute(a) } @@ -11258,6 +12208,7 @@ pub unsafe fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = 
"1.59.0")] pub unsafe fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { transmute(a) } @@ -11266,6 +12217,7 @@ pub unsafe fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { transmute(a) } @@ -11274,6 +12226,7 @@ pub unsafe fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { transmute(a) } @@ -11282,6 +12235,7 @@ pub unsafe fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t { transmute(a) } @@ -11290,6 +12244,7 @@ pub unsafe fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { transmute(a) } @@ -11298,6 +12253,7 @@ pub unsafe fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { transmute(a) } @@ -11306,6 +12262,7 @@ pub unsafe fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { transmute(a) } @@ -11314,6 +12271,7 @@ pub unsafe fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { transmute(a) } @@ -11322,6 +12280,7 @@ pub unsafe fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { transmute(a) } @@ -11330,6 +12289,7 @@ pub unsafe fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { transmute(a) } @@ -11338,6 +12298,7 @@ pub unsafe fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t { transmute(a) } @@ -11346,6 +12307,7 @@ pub unsafe fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t { transmute(a) } @@ -11354,6 +12316,7 
@@ pub unsafe fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { transmute(a) } @@ -11362,6 +12325,7 @@ pub unsafe fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { transmute(a) } @@ -11370,6 +12334,7 @@ pub unsafe fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { transmute(a) } @@ -11378,6 +12343,7 @@ pub unsafe fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { transmute(a) } @@ -11386,6 +12352,7 @@ pub unsafe fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { transmute(a) } @@ -11394,6 +12361,7 @@ pub unsafe fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { transmute(a) } @@ -11402,6 +12370,7 @@ pub unsafe fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t { transmute(a) } @@ -11410,6 +12379,7 @@ pub unsafe fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { transmute(a) } @@ -11418,6 +12388,7 @@ pub unsafe fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { transmute(a) } @@ -11426,6 +12397,7 @@ pub unsafe fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(srshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshld_s64(a: i64, b: i64) -> i64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11439,6 +12411,7 @@ pub unsafe fn vrshld_s64(a: i64, b: i64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(urshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshld_u64(a: u64, b: i64) -> u64 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11453,6 +12426,7 @@ pub unsafe fn vrshld_u64(a: u64, b: i64) -> u64 { #[target_feature(enable = 
"neon")] #[cfg_attr(test, assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrd_n_s64(a: i64) -> i64 { static_assert!(N : i32 where N >= 1 && N <= 64); vrshld_s64(a, -N as i64) @@ -11463,6 +12437,7 @@ pub unsafe fn vrshrd_n_s64(a: i64) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrd_n_u64(a: u64) -> u64 { static_assert!(N : i32 where N >= 1 && N <= 64); vrshld_u64(a, -N as i64) @@ -11473,6 +12448,7 @@ pub unsafe fn vrshrd_n_u64(a: u64) -> u64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_shuffle16!(a, vrshrn_n_s16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -11483,6 +12459,7 @@ pub unsafe fn vrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_shuffle8!(a, vrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) @@ -11493,6 +12470,7 @@ pub unsafe fn vrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_shuffle4!(a, vrshrn_n_s64::(b), [0, 1, 2, 3]) @@ -11503,6 +12481,7 @@ pub unsafe fn vrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_shuffle16!(a, vrshrn_n_u16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -11513,6 +12492,7 @@ pub unsafe fn vrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> ui #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_shuffle8!(a, vrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) @@ -11523,6 +12503,7 @@ pub unsafe fn vrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> u #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_shuffle4!(a, vrshrn_n_u64::(b), [0, 1, 2, 3]) @@ -11533,6 +12514,7 @@ pub unsafe fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> u #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(srsra, N = 2))] 
#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsrad_n_s64(a: i64, b: i64) -> i64 { static_assert!(N : i32 where N >= 1 && N <= 64); let b: i64 = vrshrd_n_s64::(b); @@ -11544,6 +12526,7 @@ pub unsafe fn vrsrad_n_s64(a: i64, b: i64) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsrad_n_u64(a: u64, b: u64) -> u64 { static_assert!(N : i32 where N >= 1 && N <= 64); let b: u64 = vrshrd_n_u64::(b); @@ -11554,6 +12537,7 @@ pub unsafe fn vrsrad_n_u64(a: u64, b: u64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rsubhn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { let x: int8x8_t = vrsubhn_s16(b, c); simd_shuffle16!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -11563,6 +12547,7 @@ pub unsafe fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rsubhn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { let x: int16x4_t = vrsubhn_s32(b, c); simd_shuffle8!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -11572,6 +12557,7 @@ pub unsafe fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int1 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rsubhn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { let x: int32x2_t = vrsubhn_s64(b, c); simd_shuffle4!(a, x, [0, 1, 2, 3]) @@ -11581,6 +12567,7 @@ pub unsafe fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int3 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rsubhn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { let x: uint8x8_t = vrsubhn_u16(b, c); simd_shuffle16!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -11590,6 +12577,7 @@ pub unsafe fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> ui #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rsubhn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { let x: uint16x4_t = vrsubhn_u32(b, c); simd_shuffle8!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -11599,6 +12587,7 @@ pub unsafe fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> u #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rsubhn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { let x: uint32x2_t = vrsubhn_u64(b, c); simd_shuffle4!(a, x, [0, 1, 2, 3]) @@ -11609,6 +12598,7 @@ pub unsafe fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> u #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vset_lane_f64(a: f64, b: float64x1_t) -> float64x1_t { static_assert!(LANE : i32 where LANE == 0); 
simd_insert(b, LANE as u32, a) @@ -11619,6 +12609,7 @@ pub unsafe fn vset_lane_f64(a: f64, b: float64x1_t) -> float64x #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsetq_lane_f64(a: f64, b: float64x2_t) -> float64x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -11628,6 +12619,7 @@ pub unsafe fn vsetq_lane_f64(a: f64, b: float64x2_t) -> float64 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshld_s64(a: i64, b: i64) -> i64 { transmute(vshl_s64(transmute(a), transmute(b))) } @@ -11636,6 +12628,7 @@ pub unsafe fn vshld_s64(a: i64, b: i64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ushl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshld_u64(a: u64, b: i64) -> u64 { transmute(vshl_u64(transmute(a), transmute(b))) } @@ -11645,6 +12638,7 @@ pub unsafe fn vshld_u64(a: u64, b: i64) -> u64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sshll2, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshll_high_n_s8(a: int8x16_t) -> int16x8_t { static_assert!(N : i32 where N >= 0 && N <= 8); let b: int8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -11656,6 +12650,7 @@ pub unsafe fn vshll_high_n_s8(a: int8x16_t) -> int16x8_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sshll2, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshll_high_n_s16(a: int16x8_t) -> int32x4_t { static_assert!(N : i32 where N >= 0 && N <= 16); let b: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]); @@ -11667,6 +12662,7 @@ pub unsafe fn vshll_high_n_s16(a: int16x8_t) -> int32x4_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sshll2, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshll_high_n_s32(a: int32x4_t) -> int64x2_t { static_assert!(N : i32 where N >= 0 && N <= 32); let b: int32x2_t = simd_shuffle2!(a, a, [2, 3]); @@ -11678,6 +12674,7 @@ pub unsafe fn vshll_high_n_s32(a: int32x4_t) -> int64x2_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ushll2, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshll_high_n_u8(a: uint8x16_t) -> uint16x8_t { static_assert!(N : i32 where N >= 0 && N <= 8); let b: uint8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -11689,6 +12686,7 @@ pub unsafe fn vshll_high_n_u8(a: uint8x16_t) -> uint16x8_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ushll2, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshll_high_n_u16(a: uint16x8_t) -> uint32x4_t { static_assert!(N : i32 where N >= 0 && N <= 16); let b: uint16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]); @@ -11700,6 +12698,7 @@ pub unsafe fn vshll_high_n_u16(a: uint16x8_t) -> uint32x4_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ushll2, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshll_high_n_u32(a: uint32x4_t) -> uint64x2_t { static_assert!(N : i32 where N >= 0 && N 
<= 32); let b: uint32x2_t = simd_shuffle2!(a, a, [2, 3]); @@ -11711,6 +12710,7 @@ pub unsafe fn vshll_high_n_u32(a: uint32x4_t) -> uint64x2_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_shuffle16!(a, vshrn_n_s16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -11721,6 +12721,7 @@ pub unsafe fn vshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_shuffle8!(a, vshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) @@ -11731,6 +12732,7 @@ pub unsafe fn vshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int1 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_shuffle4!(a, vshrn_n_s64::(b), [0, 1, 2, 3]) @@ -11741,6 +12743,7 @@ pub unsafe fn vshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int3 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_shuffle16!(a, vshrn_n_u16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -11751,6 +12754,7 @@ pub unsafe fn vshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uin #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_shuffle8!(a, vshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) @@ -11761,6 +12765,7 @@ pub unsafe fn vshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> ui #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_shuffle4!(a, vshrn_n_u64::(b), [0, 1, 2, 3]) @@ -12004,6 +13009,7 @@ pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } @@ -12012,6 +13018,7 @@ pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]) } @@ -12020,6 +13027,7 @@ pub unsafe fn 
@@ -12004,6 +13009,7 @@ pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
@@ -12012,6 +13018,7 @@ pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]) }
@@ -12020,6 +13027,7 @@ pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_shuffle4!(a, b, [0, 4, 2, 6]) }
@@ -12028,6 +13036,7 @@ pub unsafe fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
@@ -12036,6 +13045,7 @@ pub unsafe fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_shuffle4!(a, b, [0, 4, 2, 6]) }
@@ -12044,6 +13054,7 @@ pub unsafe fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
@@ -12052,6 +13063,7 @@ pub unsafe fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]) }
@@ -12060,6 +13072,7 @@ pub unsafe fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_shuffle4!(a, b, [0, 4, 2, 6]) }
@@ -12068,6 +13081,7 @@ pub unsafe fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
@@ -12076,6 +13090,7 @@ pub unsafe fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_shuffle4!(a, b, [0, 4, 2, 6]) }
@@ -12084,6 +13099,7 @@ pub unsafe fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
@@ -12092,6 +13108,7 @@ pub unsafe fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]) }
@@ -12100,6 +13117,7 @@ pub unsafe fn
vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_shuffle4!(a, b, [0, 4, 2, 6]) } @@ -12108,6 +13126,7 @@ pub unsafe fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } @@ -12116,6 +13135,7 @@ pub unsafe fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12124,6 +13144,7 @@ pub unsafe fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12132,6 +13153,7 @@ pub unsafe fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12140,6 +13162,7 @@ pub unsafe fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12148,6 +13171,7 @@ pub unsafe fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12156,6 +13180,7 @@ pub unsafe fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_shuffle4!(a, b, [0, 4, 2, 6]) } @@ -12164,6 +13189,7 @@ pub unsafe fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12172,6 +13198,7 @@ pub unsafe fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12180,6 +13207,7 @@ pub unsafe fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn2))] 
+#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } @@ -12188,6 +13216,7 @@ pub unsafe fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]) } @@ -12196,6 +13225,7 @@ pub unsafe fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_shuffle4!(a, b, [1, 5, 3, 7]) } @@ -12204,6 +13234,7 @@ pub unsafe fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } @@ -12212,6 +13243,7 @@ pub unsafe fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_shuffle4!(a, b, [1, 5, 3, 7]) } @@ -12220,6 +13252,7 @@ pub unsafe fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } @@ -12228,6 +13261,7 @@ pub unsafe fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]) } @@ -12236,6 +13270,7 @@ pub unsafe fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_shuffle4!(a, b, [1, 5, 3, 7]) } @@ -12244,6 +13279,7 @@ pub unsafe fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } @@ -12252,6 +13288,7 @@ pub unsafe fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_shuffle4!(a, b, [1, 5, 3, 7]) } @@ -12260,6 +13297,7 @@ pub unsafe fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn2))] 
+#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } @@ -12268,6 +13306,7 @@ pub unsafe fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]) } @@ -12276,6 +13315,7 @@ pub unsafe fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_shuffle4!(a, b, [1, 5, 3, 7]) } @@ -12284,6 +13324,7 @@ pub unsafe fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } @@ -12292,6 +13333,7 @@ pub unsafe fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -12300,6 +13342,7 @@ pub unsafe fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -12308,6 +13351,7 @@ pub unsafe fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -12316,6 +13360,7 @@ pub unsafe fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -12324,6 +13369,7 @@ pub unsafe fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -12332,6 +13378,7 @@ pub unsafe fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(trn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_shuffle4!(a, b, [1, 5, 3, 7]) } @@ -12340,6 +13387,7 @@ pub unsafe fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2_f32(a: 
float32x2_t, b: float32x2_t) -> float32x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -12348,6 +13396,7 @@ pub unsafe fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -12356,6 +13405,7 @@ pub unsafe fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } @@ -12364,6 +13414,7 @@ pub unsafe fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]) } @@ -12372,6 +13423,7 @@ pub unsafe fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_shuffle4!(a, b, [0, 4, 1, 5]) } @@ -12380,6 +13432,7 @@ pub unsafe fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } @@ -12388,6 +13441,7 @@ pub unsafe fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12396,6 +13450,7 @@ pub unsafe fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_shuffle4!(a, b, [0, 4, 1, 5]) } @@ -12404,6 +13459,7 @@ pub unsafe fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12412,6 +13468,7 @@ pub unsafe fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } @@ -12420,6 +13477,7 @@ pub unsafe fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 
6, 22, 7, 23]) } @@ -12428,6 +13486,7 @@ pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_shuffle4!(a, b, [0, 4, 1, 5]) } @@ -12436,6 +13495,7 @@ pub unsafe fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } @@ -12444,6 +13504,7 @@ pub unsafe fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12452,6 +13513,7 @@ pub unsafe fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_shuffle4!(a, b, [0, 4, 1, 5]) } @@ -12460,6 +13522,7 @@ pub unsafe fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12468,6 +13531,7 @@ pub unsafe fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } @@ -12476,6 +13540,7 @@ pub unsafe fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]) } @@ -12484,6 +13549,7 @@ pub unsafe fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_shuffle4!(a, b, [0, 4, 1, 5]) } @@ -12492,6 +13558,7 @@ pub unsafe fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } @@ -12500,6 +13567,7 @@ pub unsafe fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12508,6 +13576,7 @@ pub unsafe fn 
vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12516,6 +13585,7 @@ pub unsafe fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_shuffle4!(a, b, [0, 4, 1, 5]) } @@ -12524,6 +13594,7 @@ pub unsafe fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12532,6 +13603,7 @@ pub unsafe fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } @@ -12540,6 +13612,7 @@ pub unsafe fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]) } @@ -12548,6 +13621,7 @@ pub unsafe fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_shuffle4!(a, b, [2, 6, 3, 7]) } @@ -12556,6 +13630,7 @@ pub unsafe fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } @@ -12564,6 +13639,7 @@ pub unsafe fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -12572,6 +13648,7 @@ pub unsafe fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_shuffle4!(a, b, [2, 6, 3, 7]) } @@ -12580,6 +13657,7 @@ pub unsafe fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -12588,6 +13666,7 @@ pub unsafe fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } @@ -12596,6 +13675,7 @@ pub unsafe fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]) } @@ -12604,6 +13684,7 @@ pub unsafe fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_shuffle4!(a, b, [2, 6, 3, 7]) } @@ -12612,6 +13693,7 @@ pub unsafe fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } @@ -12620,6 +13702,7 @@ pub unsafe fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -12628,6 +13711,7 @@ pub unsafe fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_shuffle4!(a, b, [2, 6, 3, 7]) } @@ -12636,6 +13720,7 @@ pub unsafe fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -12644,6 +13729,7 @@ pub unsafe fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } @@ -12652,6 +13738,7 @@ pub unsafe fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]) } @@ -12660,6 +13747,7 @@ pub unsafe fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_shuffle4!(a, b, [2, 6, 3, 7]) } @@ -12668,6 +13756,7 @@ pub unsafe fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { #[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } @@ -12676,6 +13765,7 @@ pub unsafe fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -12684,6 +13774,7 @@ pub unsafe fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -12692,6 +13783,7 @@ pub unsafe fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_shuffle4!(a, b, [2, 6, 3, 7]) } @@ -12700,6 +13792,7 @@ pub unsafe fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -12708,6 +13801,7 @@ pub unsafe fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } @@ -12716,6 +13810,7 @@ pub unsafe fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]) } @@ -12724,6 +13819,7 @@ pub unsafe fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_shuffle4!(a, b, [0, 2, 4, 6]) } @@ -12732,6 +13828,7 @@ pub unsafe fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } @@ -12740,6 +13837,7 @@ pub unsafe fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_shuffle4!(a, b, [0, 2, 4, 6]) } @@ -12748,6 +13846,7 @@ pub unsafe fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp1))] +#[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } @@ -12756,6 +13855,7 @@ pub unsafe fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]) } @@ -12764,6 +13864,7 @@ pub unsafe fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_shuffle4!(a, b, [0, 2, 4, 6]) } @@ -12772,6 +13873,7 @@ pub unsafe fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } @@ -12780,6 +13882,7 @@ pub unsafe fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_shuffle4!(a, b, [0, 2, 4, 6]) } @@ -12788,6 +13891,7 @@ pub unsafe fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } @@ -12796,6 +13900,7 @@ pub unsafe fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]) } @@ -12804,6 +13909,7 @@ pub unsafe fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_shuffle4!(a, b, [0, 2, 4, 6]) } @@ -12812,6 +13918,7 @@ pub unsafe fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } @@ -12820,6 +13927,7 @@ pub unsafe fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12828,6 +13936,7 @@ pub unsafe fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12836,6 +13945,7 @@ pub unsafe fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12844,6 +13954,7 @@ pub unsafe fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12852,6 +13963,7 @@ pub unsafe fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12860,6 +13972,7 @@ pub unsafe fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_shuffle4!(a, b, [0, 2, 4, 6]) } @@ -12868,6 +13981,7 @@ pub unsafe fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12876,6 +13990,7 @@ pub unsafe fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_shuffle2!(a, b, [0, 2]) } @@ -12884,6 +13999,7 @@ pub unsafe fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } @@ -12892,6 +14008,7 @@ pub unsafe fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]) } @@ -12900,6 +14017,7 @@ pub unsafe fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_shuffle4!(a, b, [1, 3, 5, 7]) } @@ -12908,6 +14026,7 @@ pub unsafe fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { 
simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } @@ -12916,6 +14035,7 @@ pub unsafe fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_shuffle4!(a, b, [1, 3, 5, 7]) } @@ -12924,6 +14044,7 @@ pub unsafe fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } @@ -12932,6 +14053,7 @@ pub unsafe fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]) } @@ -12940,6 +14062,7 @@ pub unsafe fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_shuffle4!(a, b, [1, 3, 5, 7]) } @@ -12948,6 +14071,7 @@ pub unsafe fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } @@ -12956,6 +14080,7 @@ pub unsafe fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_shuffle4!(a, b, [1, 3, 5, 7]) } @@ -12964,6 +14089,7 @@ pub unsafe fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } @@ -12972,6 +14098,7 @@ pub unsafe fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]) } @@ -12980,6 +14107,7 @@ pub unsafe fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_shuffle4!(a, b, [1, 3, 5, 7]) } @@ -12988,6 +14116,7 @@ pub unsafe fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> 
poly16x8_t { simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } @@ -12996,6 +14125,7 @@ pub unsafe fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -13004,6 +14134,7 @@ pub unsafe fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -13012,6 +14143,7 @@ pub unsafe fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -13020,6 +14152,7 @@ pub unsafe fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -13028,6 +14161,7 @@ pub unsafe fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -13036,6 +14170,7 @@ pub unsafe fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uzp2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_shuffle4!(a, b, [1, 3, 5, 7]) } @@ -13044,6 +14179,7 @@ pub unsafe fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -13052,6 +14188,7 @@ pub unsafe fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(zip2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_shuffle2!(a, b, [1, 3]) } @@ -13060,6 +14197,7 @@ pub unsafe fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uabal))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { let d: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); let e: uint8x8_t = simd_shuffle8!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -13071,6 +14209,7 @@ pub unsafe fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uabal))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, 
c: uint16x8_t) -> uint32x4_t { let d: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]); let e: uint16x4_t = simd_shuffle4!(c, c, [4, 5, 6, 7]); @@ -13082,6 +14221,7 @@ pub unsafe fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uabal))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { let d: uint32x2_t = simd_shuffle2!(b, b, [2, 3]); let e: uint32x2_t = simd_shuffle2!(c, c, [2, 3]); @@ -13093,6 +14233,7 @@ pub unsafe fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sabal))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { let d: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); let e: int8x8_t = simd_shuffle8!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -13105,6 +14246,7 @@ pub unsafe fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sabal))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { let d: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]); let e: int16x4_t = simd_shuffle4!(c, c, [4, 5, 6, 7]); @@ -13117,6 +14259,7 @@ pub unsafe fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sabal))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { let d: int32x2_t = simd_shuffle2!(b, b, [2, 3]); let e: int32x2_t = simd_shuffle2!(c, c, [2, 3]); @@ -13129,6 +14272,7 @@ pub unsafe fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqabs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13142,6 +14286,7 @@ pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqabs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13155,6 +14300,7 @@ pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqabs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqabsb_s8(a: i8) -> i8 { simd_extract(vqabs_s8(vdup_n_s8(a)), 0) } @@ -13163,6 +14309,7 @@ pub unsafe fn vqabsb_s8(a: i8) -> i8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqabs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqabsh_s16(a: i16) -> i16 { simd_extract(vqabs_s16(vdup_n_s16(a)), 0) } @@ -13171,6 +14318,7 @@ pub unsafe fn vqabsh_s16(a: i16) -> i16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqabs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqabss_s32(a: i32) -> i32 { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13184,6 +14332,7 @@ pub unsafe fn vqabss_s32(a: i32) -> i32 { 
#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqabs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqabsd_s64(a: i64) -> i64 { #[allow(improper_ctypes)] extern "unadjusted" {
@@ -13198,6 +14347,7 @@ pub unsafe fn vqabsd_s64(a: i64) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 { static_assert!(N : i32 where N >= 0 && N <= 63); transmute(vsli_n_s64::<N>(transmute(a), transmute(b)))
@@ -13208,6 +14358,7 @@ pub unsafe fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 { static_assert!(N : i32 where N >= 0 && N <= 63); transmute(vsli_n_u64::<N>(transmute(a), transmute(b)))
@@ -13218,6 +14369,7 @@ pub unsafe fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 { static_assert!(N : i32 where N >= 1 && N <= 64); transmute(vsri_n_s64::<N>(transmute(a), transmute(b)))
@@ -13228,6 +14380,7 @@ pub unsafe fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsrid_n_u64<const N: i32>(a: u64, b: u64) -> u64 { static_assert!(N : i32 where N >= 1 && N <= 64); transmute(vsri_n_u64::<N>(transmute(a), transmute(b)))
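Before the mod.rs hunks, one behavioral note on the saturating-abs scalars stabilized just above, since their extern-fn lowering hides it: unlike a wrapping absolute value, the vqabs* family clamps the most negative input to the type's maximum rather than returning it unchanged. A minimal check, illustrative only and not part of this diff (aarch64):

    #[cfg(target_arch = "aarch64")]
    unsafe fn qabs_saturates() {
        use std::arch::aarch64::*;
        // i8::MIN.wrapping_abs() would stay -128; the saturating form clamps to 127.
        assert_eq!(vqabsb_s8(i8::MIN), i8::MAX);
        assert_eq!(vqabsh_s16(-1234), 1234); // ordinary inputs behave like abs
        assert_eq!(vqabsd_s64(i64::MIN), i64::MAX);
    }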
diff --git a/crates/core_arch/src/aarch64/neon/mod.rs b/crates/core_arch/src/aarch64/neon/mod.rs index 0c2b06d73c..65ba527ee0 100644 --- a/crates/core_arch/src/aarch64/neon/mod.rs +++ b/crates/core_arch/src/aarch64/neon/mod.rs
@@ -20,19 +20,24 @@ use stdarch_test::assert_instr; types! { /// ARM-specific 64-bit wide vector of one packed `f64`. + #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub struct float64x1_t(f64); // FIXME: check this! /// ARM-specific 128-bit wide vector of two packed `f64`. + #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub struct float64x2_t(f64, f64); } /// ARM-specific type containing two `float64x1_t` vectors. #[derive(Copy, Clone, Debug)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub struct float64x1x2_t(pub float64x1_t, pub float64x1_t); /// ARM-specific type containing three `float64x1_t` vectors. #[derive(Copy, Clone, Debug)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub struct float64x1x3_t(pub float64x1_t, pub float64x1_t, pub float64x1_t); /// ARM-specific type containing four `float64x1_t` vectors. #[derive(Copy, Clone, Debug)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub struct float64x1x4_t( pub float64x1_t, pub float64x1_t,
@@ -42,12 +47,15 @@ pub struct float64x1x4_t( /// ARM-specific type containing two `float64x2_t` vectors. #[derive(Copy, Clone, Debug)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub struct float64x2x2_t(pub float64x2_t, pub float64x2_t); /// ARM-specific type containing three `float64x2_t` vectors. #[derive(Copy, Clone, Debug)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub struct float64x2x3_t(pub float64x2_t, pub float64x2_t, pub float64x2_t); /// ARM-specific type containing four `float64x2_t` vectors. #[derive(Copy, Clone, Debug)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub struct float64x2x4_t( pub float64x2_t, pub float64x2_t,
@@ -354,6 +362,7 @@ extern "unadjusted" { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N1 = 0, N2 = 0))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_s64<const N1: i32, const N2: i32>( _a: int64x1_t, b: int64x1_t,
@@ -368,6 +377,7 @@ pub unsafe fn vcopy_lane_s64<const N1: i32, const N2: i32>( #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N1 = 0, N2 = 0))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_u64<const N1: i32, const N2: i32>( _a: uint64x1_t, b: uint64x1_t,
@@ -382,6 +392,7 @@ pub unsafe fn vcopy_lane_u64<const N1: i32, const N2: i32>( #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N1 = 0, N2 = 0))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_p64<const N1: i32, const N2: i32>( _a: poly64x1_t, b: poly64x1_t,
@@ -396,6 +407,7 @@ pub unsafe fn vcopy_lane_p64<const N1: i32, const N2: i32>( #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N1 = 0, N2 = 0))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_f64<const N1: i32, const N2: i32>( _a: float64x1_t, b: float64x1_t,
@@ -410,6 +422,7 @@ pub unsafe fn vcopy_lane_f64<const N1: i32, const N2: i32>( #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_s64<const LANE1: i32, const LANE2: i32>( _a: int64x1_t, b: int64x2_t,
@@ -424,6 +437,7 @@ pub unsafe fn vcopy_laneq_s64<const LANE1: i32, const LANE2: i32>( #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_u64<const LANE1: i32, const LANE2: i32>( _a: uint64x1_t, b: uint64x2_t,
@@ -438,6 +452,7 @@ pub unsafe fn vcopy_laneq_u64<const LANE1: i32, const LANE2: i32>( #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_p64<const LANE1: i32, const LANE2: i32>( _a: poly64x1_t, b: poly64x2_t,
@@ -452,6 +467,7 @@ pub unsafe fn vcopy_laneq_p64<const LANE1: i32, const LANE2: i32>( #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_f64<const LANE1: i32, const LANE2: i32>( _a: float64x1_t, b: float64x2_t,
@@ -465,6 +481,7 @@ pub unsafe fn vcopy_laneq_f64<const LANE1: i32, const LANE2: i32>( #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { read_unaligned(ptr.cast()) }
@@ -473,6 +490,7 @@ pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { read_unaligned(ptr.cast()) }
@@ -481,6 +499,7 @@ pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn
vld1_s16(ptr: *const i16) -> int16x4_t { read_unaligned(ptr.cast()) } @@ -489,6 +508,7 @@ pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { read_unaligned(ptr.cast()) } @@ -497,6 +517,7 @@ pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { read_unaligned(ptr.cast()) } @@ -505,6 +526,7 @@ pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { read_unaligned(ptr.cast()) } @@ -513,6 +535,7 @@ pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { read_unaligned(ptr.cast()) } @@ -521,6 +544,7 @@ pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { read_unaligned(ptr.cast()) } @@ -529,6 +553,7 @@ pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { read_unaligned(ptr.cast()) } @@ -537,6 +562,7 @@ pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { read_unaligned(ptr.cast()) } @@ -545,6 +571,7 @@ pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { read_unaligned(ptr.cast()) } @@ -553,6 +580,7 @@ pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { read_unaligned(ptr.cast()) } @@ -561,6 +589,7 @@ pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { read_unaligned(ptr.cast()) } @@ -569,6 +598,7 @@ pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { read_unaligned(ptr.cast()) } @@ -577,6 +607,7 @@ pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { read_unaligned(ptr.cast()) } @@ -585,6 +616,7 @@ pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { read_unaligned(ptr.cast()) } @@ -593,6 +625,7 @@ pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { read_unaligned(ptr.cast()) } @@ -601,6 +634,7 @@ pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { read_unaligned(ptr.cast()) } @@ -609,6 +643,7 @@ pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { read_unaligned(ptr.cast()) } @@ -617,6 +652,7 @@ pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { read_unaligned(ptr.cast()) } @@ -625,6 +661,7 @@ pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { #[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { read_unaligned(ptr.cast()) } @@ -633,6 +670,7 @@ pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { #[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { read_unaligned(ptr.cast()) } @@ -641,6 +679,7 @@ pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { read_unaligned(ptr.cast()) } @@ -649,6 +688,7 @@ pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { read_unaligned(ptr.cast()) } @@ -657,6 +697,7 @@ pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t { read_unaligned(ptr.cast()) } @@ -665,6 +706,7 @@ pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t { read_unaligned(ptr.cast()) } @@ -673,6 +715,7 @@ pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t { #[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_dup_f64(ptr: *const f64) -> float64x1_t { vld1_f64(ptr) } @@ -681,6 +724,7 @@ pub unsafe fn vld1_dup_f64(ptr: *const f64) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld1r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_dup_f64(ptr: *const f64) -> float64x2_t { let x = vld1q_lane_f64::<0>(ptr, transmute(f64x2::splat(0.))); simd_shuffle2!(x, x, [0, 0]) @@ -691,6 +735,7 @@ pub unsafe fn vld1q_dup_f64(ptr: *const f64) -> float64x2_t { #[target_feature(enable = "neon")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(ldr, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_lane_f64(ptr: *const f64, src: float64x1_t) -> float64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_insert(src, LANE as u32, *ptr) @@ -701,6 +746,7 @@ pub unsafe fn vld1_lane_f64(ptr: *const f64, src: float64x1_t) #[target_feature(enable = "neon")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(ld1, LANE = 1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_lane_f64(ptr: *const f64, src: float64x2_t) -> float64x2_t { static_assert_imm1!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -711,6 +757,7 @@ pub unsafe fn vld1q_lane_f64(ptr: *const f64, src: float64x2_t) #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { write_unaligned(ptr.cast(), a); } @@ -720,6 +767,7 @@ pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { write_unaligned(ptr.cast(), a); } @@ -729,6 +777,7 @@ pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { write_unaligned(ptr.cast(), a); } @@ -738,6 +787,7 @@ pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { write_unaligned(ptr.cast(), a); } @@ -747,6 +797,7 @@ pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { write_unaligned(ptr.cast(), a); } @@ -756,6 +807,7 @@ pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { write_unaligned(ptr.cast(), a); } @@ -765,6 +817,7 @@ pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] 
#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { write_unaligned(ptr.cast(), a); } @@ -774,6 +827,7 @@ pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { write_unaligned(ptr.cast(), a); } @@ -783,6 +837,7 @@ pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { write_unaligned(ptr.cast(), a); } @@ -792,6 +847,7 @@ pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { write_unaligned(ptr.cast(), a); } @@ -801,6 +857,7 @@ pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { write_unaligned(ptr.cast(), a); } @@ -810,6 +867,7 @@ pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { write_unaligned(ptr.cast(), a); } @@ -819,6 +877,7 @@ pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { write_unaligned(ptr.cast(), a); } @@ -828,6 +887,7 @@ pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { write_unaligned(ptr.cast(), a); } @@ -837,6 +897,7 @@ pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { write_unaligned(ptr.cast(), a); } @@ -846,6 +907,7 @@ pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { write_unaligned(ptr.cast(), a); } @@ -855,6 +917,7 @@ pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { write_unaligned(ptr.cast(), a); } @@ -864,6 +927,7 @@ pub unsafe fn vst1_p8(ptr: *mut p8, a: 
poly8x8_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { write_unaligned(ptr.cast(), a); } @@ -873,6 +937,7 @@ pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { write_unaligned(ptr.cast(), a); } @@ -882,6 +947,7 @@ pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { write_unaligned(ptr.cast(), a); } @@ -891,6 +957,7 @@ pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { write_unaligned(ptr.cast(), a); } @@ -900,6 +967,7 @@ pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { write_unaligned(ptr.cast(), a); } @@ -909,6 +977,7 @@ pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { write_unaligned(ptr.cast(), a); } @@ -918,6 +987,7 @@ pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { write_unaligned(ptr.cast(), a); } @@ -927,6 +997,7 @@ pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) { write_unaligned(ptr.cast(), a); } @@ -936,6 +1007,7 @@ pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) { write_unaligned(ptr.cast(), a); } @@ -944,6 +1016,7 @@ pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(abs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vabsd_s64(a: i64) -> i64 { vabsd_s64_(a) } @@ -951,6 +1024,7 @@ pub unsafe fn vabsd_s64(a: i64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(abs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vabs_s64(a: int64x1_t) -> int64x1_t { vabs_s64_(a) } @@ -958,6 +1032,7 @@ pub unsafe fn vabs_s64(a: int64x1_t) -> int64x1_t { 
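As a usage sketch only (not part of the patch): the `vld1_*`/`vst1_*` intrinsics stabilized above are plain unaligned loads and stores, so a load/store pair round-trips memory unchanged. The function name and arrays below are illustrative, and NEON is assumed available, as it is baseline on AArch64.

#[cfg(target_arch = "aarch64")]
fn vld1_vst1_round_trip() {
    use std::arch::aarch64::*;
    let src: [i16; 4] = [1, -2, 3, -4];
    let mut dst = [0i16; 4];
    unsafe {
        // Unaligned 8-byte load into a 64-bit vector (the `ldr` asserted above).
        let v: int16x4_t = vld1_s16(src.as_ptr());
        // Unaligned 8-byte store back out (the `str` asserted above).
        vst1_s16(dst.as_mut_ptr(), v);
    }
    assert_eq!(src, dst);
}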
#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(abs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t { vabsq_s64_(a) } @@ -968,6 +1043,7 @@ pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(bsl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vbsl_f64(a: uint64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { simd_select(transmute::<_, int64x1_t>(a), b, c) } @@ -975,6 +1051,7 @@ pub unsafe fn vbsl_f64(a: uint64x1_t, b: float64x1_t, c: float64x1_t) -> float64 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(bsl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vbsl_p64(a: poly64x1_t, b: poly64x1_t, c: poly64x1_t) -> poly64x1_t { simd_select(transmute::<_, int64x1_t>(a), b, c) } @@ -982,6 +1059,7 @@ pub unsafe fn vbsl_p64(a: poly64x1_t, b: poly64x1_t, c: poly64x1_t) -> poly64x1_ #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(bsl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vbslq_f64(a: uint64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { simd_select(transmute::<_, int64x2_t>(a), b, c) } @@ -989,6 +1067,7 @@ pub unsafe fn vbslq_f64(a: uint64x2_t, b: float64x2_t, c: float64x2_t) -> float6 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(bsl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vbslq_p64(a: poly64x2_t, b: poly64x2_t, c: poly64x2_t) -> poly64x2_t { simd_select(transmute::<_, int64x2_t>(a), b, c) } @@ -997,6 +1076,7 @@ pub unsafe fn vbslq_p64(a: poly64x2_t, b: poly64x2_t, c: poly64x2_t) -> poly64x2 #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(suqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { vuqadd_s8_(a, b) } @@ -1004,6 +1084,7 @@ pub unsafe fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(suqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { vuqaddq_s8_(a, b) } @@ -1011,6 +1092,7 @@ pub unsafe fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(suqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { vuqadd_s16_(a, b) } @@ -1018,6 +1100,7 @@ pub unsafe fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(suqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { vuqaddq_s16_(a, b) } @@ -1025,6 +1108,7 @@ pub unsafe fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(suqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { vuqadd_s32_(a, b) } @@ -1032,6 +1116,7 @@ pub unsafe fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(suqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub 
unsafe fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t { vuqaddq_s32_(a, b) } @@ -1039,6 +1124,7 @@ pub unsafe fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(suqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t { vuqadd_s64_(a, b) } @@ -1046,6 +1132,7 @@ pub unsafe fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(suqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { vuqaddq_s64_(a, b) } @@ -1054,6 +1141,7 @@ pub unsafe fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { vsqadd_u8_(a, b) } @@ -1061,6 +1149,7 @@ pub unsafe fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { vsqaddq_u8_(a, b) } @@ -1068,6 +1157,7 @@ pub unsafe fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { vsqadd_u16_(a, b) } @@ -1075,6 +1165,7 @@ pub unsafe fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { vsqaddq_u16_(a, b) } @@ -1082,6 +1173,7 @@ pub unsafe fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { vsqadd_u32_(a, b) } @@ -1089,6 +1181,7 @@ pub unsafe fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { vsqaddq_u32_(a, b) } @@ -1096,6 +1189,7 @@ pub unsafe fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { vsqadd_u64_(a, b) } @@ -1103,6 +1197,7 @@ pub unsafe fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { vsqaddq_u64_(a, b) } @@ -1111,6 +1206,7 @@ pub unsafe fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addp))] +#[stable(feature = "neon_intrinsics", 
since = "1.59.0")] pub unsafe fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { vpaddq_s16_(a, b) } @@ -1118,6 +1214,7 @@ pub unsafe fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { transmute(vpaddq_s16_(transmute(a), transmute(b))) } @@ -1125,6 +1222,7 @@ pub unsafe fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { vpaddq_s32_(a, b) } @@ -1132,6 +1230,7 @@ pub unsafe fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { transmute(vpaddq_s32_(transmute(a), transmute(b))) } @@ -1139,6 +1238,7 @@ pub unsafe fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { vpaddq_s64_(a, b) } @@ -1146,6 +1246,7 @@ pub unsafe fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { transmute(vpaddq_s64_(transmute(a), transmute(b))) } @@ -1153,6 +1254,7 @@ pub unsafe fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { vpaddq_s8_(a, b) } @@ -1160,6 +1262,7 @@ pub unsafe fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { transmute(vpaddq_s8_(transmute(a), transmute(b))) } @@ -1167,6 +1270,7 @@ pub unsafe fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpaddd_s64(a: int64x2_t) -> i64 { transmute(vaddvq_u64_(transmute(a))) } @@ -1174,6 +1278,7 @@ pub unsafe fn vpaddd_s64(a: int64x2_t) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpaddd_u64(a: uint64x2_t) -> u64 { transmute(vaddvq_u64_(transmute(a))) } @@ -1182,97 +1287,124 @@ pub unsafe fn vpaddd_u64(a: uint64x2_t) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddv_s16(a: int16x4_t) -> i16 { vaddv_s16_(a) } + /// Add across vector #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub 
unsafe fn vaddv_s32(a: int32x2_t) -> i32 { vaddv_s32_(a) } + /// Add across vector #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddv_s8(a: int8x8_t) -> i8 { vaddv_s8_(a) } + /// Add across vector #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddv_u16(a: uint16x4_t) -> u16 { vaddv_u16_(a) } + /// Add across vector #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddv_u32(a: uint32x2_t) -> u32 { vaddv_u32_(a) } + /// Add across vector #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddv_u8(a: uint8x8_t) -> u8 { vaddv_u8_(a) } + /// Add across vector #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddvq_s16(a: int16x8_t) -> i16 { vaddvq_s16_(a) } + /// Add across vector #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddvq_s32(a: int32x4_t) -> i32 { vaddvq_s32_(a) } + /// Add across vector #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddvq_s8(a: int8x16_t) -> i8 { vaddvq_s8_(a) } + /// Add across vector #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddvq_u16(a: uint16x8_t) -> u16 { vaddvq_u16_(a) } + /// Add across vector #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddvq_u32(a: uint32x4_t) -> u32 { vaddvq_u32_(a) } + /// Add across vector #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddvq_u8(a: uint8x16_t) -> u8 { vaddvq_u8_(a) } + /// Add across vector #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddvq_s64(a: int64x2_t) -> i64 { vaddvq_s64_(a) } + /// Add across vector #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(addp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddvq_u64(a: uint64x2_t) -> u64 { vaddvq_u64_(a) } @@ -1281,27 +1413,34 @@ pub unsafe fn vaddvq_u64(a: uint64x2_t) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(saddlv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddlv_s8(a: int8x8_t) -> i16 { vaddlv_s8_(a) as i16 } + /// Signed Add Long across Vector #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(saddlv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddlvq_s8(a: int8x16_t) -> i16 { vaddlvq_s8_(a) as i16 } + /// Unsigned Add Long across Vector #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uaddlv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddlv_u8(a: 
uint8x8_t) -> u16 { vaddlv_u8_(a) as u16 } + /// Unsigned Add Long across Vector #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uaddlv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddlvq_u8(a: uint8x16_t) -> u16 { vaddlvq_u8_(a) as u16 } @@ -1310,6 +1449,7 @@ pub unsafe fn vaddlvq_u8(a: uint8x16_t) -> u16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vadd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { simd_add(a, b) } @@ -1318,6 +1458,7 @@ pub unsafe fn vadd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_add(a, b) } @@ -1326,6 +1467,7 @@ pub unsafe fn vaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(add))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_add(a, b) } @@ -1334,6 +1476,7 @@ pub unsafe fn vadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(add))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_add(a, b) } @@ -1342,6 +1485,7 @@ pub unsafe fn vadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(add))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddd_s64(a: i64, b: i64) -> i64 { a.wrapping_add(b) } @@ -1350,6 +1494,7 @@ pub unsafe fn vaddd_s64(a: i64, b: i64) -> i64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(add))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vaddd_u64(a: u64, b: u64) -> u64 { a.wrapping_add(b) } @@ -1358,6 +1503,7 @@ pub unsafe fn vaddd_u64(a: u64, b: u64) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smaxv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxv_s8(a: int8x8_t) -> i8 { vmaxv_s8_(a) } @@ -1366,6 +1512,7 @@ pub unsafe fn vmaxv_s8(a: int8x8_t) -> i8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smaxv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxvq_s8(a: int8x16_t) -> i8 { vmaxvq_s8_(a) } @@ -1374,6 +1521,7 @@ pub unsafe fn vmaxvq_s8(a: int8x16_t) -> i8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smaxv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxv_s16(a: int16x4_t) -> i16 { vmaxv_s16_(a) } @@ -1382,6 +1530,7 @@ pub unsafe fn vmaxv_s16(a: int16x4_t) -> i16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smaxv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxvq_s16(a: int16x8_t) -> i16 { vmaxvq_s16_(a) } @@ -1390,6 +1539,7 @@ pub unsafe fn vmaxvq_s16(a: int16x8_t) -> i16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smaxp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxv_s32(a: int32x2_t) -> i32 { vmaxv_s32_(a) } @@ -1398,6 +1548,7 @@ pub unsafe fn vmaxv_s32(a: 
int32x2_t) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smaxv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxvq_s32(a: int32x4_t) -> i32 { vmaxvq_s32_(a) } @@ -1406,6 +1557,7 @@ pub unsafe fn vmaxvq_s32(a: int32x4_t) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umaxv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxv_u8(a: uint8x8_t) -> u8 { vmaxv_u8_(a) } @@ -1414,6 +1566,7 @@ pub unsafe fn vmaxv_u8(a: uint8x8_t) -> u8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umaxv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxvq_u8(a: uint8x16_t) -> u8 { vmaxvq_u8_(a) } @@ -1422,6 +1575,7 @@ pub unsafe fn vmaxvq_u8(a: uint8x16_t) -> u8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umaxv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxv_u16(a: uint16x4_t) -> u16 { vmaxv_u16_(a) } @@ -1430,6 +1584,7 @@ pub unsafe fn vmaxv_u16(a: uint16x4_t) -> u16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umaxv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxvq_u16(a: uint16x8_t) -> u16 { vmaxvq_u16_(a) } @@ -1438,6 +1593,7 @@ pub unsafe fn vmaxvq_u16(a: uint16x8_t) -> u16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umaxp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxv_u32(a: uint32x2_t) -> u32 { vmaxv_u32_(a) } @@ -1446,6 +1602,7 @@ pub unsafe fn vmaxv_u32(a: uint32x2_t) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umaxv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxvq_u32(a: uint32x4_t) -> u32 { vmaxvq_u32_(a) } @@ -1454,6 +1611,7 @@ pub unsafe fn vmaxvq_u32(a: uint32x4_t) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxv_f32(a: float32x2_t) -> f32 { vmaxv_f32_(a) } @@ -1462,6 +1620,7 @@ pub unsafe fn vmaxv_f32(a: float32x2_t) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxvq_f32(a: float32x4_t) -> f32 { vmaxvq_f32_(a) } @@ -1470,6 +1629,7 @@ pub unsafe fn vmaxvq_f32(a: float32x4_t) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmaxvq_f64(a: float64x2_t) -> f64 { vmaxvq_f64_(a) } @@ -1478,6 +1638,7 @@ pub unsafe fn vmaxvq_f64(a: float64x2_t) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sminv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminv_s8(a: int8x8_t) -> i8 { vminv_s8_(a) } @@ -1486,6 +1647,7 @@ pub unsafe fn vminv_s8(a: int8x8_t) -> i8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sminv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminvq_s8(a: int8x16_t) -> i8 { vminvq_s8_(a) } @@ -1494,6 +1656,7 @@ pub unsafe fn vminvq_s8(a: int8x16_t) -> i8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sminv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminv_s16(a: int16x4_t) -> i16 { vminv_s16_(a) } @@ -1502,6 +1665,7 
@@ pub unsafe fn vminv_s16(a: int16x4_t) -> i16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sminv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminvq_s16(a: int16x8_t) -> i16 { vminvq_s16_(a) } @@ -1510,6 +1674,7 @@ pub unsafe fn vminvq_s16(a: int16x8_t) -> i16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sminp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminv_s32(a: int32x2_t) -> i32 { vminv_s32_(a) } @@ -1518,6 +1683,7 @@ pub unsafe fn vminv_s32(a: int32x2_t) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sminv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminvq_s32(a: int32x4_t) -> i32 { vminvq_s32_(a) } @@ -1526,6 +1692,7 @@ pub unsafe fn vminvq_s32(a: int32x4_t) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uminv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminv_u8(a: uint8x8_t) -> u8 { vminv_u8_(a) } @@ -1534,6 +1701,7 @@ pub unsafe fn vminv_u8(a: uint8x8_t) -> u8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uminv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminvq_u8(a: uint8x16_t) -> u8 { vminvq_u8_(a) } @@ -1542,6 +1710,7 @@ pub unsafe fn vminvq_u8(a: uint8x16_t) -> u8 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uminv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminv_u16(a: uint16x4_t) -> u16 { vminv_u16_(a) } @@ -1550,6 +1719,7 @@ pub unsafe fn vminv_u16(a: uint16x4_t) -> u16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uminv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminvq_u16(a: uint16x8_t) -> u16 { vminvq_u16_(a) } @@ -1558,6 +1728,7 @@ pub unsafe fn vminvq_u16(a: uint16x8_t) -> u16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uminp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminv_u32(a: uint32x2_t) -> u32 { vminv_u32_(a) } @@ -1566,6 +1737,7 @@ pub unsafe fn vminv_u32(a: uint32x2_t) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uminv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminvq_u32(a: uint32x4_t) -> u32 { vminvq_u32_(a) } @@ -1574,6 +1746,7 @@ pub unsafe fn vminvq_u32(a: uint32x4_t) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminv_f32(a: float32x2_t) -> f32 { vminv_f32_(a) } @@ -1582,6 +1755,7 @@ pub unsafe fn vminv_f32(a: float32x2_t) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminvq_f32(a: float32x4_t) -> f32 { vminvq_f32_(a) } @@ -1590,6 +1764,7 @@ pub unsafe fn vminvq_f32(a: float32x4_t) -> f32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminvq_f64(a: float64x2_t) -> f64 { vminvq_f64_(a) } @@ -1598,6 +1773,7 @@ pub unsafe fn vminvq_f64(a: float64x2_t) -> f64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sminp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminq_s8(a: int8x16_t, b: 
int8x16_t) -> int8x16_t { vpminq_s8_(a, b) } @@ -1606,6 +1782,7 @@ pub unsafe fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sminp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { vpminq_s16_(a, b) } @@ -1614,6 +1791,7 @@ pub unsafe fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sminp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { vpminq_s32_(a, b) } @@ -1622,6 +1800,7 @@ pub unsafe fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uminp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { vpminq_u8_(a, b) } @@ -1630,6 +1809,7 @@ pub unsafe fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uminp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { vpminq_u16_(a, b) } @@ -1638,6 +1818,7 @@ pub unsafe fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uminp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { vpminq_u32_(a, b) } @@ -1646,6 +1827,7 @@ pub unsafe fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { vpminq_f32_(a, b) } @@ -1654,6 +1836,7 @@ pub unsafe fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { vpminq_f64_(a, b) } @@ -1662,6 +1845,7 @@ pub unsafe fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smaxp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { vpmaxq_s8_(a, b) } @@ -1670,6 +1854,7 @@ pub unsafe fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smaxp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { vpmaxq_s16_(a, b) } @@ -1678,6 +1863,7 @@ pub unsafe fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smaxp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { vpmaxq_s32_(a, b) } @@ -1686,6 +1872,7 @@ pub unsafe fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umaxp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxq_u8(a: uint8x16_t, b: 
uint8x16_t) -> uint8x16_t { vpmaxq_u8_(a, b) } @@ -1694,6 +1881,7 @@ pub unsafe fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umaxp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { vpmaxq_u16_(a, b) } @@ -1702,6 +1890,7 @@ pub unsafe fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umaxp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { vpmaxq_u32_(a, b) } @@ -1710,6 +1899,7 @@ pub unsafe fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { vpmaxq_f32_(a, b) } @@ -1718,6 +1908,7 @@ pub unsafe fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { vpmaxq_f64_(a, b) } @@ -1727,10 +1918,9 @@ pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vext_p64(a: poly64x1_t, _b: poly64x1_t) -> poly64x1_t { - if N != 0 { - unreachable_unchecked() - } + static_assert!(N : i32 where N == 0); a } @@ -1739,16 +1929,16 @@ pub unsafe fn vext_p64(a: poly64x1_t, _b: poly64x1_t) -> poly64x1_ #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vext_f64(a: float64x1_t, _b: float64x1_t) -> float64x1_t { - if N != 0 { - unreachable_unchecked() - } + static_assert!(N : i32 where N == 0); a } /// Vector combine #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcombine_s8(low: int8x8_t, high: int8x8_t) -> int8x16_t { simd_shuffle16!( low, @@ -1761,6 +1951,7 @@ pub unsafe fn vcombine_s8(low: int8x8_t, high: int8x8_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcombine_s16(low: int16x4_t, high: int16x4_t) -> int16x8_t { simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -1769,6 +1960,7 @@ pub unsafe fn vcombine_s16(low: int16x4_t, high: int16x4_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcombine_s32(low: int32x2_t, high: int32x2_t) -> int32x4_t { simd_shuffle4!(low, high, [0, 1, 2, 3]) } @@ -1777,6 +1969,7 @@ pub unsafe fn vcombine_s32(low: int32x2_t, high: int32x2_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcombine_s64(low: int64x1_t, high: int64x1_t) -> int64x2_t { simd_shuffle2!(low, high, [0, 1]) } @@ -1785,6 +1978,7 @@ pub 
unsafe fn vcombine_s64(low: int64x1_t, high: int64x1_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcombine_u8(low: uint8x8_t, high: uint8x8_t) -> uint8x16_t { simd_shuffle16!( low, @@ -1797,6 +1991,7 @@ pub unsafe fn vcombine_u8(low: uint8x8_t, high: uint8x8_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcombine_u16(low: uint16x4_t, high: uint16x4_t) -> uint16x8_t { simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -1805,6 +2000,7 @@ pub unsafe fn vcombine_u16(low: uint16x4_t, high: uint16x4_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcombine_u32(low: uint32x2_t, high: uint32x2_t) -> uint32x4_t { simd_shuffle4!(low, high, [0, 1, 2, 3]) } @@ -1813,6 +2009,7 @@ pub unsafe fn vcombine_u32(low: uint32x2_t, high: uint32x2_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcombine_u64(low: uint64x1_t, high: uint64x1_t) -> uint64x2_t { simd_shuffle2!(low, high, [0, 1]) } @@ -1821,6 +2018,7 @@ pub unsafe fn vcombine_u64(low: uint64x1_t, high: uint64x1_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcombine_p64(low: poly64x1_t, high: poly64x1_t) -> poly64x2_t { simd_shuffle2!(low, high, [0, 1]) } @@ -1829,6 +2027,7 @@ pub unsafe fn vcombine_p64(low: poly64x1_t, high: poly64x1_t) -> poly64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmov))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdup_n_p64(value: p64) -> poly64x1_t { transmute(u64x1::new(value)) } @@ -1837,6 +2036,7 @@ pub unsafe fn vdup_n_p64(value: p64) -> poly64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdup_n_f64(value: f64) -> float64x1_t { float64x1_t(value) } @@ -1845,6 +2045,7 @@ pub unsafe fn vdup_n_f64(value: f64) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(dup))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupq_n_p64(value: p64) -> poly64x2_t { transmute(u64x2::new(value, value)) } @@ -1853,6 +2054,7 @@ pub unsafe fn vdupq_n_p64(value: p64) -> poly64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(dup))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupq_n_f64(value: f64) -> float64x2_t { float64x2_t(value, value) } @@ -1861,6 +2063,7 @@ pub unsafe fn vdupq_n_f64(value: f64) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmov))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmov_n_p64(value: p64) -> poly64x1_t { vdup_n_p64(value) } @@ -1869,6 +2072,7 @@ pub unsafe fn vmov_n_p64(value: p64) -> poly64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmov_n_f64(value: f64) -> float64x1_t { vdup_n_f64(value) } @@ 
-1877,6 +2081,7 @@ pub unsafe fn vmov_n_f64(value: f64) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(dup))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmovq_n_p64(value: p64) -> poly64x2_t { vdupq_n_p64(value) } @@ -1885,6 +2090,7 @@ pub unsafe fn vmovq_n_p64(value: p64) -> poly64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(dup))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmovq_n_f64(value: f64) -> float64x2_t { vdupq_n_f64(value) } @@ -1893,6 +2099,7 @@ pub unsafe fn vmovq_n_f64(value: f64) -> float64x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vget_high_f64(a: float64x2_t) -> float64x1_t { float64x1_t(simd_extract(a, 1)) } @@ -1901,6 +2108,7 @@ pub unsafe fn vget_high_f64(a: float64x2_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ext))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vget_high_p64(a: poly64x2_t) -> poly64x1_t { transmute(u64x1::new(simd_extract(a, 1))) } @@ -1909,6 +2117,7 @@ pub unsafe fn vget_high_p64(a: poly64x2_t) -> poly64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vget_low_f64(a: float64x2_t) -> float64x1_t { float64x1_t(simd_extract(a, 0)) } @@ -1917,6 +2126,7 @@ pub unsafe fn vget_low_f64(a: float64x2_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vget_low_p64(a: poly64x2_t) -> poly64x1_t { transmute(u64x1::new(simd_extract(a, 0))) } @@ -1925,6 +2135,7 @@ pub unsafe fn vget_low_p64(a: poly64x2_t) -> poly64x1_t { #[inline] #[target_feature(enable = "neon")] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, IMM5 = 0))] pub unsafe fn vget_lane_f64(v: float64x1_t) -> f64 { static_assert!(IMM5 : i32 where IMM5 == 0); @@ -1935,6 +2146,7 @@ pub unsafe fn vget_lane_f64(v: float64x1_t) -> f64 { #[inline] #[target_feature(enable = "neon")] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, IMM5 = 0))] pub unsafe fn vgetq_lane_f64(v: float64x2_t) -> f64 { static_assert_imm1!(IMM5); @@ -1955,6 +2167,7 @@ pub unsafe fn vcombine_f16 ( low: float16x4_t, high: float16x4_t) -> float16x8_ #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcombine_f32(low: float32x2_t, high: float32x2_t) -> float32x4_t { simd_shuffle4!(low, high, [0, 1, 2, 3]) } @@ -1963,6 +2176,7 @@ pub unsafe fn vcombine_f32(low: float32x2_t, high: float32x2_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcombine_p8(low: poly8x8_t, high: poly8x8_t) -> poly8x16_t { simd_shuffle16!( low, @@ -1975,6 +2189,7 @@ pub unsafe fn vcombine_p8(low: poly8x8_t, high: poly8x8_t) -> poly8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov))] +#[stable(feature = "neon_intrinsics", since = 
"1.59.0")] pub unsafe fn vcombine_p16(low: poly16x4_t, high: poly16x4_t) -> poly16x8_t { simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -1983,6 +2198,7 @@ pub unsafe fn vcombine_p16(low: poly16x4_t, high: poly16x4_t) -> poly16x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcombine_f64(low: float64x1_t, high: float64x1_t) -> float64x2_t { simd_shuffle2!(low, high, [0, 1]) } @@ -1992,6 +2208,7 @@ pub unsafe fn vcombine_f64(low: float64x1_t, high: float64x1_t) -> float64x2_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { vqtbl1_s8(vcombine_s8(a, zeroed()), transmute(b)) } @@ -2001,6 +2218,7 @@ pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { vqtbl1_u8(vcombine_u8(a, zeroed()), b) } @@ -2010,6 +2228,7 @@ pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { vqtbl1_p8(vcombine_p8(a, zeroed()), b) } @@ -2019,6 +2238,7 @@ pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { vqtbl1_s8(vcombine_s8(a.0, a.1), transmute(b)) } @@ -2028,6 +2248,7 @@ pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { vqtbl1_u8(vcombine_u8(a.0, a.1), b) } @@ -2037,6 +2258,7 @@ pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { vqtbl1_p8(vcombine_p8(a.0, a.1), b) } @@ -2046,6 +2268,7 @@ pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { vqtbl2_s8( int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, zeroed())), @@ -2058,6 +2281,7 @@ pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { vqtbl2_u8( uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, zeroed())), @@ -2070,6 +2294,7 @@ pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { 
#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { vqtbl2_p8( poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, zeroed())), @@ -2082,6 +2307,7 @@ pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { vqtbl2_s8( int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3)), @@ -2094,6 +2320,7 @@ pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { vqtbl2_u8( uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3)), @@ -2106,6 +2333,7 @@ pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { vqtbl2_p8( poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3)), @@ -2118,6 +2346,7 @@ pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { let r = vqtbx1_s8(a, vcombine_s8(b, zeroed()), transmute(c)); let m: int8x8_t = simd_lt(c, transmute(i8x8::splat(8))); @@ -2129,6 +2358,7 @@ pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { let r = vqtbx1_u8(a, vcombine_u8(b, zeroed()), c); let m: int8x8_t = simd_lt(c, transmute(u8x8::splat(8))); @@ -2140,6 +2370,7 @@ pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { let r = vqtbx1_p8(a, vcombine_p8(b, zeroed()), c); let m: int8x8_t = simd_lt(c, transmute(u8x8::splat(8))); @@ -2151,6 +2382,7 @@ pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { vqtbx1_s8(a, vcombine_s8(b.0, b.1), transmute(c)) } @@ -2160,6 +2392,7 @@ pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> 
uint8x8_t { vqtbx1_u8(a, vcombine_u8(b.0, b.1), c) } @@ -2169,6 +2402,7 @@ pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { vqtbx1_p8(a, vcombine_p8(b.0, b.1), c) } @@ -2178,6 +2412,7 @@ pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { let r = vqtbx2_s8( a, @@ -2193,6 +2428,7 @@ pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { let r = vqtbx2_u8( a, @@ -2208,6 +2444,7 @@ pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { let r = vqtbx2_p8( a, @@ -2223,6 +2460,7 @@ pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { vqtbx2_s8( a, @@ -2236,6 +2474,7 @@ pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { vqtbx2_u8( a, @@ -2249,6 +2488,7 @@ pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { vqtbx2_p8( a, @@ -2262,94 +2502,117 @@ pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl1_s8(t: int8x16_t, idx: uint8x8_t) -> int8x8_t { vqtbl1(t, idx) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl1q_s8(t: int8x16_t, idx: uint8x16_t) -> int8x16_t { vqtbl1q(t, idx) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl1_u8(t: uint8x16_t, idx: uint8x8_t) -> uint8x8_t { transmute(vqtbl1(transmute(t), 
transmute(idx))) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl1q_u8(t: uint8x16_t, idx: uint8x16_t) -> uint8x16_t { transmute(vqtbl1q(transmute(t), transmute(idx))) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl1_p8(t: poly8x16_t, idx: uint8x8_t) -> poly8x8_t { transmute(vqtbl1(transmute(t), transmute(idx))) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl1q_p8(t: poly8x16_t, idx: uint8x16_t) -> poly8x16_t { transmute(vqtbl1q(transmute(t), transmute(idx))) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx1_s8(a: int8x8_t, t: int8x16_t, idx: uint8x8_t) -> int8x8_t { vqtbx1(a, t, idx) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx1q_s8(a: int8x16_t, t: int8x16_t, idx: uint8x16_t) -> int8x16_t { vqtbx1q(a, t, idx) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx1_u8(a: uint8x8_t, t: uint8x16_t, idx: uint8x8_t) -> uint8x8_t { transmute(vqtbx1(transmute(a), transmute(t), transmute(idx))) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx1q_u8(a: uint8x16_t, t: uint8x16_t, idx: uint8x16_t) -> uint8x16_t { transmute(vqtbx1q(transmute(a), transmute(t), transmute(idx))) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx1_p8(a: poly8x8_t, t: poly8x16_t, idx: uint8x8_t) -> poly8x8_t { transmute(vqtbx1(transmute(a), transmute(t), transmute(idx))) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx1q_p8(a: poly8x16_t, t: poly8x16_t, idx: uint8x16_t) -> poly8x16_t { transmute(vqtbx1q(transmute(a), transmute(t), transmute(idx))) } @@ -2359,70 +2622,87 @@ pub unsafe fn vqtbx1q_p8(a: poly8x16_t, t: poly8x16_t, idx: uint8x16_t) -> poly8 #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl2_s8(t: int8x16x2_t, idx: uint8x8_t) -> int8x8_t { vqtbl2(t.0, t.1, idx) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe 
fn vqtbl2q_s8(t: int8x16x2_t, idx: uint8x16_t) -> int8x16_t { vqtbl2q(t.0, t.1, idx) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl2_u8(t: uint8x16x2_t, idx: uint8x8_t) -> uint8x8_t { transmute(vqtbl2(transmute(t.0), transmute(t.1), transmute(idx))) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl2q_u8(t: uint8x16x2_t, idx: uint8x16_t) -> uint8x16_t { transmute(vqtbl2q(transmute(t.0), transmute(t.1), transmute(idx))) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl2_p8(t: poly8x16x2_t, idx: uint8x8_t) -> poly8x8_t { transmute(vqtbl2(transmute(t.0), transmute(t.1), transmute(idx))) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl2q_p8(t: poly8x16x2_t, idx: uint8x16_t) -> poly8x16_t { transmute(vqtbl2q(transmute(t.0), transmute(t.1), transmute(idx))) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx2_s8(a: int8x8_t, t: int8x16x2_t, idx: uint8x8_t) -> int8x8_t { vqtbx2(a, t.0, t.1, idx) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx2q_s8(a: int8x16_t, t: int8x16x2_t, idx: uint8x16_t) -> int8x16_t { vqtbx2q(a, t.0, t.1, idx) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx2_u8(a: uint8x8_t, t: uint8x16x2_t, idx: uint8x8_t) -> uint8x8_t { transmute(vqtbx2( transmute(a), @@ -2431,11 +2711,13 @@ pub unsafe fn vqtbx2_u8(a: uint8x8_t, t: uint8x16x2_t, idx: uint8x8_t) -> uint8x transmute(idx), )) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx2q_u8(a: uint8x16_t, t: uint8x16x2_t, idx: uint8x16_t) -> uint8x16_t { transmute(vqtbx2q( transmute(a), @@ -2444,11 +2726,13 @@ pub unsafe fn vqtbx2q_u8(a: uint8x16_t, t: uint8x16x2_t, idx: uint8x16_t) -> uin transmute(idx), )) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx2_p8(a: poly8x8_t, t: poly8x16x2_t, idx: uint8x8_t) -> poly8x8_t { transmute(vqtbx2( transmute(a), @@ -2457,11 +2741,13 @@ pub unsafe fn vqtbx2_p8(a: poly8x8_t, t: poly8x16x2_t, idx: uint8x8_t) -> poly8x transmute(idx), )) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] 
+#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx2q_p8(a: poly8x16_t, t: poly8x16x2_t, idx: uint8x16_t) -> poly8x16_t { transmute(vqtbx2q( transmute(a), @@ -2476,22 +2762,27 @@ pub unsafe fn vqtbx2q_p8(a: poly8x16_t, t: poly8x16x2_t, idx: uint8x16_t) -> pol #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl3_s8(t: int8x16x3_t, idx: uint8x8_t) -> int8x8_t { vqtbl3(t.0, t.1, t.2, idx) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl3q_s8(t: int8x16x3_t, idx: uint8x16_t) -> int8x16_t { vqtbl3q(t.0, t.1, t.2, idx) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl3_u8(t: uint8x16x3_t, idx: uint8x8_t) -> uint8x8_t { transmute(vqtbl3( transmute(t.0), @@ -2500,11 +2791,13 @@ pub unsafe fn vqtbl3_u8(t: uint8x16x3_t, idx: uint8x8_t) -> uint8x8_t { transmute(idx), )) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl3q_u8(t: uint8x16x3_t, idx: uint8x16_t) -> uint8x16_t { transmute(vqtbl3q( transmute(t.0), @@ -2513,11 +2806,13 @@ pub unsafe fn vqtbl3q_u8(t: uint8x16x3_t, idx: uint8x16_t) -> uint8x16_t { transmute(idx), )) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl3_p8(t: poly8x16x3_t, idx: uint8x8_t) -> poly8x8_t { transmute(vqtbl3( transmute(t.0), @@ -2526,11 +2821,13 @@ pub unsafe fn vqtbl3_p8(t: poly8x16x3_t, idx: uint8x8_t) -> poly8x8_t { transmute(idx), )) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl3q_p8(t: poly8x16x3_t, idx: uint8x16_t) -> poly8x16_t { transmute(vqtbl3q( transmute(t.0), @@ -2539,27 +2836,33 @@ pub unsafe fn vqtbl3q_p8(t: poly8x16x3_t, idx: uint8x16_t) -> poly8x16_t { transmute(idx), )) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx3_s8(a: int8x8_t, t: int8x16x3_t, idx: uint8x8_t) -> int8x8_t { vqtbx3(a, t.0, t.1, t.2, idx) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx3q_s8(a: int8x16_t, t: int8x16x3_t, idx: uint8x16_t) -> int8x16_t { vqtbx3q(a, t.0, t.1, t.2, idx) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx3_u8(a: uint8x8_t, t: uint8x16x3_t, idx: uint8x8_t) -> uint8x8_t { transmute(vqtbx3( transmute(a), @@ -2569,11 +2872,13 @@ pub unsafe fn vqtbx3_u8(a: uint8x8_t, t: 
uint8x16x3_t, idx: uint8x8_t) -> uint8x transmute(idx), )) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx3q_u8(a: uint8x16_t, t: uint8x16x3_t, idx: uint8x16_t) -> uint8x16_t { transmute(vqtbx3q( transmute(a), @@ -2583,11 +2888,13 @@ pub unsafe fn vqtbx3q_u8(a: uint8x16_t, t: uint8x16x3_t, idx: uint8x16_t) -> uin transmute(idx), )) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx3_p8(a: poly8x8_t, t: poly8x16x3_t, idx: uint8x8_t) -> poly8x8_t { transmute(vqtbx3( transmute(a), @@ -2597,11 +2904,13 @@ pub unsafe fn vqtbx3_p8(a: poly8x8_t, t: poly8x16x3_t, idx: uint8x8_t) -> poly8x transmute(idx), )) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx3q_p8(a: poly8x16_t, t: poly8x16x3_t, idx: uint8x16_t) -> poly8x16_t { transmute(vqtbx3q( transmute(a), @@ -2617,22 +2926,27 @@ pub unsafe fn vqtbx3q_p8(a: poly8x16_t, t: poly8x16x3_t, idx: uint8x16_t) -> pol #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl4_s8(t: int8x16x4_t, idx: uint8x8_t) -> int8x8_t { vqtbl4(t.0, t.1, t.2, t.3, idx) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl4q_s8(t: int8x16x4_t, idx: uint8x16_t) -> int8x16_t { vqtbl4q(t.0, t.1, t.2, t.3, idx) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl4_u8(t: uint8x16x4_t, idx: uint8x8_t) -> uint8x8_t { transmute(vqtbl4( transmute(t.0), @@ -2642,11 +2956,13 @@ pub unsafe fn vqtbl4_u8(t: uint8x16x4_t, idx: uint8x8_t) -> uint8x8_t { transmute(idx), )) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl4q_u8(t: uint8x16x4_t, idx: uint8x16_t) -> uint8x16_t { transmute(vqtbl4q( transmute(t.0), @@ -2656,11 +2972,13 @@ pub unsafe fn vqtbl4q_u8(t: uint8x16x4_t, idx: uint8x16_t) -> uint8x16_t { transmute(idx), )) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl4_p8(t: poly8x16x4_t, idx: uint8x8_t) -> poly8x8_t { transmute(vqtbl4( transmute(t.0), @@ -2670,11 +2988,13 @@ pub unsafe fn vqtbl4_p8(t: poly8x16x4_t, idx: uint8x8_t) -> poly8x8_t { transmute(idx), )) } + /// Table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbl4q_p8(t: poly8x16x4_t, idx: uint8x16_t) -> poly8x16_t { transmute(vqtbl4q( transmute(t.0), @@ -2684,27 +3004,33 @@ pub unsafe fn 
vqtbl4q_p8(t: poly8x16x4_t, idx: uint8x16_t) -> poly8x16_t { transmute(idx), )) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx4_s8(a: int8x8_t, t: int8x16x4_t, idx: uint8x8_t) -> int8x8_t { vqtbx4(a, t.0, t.1, t.2, t.3, idx) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx4q_s8(a: int8x16_t, t: int8x16x4_t, idx: uint8x16_t) -> int8x16_t { vqtbx4q(a, t.0, t.1, t.2, t.3, idx) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx4_u8(a: uint8x8_t, t: uint8x16x4_t, idx: uint8x8_t) -> uint8x8_t { transmute(vqtbx4( transmute(a), @@ -2715,11 +3041,13 @@ pub unsafe fn vqtbx4_u8(a: uint8x8_t, t: uint8x16x4_t, idx: uint8x8_t) -> uint8x transmute(idx), )) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx4q_u8(a: uint8x16_t, t: uint8x16x4_t, idx: uint8x16_t) -> uint8x16_t { transmute(vqtbx4q( transmute(a), @@ -2730,11 +3058,13 @@ pub unsafe fn vqtbx4q_u8(a: uint8x16_t, t: uint8x16x4_t, idx: uint8x16_t) -> uin transmute(idx), )) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx4_p8(a: poly8x8_t, t: poly8x16x4_t, idx: uint8x8_t) -> poly8x8_t { transmute(vqtbx4( transmute(a), @@ -2745,11 +3075,13 @@ pub unsafe fn vqtbx4_p8(a: poly8x8_t, t: poly8x16x4_t, idx: uint8x8_t) -> poly8x transmute(idx), )) } + /// Extended table look-up #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqtbx4q_p8(a: poly8x16_t, t: poly8x16x4_t, idx: uint8x16_t) -> poly8x16_t { transmute(vqtbx4q( transmute(a), @@ -2766,6 +3098,7 @@ pub unsafe fn vqtbx4q_p8(a: poly8x16_t, t: poly8x16x4_t, idx: uint8x16_t) -> pol #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshld_n_s64<const N: i32>(a: i64) -> i64 { static_assert_imm6!(N); a << N @@ -2776,6 +3109,7 @@ pub unsafe fn vshld_n_s64<const N: i32>(a: i64) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshld_n_u64<const N: i32>(a: u64) -> u64 { static_assert_imm6!(N); a << N @@ -2786,6 +3120,7 @@ pub unsafe fn vshld_n_u64<const N: i32>(a: u64) -> u64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshrd_n_s64<const N: i32>(a: i64) -> i64 { static_assert!(N : i32 where N >= 1 && N <= 64); let n: i32 = if N == 64 { 63 } else { N }; @@ -2797,6 +3132,7 @@ pub unsafe fn vshrd_n_s64<const N: i32>(a: i64) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(test,
assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshrd_n_u64<const N: i32>(a: u64) -> u64 { static_assert!(N : i32 where N >= 1 && N <= 64); let n: i32 = if N == 64 { @@ -2812,6 +3148,7 @@ pub unsafe fn vshrd_n_u64<const N: i32>(a: u64) -> u64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 { static_assert!(N : i32 where N >= 1 && N <= 64); a.wrapping_add(vshrd_n_s64::<N>(b)) @@ -2822,6 +3159,7 @@ pub unsafe fn vsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 { static_assert!(N : i32 where N >= 1 && N <= 64); a.wrapping_add(vshrd_n_u64::<N>(b)) @@ -2832,6 +3170,7 @@ pub unsafe fn vsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert_imm3!(N); vsli_n_s8_(a, b, N) @@ -2841,6 +3180,7 @@ pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert_imm3!(N); vsliq_n_s8_(a, b, N) @@ -2850,6 +3190,7 @@ pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_imm4!(N); vsli_n_s16_(a, b, N) @@ -2859,6 +3200,7 @@ pub unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_imm4!(N); vsliq_n_s16_(a, b, N) @@ -2868,6 +3210,7 @@ pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert!(N: i32 where N >= 0 && N <= 31); vsli_n_s32_(a, b, N) @@ -2877,6 +3220,7 @@ pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert!(N: i32 where N >= 0 && N <= 31); vsliq_n_s32_(a, b, N) @@ -2886,6 +3230,7 @@ pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_s64<const N: i32>(a: int64x1_t, b:
int64x1_t) -> int64x1_t { static_assert!(N: i32 where N >= 0 && N <= 63); vsli_n_s64_(a, b, N) @@ -2895,6 +3240,7 @@ pub unsafe fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert!(N: i32 where N >= 0 && N <= 63); vsliq_n_s64_(a, b, N) @@ -2904,6 +3250,7 @@ pub unsafe fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { static_assert_imm3!(N); transmute(vsli_n_s8_(transmute(a), transmute(b), N)) @@ -2913,6 +3260,7 @@ pub unsafe fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { static_assert_imm3!(N); transmute(vsliq_n_s8_(transmute(a), transmute(b), N)) @@ -2922,6 +3270,7 @@ pub unsafe fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert_imm4!(N); transmute(vsli_n_s16_(transmute(a), transmute(b), N)) @@ -2931,6 +3280,7 @@ pub unsafe fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { static_assert_imm4!(N); transmute(vsliq_n_s16_(transmute(a), transmute(b), N)) @@ -2940,6 +3290,7 @@ pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { static_assert!(N: i32 where N >= 0 && N <= 31); transmute(vsli_n_s32_(transmute(a), transmute(b), N)) @@ -2949,6 +3300,7 @@ pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { static_assert!(N: i32 where N >= 0 && N <= 31); transmute(vsliq_n_s32_(transmute(a), transmute(b), N)) @@ -2958,6 +3310,7 @@ pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { static_assert!(N: i32 where N >= 0 && N <= 63); transmute(vsli_n_s64_(transmute(a), transmute(b), N)) @@ -2967,6 +3320,7 @@ pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1 #[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { static_assert!(N: i32 where N >= 0 && N <= 63); transmute(vsliq_n_s64_(transmute(a), transmute(b), N)) @@ -2976,6 +3330,7 @@ pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { static_assert_imm3!(N); transmute(vsli_n_s8_(transmute(a), transmute(b), N)) @@ -2985,6 +3340,7 @@ pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { static_assert_imm3!(N); transmute(vsliq_n_s8_(transmute(a), transmute(b), N)) @@ -2994,6 +3350,7 @@ pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { static_assert_imm4!(N); transmute(vsli_n_s16_(transmute(a), transmute(b), N)) @@ -3003,6 +3360,7 @@ pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { static_assert_imm4!(N); transmute(vsliq_n_s16_(transmute(a), transmute(b), N)) @@ -3012,6 +3370,7 @@ pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { static_assert!(N: i32 where N >= 0 && N <= 63); transmute(vsli_n_s64_(transmute(a), transmute(b), N)) @@ -3021,6 +3380,7 @@ pub unsafe fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1 #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { static_assert!(N: i32 where N >= 0 && N <= 63); transmute(vsliq_n_s64_(transmute(a), transmute(b), N)) @@ -3030,6 +3390,7 @@ pub unsafe fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert!(N: i32 where N >= 1 && N <= 8); vsri_n_s8_(a, b, N) @@ -3039,6 +3400,7 @@ pub unsafe fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert!(N:
i32 where N >= 1 && N <= 8); vsriq_n_s8_(a, b, N) @@ -3048,6 +3410,7 @@ pub unsafe fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert!(N: i32 where N >= 1 && N <= 16); vsri_n_s16_(a, b, N) @@ -3057,6 +3420,7 @@ pub unsafe fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert!(N: i32 where N >= 1 && N <= 16); vsriq_n_s16_(a, b, N) @@ -3066,6 +3430,7 @@ pub unsafe fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert!(N: i32 where N >= 1 && N <= 32); vsri_n_s32_(a, b, N) @@ -3075,6 +3440,7 @@ pub unsafe fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert!(N: i32 where N >= 1 && N <= 32); vsriq_n_s32_(a, b, N) @@ -3084,6 +3450,7 @@ pub unsafe fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t { static_assert!(N: i32 where N >= 1 && N <= 64); vsri_n_s64_(a, b, N) @@ -3093,6 +3460,7 @@ pub unsafe fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert!(N: i32 where N >= 1 && N <= 64); vsriq_n_s64_(a, b, N) @@ -3102,6 +3470,7 @@ pub unsafe fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { static_assert!(N: i32 where N >= 1 && N <= 8); transmute(vsri_n_s8_(transmute(a), transmute(b), N)) @@ -3111,6 +3480,7 @@ pub unsafe fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { static_assert!(N: i32 where N >= 1 && N <= 8); transmute(vsriq_n_s8_(transmute(a), transmute(b), N)) @@ -3120,6 +3490,7 @@ pub unsafe fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert!(N: i32 where N >= 1 && N <= 16); transmute(vsri_n_s16_(transmute(a), transmute(b), N)) @@ -3129,6 +3500,7 @@ pub unsafe fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { static_assert!(N: i32 where N >= 1 && N <= 16); transmute(vsriq_n_s16_(transmute(a), transmute(b), N)) @@ -3138,6 +3510,7 @@ pub unsafe fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { static_assert!(N: i32 where N >= 1 && N <= 32); transmute(vsri_n_s32_(transmute(a), transmute(b), N)) @@ -3147,6 +3520,7 @@ pub unsafe fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { static_assert!(N: i32 where N >= 1 && N <= 32); transmute(vsriq_n_s32_(transmute(a), transmute(b), N)) @@ -3156,6 +3530,7 @@ pub unsafe fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { static_assert!(N: i32 where N >= 1 && N <= 64); transmute(vsri_n_s64_(transmute(a), transmute(b), N)) @@ -3165,6 +3540,7 @@ pub unsafe fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { static_assert!(N: i32 where N >= 1 && N <= 64); transmute(vsriq_n_s64_(transmute(a), transmute(b), N)) @@ -3174,6 +3550,7 @@ pub unsafe fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { static_assert!(N: i32 where N >= 1 && N <= 8); transmute(vsri_n_s8_(transmute(a), transmute(b), N)) @@ -3183,6 +3560,7 @@ pub unsafe fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { static_assert!(N: i32 where N >= 1 && N <= 8); transmute(vsriq_n_s8_(transmute(a), transmute(b), N)) @@ -3192,6 +3570,7 @@ pub unsafe fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { static_assert!(N:
i32 where N >= 1 && N <= 16); transmute(vsri_n_s16_(transmute(a), transmute(b), N)) @@ -3201,6 +3580,7 @@ pub unsafe fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4 #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { static_assert!(N: i32 where N >= 1 && N <= 16); transmute(vsriq_n_s16_(transmute(a), transmute(b), N)) @@ -3210,6 +3590,7 @@ pub unsafe fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { static_assert!(N: i32 where N >= 1 && N <= 64); transmute(vsri_n_s64_(transmute(a), transmute(b), N)) @@ -3219,6 +3600,7 @@ pub unsafe fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1 #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { static_assert!(N: i32 where N >= 1 && N <= 64); transmute(vsriq_n_s64_(transmute(a), transmute(b), N)) diff --git a/crates/core_arch/src/arm_shared/neon/generated.rs b/crates/core_arch/src/arm_shared/neon/generated.rs index 62e99f9f52..7837307e44 100644 --- a/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/crates/core_arch/src/arm_shared/neon/generated.rs @@ -15,6 +15,7 @@ use stdarch_test::assert_instr; #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_and(a, b) } @@ -25,6 +26,7 @@ pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_and(a, b) } @@ -35,6 +37,7 @@ pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_and(a, b) } @@ -45,6 +48,7 @@ pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_and(a, b) } @@ -55,6 +59,7 @@ pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm",
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_and(a, b) } @@ -65,6 +70,7 @@ pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_and(a, b) } @@ -75,6 +81,7 @@ pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_and(a, b) } @@ -85,6 +92,7 @@ pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_and(a, b) } @@ -95,6 +103,7 @@ pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_and(a, b) } @@ -105,6 +114,7 @@ pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_and(a, b) } @@ -115,6 +125,7 @@ pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_and(a, b) } @@ -125,6 +136,7 @@ pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { 
simd_and(a, b) } @@ -135,6 +147,7 @@ pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_and(a, b) } @@ -145,6 +158,7 @@ pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_and(a, b) } @@ -155,6 +169,7 @@ pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_and(a, b) } @@ -165,6 +180,7 @@ pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_and(a, b) } @@ -175,6 +191,7 @@ pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_or(a, b) } @@ -185,6 +202,7 @@ pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_or(a, b) } @@ -195,6 +213,7 @@ pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_or(a, b) } @@ -205,6 +224,7 @@ pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] +#[cfg_attr(target_arch = "aarch64", 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_or(a, b) } @@ -215,6 +235,7 @@ pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_or(a, b) } @@ -225,6 +246,7 @@ pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_or(a, b) } @@ -235,6 +257,7 @@ pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_or(a, b) } @@ -245,6 +268,7 @@ pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_or(a, b) } @@ -255,6 +279,7 @@ pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_or(a, b) } @@ -265,6 +290,7 @@ pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_or(a, b) } @@ -275,6 +301,7 @@ pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_or(a, b) } @@ -285,6 +312,7 @@ pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vorr))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     simd_or(a, b)
 }
@@ -295,6 +323,7 @@ pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
     simd_or(a, b)
 }
@@ -305,6 +334,7 @@ pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
     simd_or(a, b)
 }
@@ -315,6 +345,7 @@ pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
     simd_or(a, b)
 }
@@ -325,6 +356,7 @@ pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     simd_or(a, b)
 }
@@ -335,6 +367,7 @@ pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     simd_xor(a, b)
 }
@@ -345,6 +378,7 @@ pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     simd_xor(a, b)
 }
@@ -355,6 +389,7 @@ pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     simd_xor(a, b)
 }
@@ -365,6 +400,7 @@ pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     simd_xor(a, b)
 }
@@ -375,6 +411,7 @@ pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     simd_xor(a, b)
 }
@@ -385,6 +422,7 @@ pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     simd_xor(a, b)
 }
@@ -395,6 +433,7 @@ pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     simd_xor(a, b)
 }
@@ -405,6 +444,7 @@ pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     simd_xor(a, b)
 }
@@ -415,6 +455,7 @@ pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     simd_xor(a, b)
 }
@@ -425,6 +466,7 @@ pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     simd_xor(a, b)
 }
@@ -435,6 +477,7 @@ pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     simd_xor(a, b)
 }
@@ -445,6 +488,7 @@ pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     simd_xor(a, b)
 }
@@ -455,6 +499,7 @@ pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
     simd_xor(a, b)
 }
@@ -465,6 +510,7 @@ pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
     simd_xor(a, b)
 }
@@ -475,6 +521,7 @@ pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
     simd_xor(a, b)
 }
@@ -485,6 +532,7 @@ pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     simd_xor(a, b)
 }
@@ -495,6 +543,7 @@ pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -511,6 +560,7 @@ vabd_s8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
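
// Editor's note: an illustrative sketch, not part of the diff. The point of
// the `stable(feature = "neon_intrinsics", since = "1.59.0")` lines being
// added above is that these bitwise intrinsics become callable from stable
// Rust on AArch64, where NEON is a mandatory target feature. The function
// name below is hypothetical; the intrinsics are the ones touched here.
#[cfg(target_arch = "aarch64")]
fn bitwise_sketch() -> u8 {
    use core::arch::aarch64::{vdup_n_u8, veor_u8, vget_lane_u8, vorr_u8};
    unsafe {
        let a = vdup_n_u8(0b1010_1010);
        let b = vdup_n_u8(0b0110_0110);
        let x = veor_u8(a, b); // lane-wise XOR: 0b1100_1100 in every lane
        let o = vorr_u8(a, b); // lane-wise OR:  0b1110_1110 in every lane
        vget_lane_u8::<0>(veor_u8(x, o))
    }
}
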
@@ -527,6 +577,7 @@ vabdq_s8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -543,6 +594,7 @@ vabd_s16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -559,6 +611,7 @@ vabdq_s16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -575,6 +628,7 @@ vabd_s32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -591,6 +645,7 @@ vabdq_s32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -607,6 +662,7 @@ vabd_u8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -623,6 +679,7 @@ vabdq_u8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -639,6 +696,7 @@ vabd_u16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -655,6 +713,7 @@ vabdq_u16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -671,6 +730,7 @@ vabd_u32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -687,6 +747,7 @@ vabdq_u32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabd))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -703,6 +764,7 @@ vabd_f32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabd))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -719,6 +781,7 @@ vabdq_f32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabdl))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t {
     simd_cast(vabd_u8(a, b))
 }
@@ -729,6 +792,7 @@ pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabdl))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t {
     simd_cast(vabd_u16(a, b))
 }
@@ -739,6 +803,7 @@ pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabdl))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
     simd_cast(vabd_u32(a, b))
 }
@@ -749,6 +814,7 @@ pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabdl))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t {
     let c: uint8x8_t = simd_cast(vabd_s8(a, b));
     simd_cast(c)
@@ -760,6 +826,7 @@ pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabdl))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
     let c: uint16x4_t = simd_cast(vabd_s16(a, b));
     simd_cast(c)
@@ -771,6 +838,7 @@ pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabdl))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t {
     let c: uint32x2_t = simd_cast(vabd_s32(a, b));
     simd_cast(c)
@@ -782,6 +850,7 @@ pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     simd_eq(a, b)
 }
@@ -792,6 +861,7 @@ pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     simd_eq(a, b)
 }
@@ -802,6 +872,7 @@ pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     simd_eq(a, b)
 }
@@ -812,6 +883,7 @@ pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     simd_eq(a, b)
 }
@@ -822,6 +894,7 @@ pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     simd_eq(a, b)
 }
@@ -832,6 +905,7 @@ pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     simd_eq(a, b)
 }
@@ -842,6 +916,7 @@ pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
     simd_eq(a, b)
 }
@@ -852,6 +927,7 @@ pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
     simd_eq(a, b)
 }
@@ -862,6 +938,7 @@ pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
     simd_eq(a, b)
 }
@@ -872,6 +949,7 @@ pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
     simd_eq(a, b)
 }
@@ -882,6 +960,7 @@ pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
     simd_eq(a, b)
 }
@@ -892,6 +971,7 @@ pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
     simd_eq(a, b)
 }
@@ -902,6 +982,7 @@ pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t {
     simd_eq(a, b)
 }
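
// Editor's note: illustrative sketch, not part of the diff. The vceq_*
// comparisons above return a mask vector: each lane is all ones where the
// operands compare equal and all zeros otherwise, while vabdl_* widens the
// absolute difference into the next lane size. `compare_and_widen_demo` is
// a hypothetical name.
#[cfg(target_arch = "aarch64")]
fn compare_and_widen_demo() -> (u8, u16) {
    use core::arch::aarch64::*;
    unsafe {
        let a = vdup_n_u8(7);
        let b = vdup_n_u8(9);
        let eq = vceq_u8(a, a); // equal lanes -> 0xFF mask in every lane
        let wide = vabdl_u8(a, b); // |7 - 9| = 2, widened into u16 lanes
        (vget_lane_u8::<0>(eq), vgetq_lane_u16::<0>(wide))
    }
}
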
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { simd_eq(a, b) } @@ -922,6 +1004,7 @@ pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmeq))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_eq(a, b) } @@ -932,6 +1015,7 @@ pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmeq))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_eq(a, b) } @@ -942,6 +1026,7 @@ pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { let c: int8x8_t = simd_and(a, b); let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -954,6 +1039,7 @@ pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { let c: int8x16_t = simd_and(a, b); let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); @@ -966,6 +1052,7 @@ pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { let c: int16x4_t = simd_and(a, b); let d: i16x4 = i16x4::new(0, 0, 0, 0); @@ -978,6 +1065,7 @@ pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { let c: int16x8_t = simd_and(a, b); let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -990,6 +1078,7 @@ pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { let c: int32x2_t = simd_and(a, b); let d: i32x2 = i32x2::new(0, 0); @@ -1002,6 +1091,7 @@ pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { let c: int32x4_t = simd_and(a, b); let d: i32x4 = i32x4::new(0, 0, 0, 0); @@ -1014,6 +1104,7 @@ pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { let c: poly8x8_t = simd_and(a, b); let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1026,6 +1117,7 @@ pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { let c: poly8x16_t = simd_and(a, b); let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); @@ -1038,6 +1130,7 @@ pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { let c: poly16x4_t = simd_and(a, b); let d: i16x4 = i16x4::new(0, 0, 0, 0); @@ -1050,6 +1143,7 @@ pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { let c: poly16x8_t = simd_and(a, b); let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1062,6 +1156,7 @@ pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let c: 
uint8x8_t = simd_and(a, b); let d: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1074,6 +1169,7 @@ pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { let c: uint8x16_t = simd_and(a, b); let d: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); @@ -1086,6 +1182,7 @@ pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let c: uint16x4_t = simd_and(a, b); let d: u16x4 = u16x4::new(0, 0, 0, 0); @@ -1098,6 +1195,7 @@ pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let c: uint16x8_t = simd_and(a, b); let d: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1110,6 +1208,7 @@ pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let c: uint32x2_t = simd_and(a, b); let d: u32x2 = u32x2::new(0, 0); @@ -1122,6 +1221,7 @@ pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let c: uint32x4_t = simd_and(a, b); let d: u32x4 = u32x4::new(0, 0, 0, 0); @@ -1134,6 +1234,7 @@ pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabs))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { simd_fabs(a) } @@ -1144,6 +1245,7 @@ pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabs))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = 
"1.59.0"))] pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { simd_fabs(a) } @@ -1154,6 +1256,7 @@ pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_gt(a, b) } @@ -1164,6 +1267,7 @@ pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_gt(a, b) } @@ -1174,6 +1278,7 @@ pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_gt(a, b) } @@ -1184,6 +1289,7 @@ pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_gt(a, b) } @@ -1194,6 +1300,7 @@ pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_gt(a, b) } @@ -1204,6 +1311,7 @@ pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_gt(a, b) } @@ -1214,6 +1322,7 @@ pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_gt(a, b) } @@ -1224,6 +1333,7 @@ pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] 
@@ -1224,6 +1333,7 @@ pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     simd_gt(a, b)
 }
@@ -1234,6 +1344,7 @@ pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     simd_gt(a, b)
 }
@@ -1244,6 +1355,7 @@ pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     simd_gt(a, b)
 }
@@ -1254,6 +1366,7 @@ pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     simd_gt(a, b)
 }
@@ -1264,6 +1377,7 @@ pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     simd_gt(a, b)
 }
@@ -1274,6 +1388,7 @@ pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
     simd_gt(a, b)
 }
@@ -1284,6 +1399,7 @@ pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
     simd_gt(a, b)
 }
@@ -1294,6 +1410,7 @@ pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
     simd_lt(a, b)
 }
@@ -1304,6 +1421,7 @@ pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
     simd_lt(a, b)
 }
@@ -1314,6 +1432,7 @@ pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
     simd_lt(a, b)
 }
@@ -1324,6 +1443,7 @@ pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
     simd_lt(a, b)
 }
@@ -1334,6 +1454,7 @@ pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
     simd_lt(a, b)
 }
@@ -1344,6 +1465,7 @@ pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
     simd_lt(a, b)
 }
@@ -1354,6 +1476,7 @@ pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     simd_lt(a, b)
 }
@@ -1364,6 +1487,7 @@ pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     simd_lt(a, b)
 }
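
// Editor's note: illustrative sketch, not part of the diff. The vclt_*
// entries above assert `vcgt.*` on ARM because a less-than is compiled as
// the reversed-operand greater-than; either spelling yields the same mask.
// `lt_is_swapped_gt_demo` is a hypothetical name.
#[cfg(target_arch = "aarch64")]
fn lt_is_swapped_gt_demo() -> bool {
    use core::arch::aarch64::*;
    unsafe {
        let a = vdup_n_u8(1);
        let b = vdup_n_u8(2);
        // vclt_u8(a, b) and vcgt_u8(b, a) produce identical masks.
        vget_lane_u8::<0>(vclt_u8(a, b)) == vget_lane_u8::<0>(vcgt_u8(b, a))
    }
}
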
@@ -1374,6 +1498,7 @@ pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     simd_lt(a, b)
 }
@@ -1384,6 +1509,7 @@ pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     simd_lt(a, b)
 }
@@ -1394,6 +1520,7 @@ pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     simd_lt(a, b)
 }
@@ -1404,6 +1531,7 @@ pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     simd_lt(a, b)
 }
@@ -1414,6 +1542,7 @@ pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
     simd_lt(a, b)
 }
@@ -1424,6 +1553,7 @@ pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
     simd_lt(a, b)
 }
@@ -1434,6 +1564,7 @@ pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
     simd_le(a, b)
 }
@@ -1444,6 +1575,7 @@ pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
     simd_le(a, b)
 }
@@ -1454,6 +1586,7 @@ pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
     simd_le(a, b)
 }
@@ -1464,6 +1597,7 @@ pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
     simd_le(a, b)
 }
@@ -1474,6 +1608,7 @@ pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
     simd_le(a, b)
 }
@@ -1484,6 +1619,7 @@ pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
     simd_le(a, b)
 }
@@ -1494,6 +1630,7 @@ pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     simd_le(a, b)
 }
@@ -1504,6 +1641,7 @@ pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     simd_le(a, b)
 }
@@ -1514,6 +1652,7 @@ pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     simd_le(a, b)
 }
@@ -1524,6 +1663,7 @@ pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     simd_le(a, b)
 }
@@ -1534,6 +1674,7 @@ pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     simd_le(a, b)
 }
@@ -1544,6 +1685,7 @@ pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     simd_le(a, b)
 }
@@ -1554,6 +1696,7 @@ pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
     simd_le(a, b)
 }
@@ -1564,6 +1707,7 @@ pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
     simd_le(a, b)
 }
@@ -1574,6 +1718,7 @@ pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
     simd_ge(a, b)
 }
@@ -1584,6 +1729,7 @@ pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
     simd_ge(a, b)
 }
@@ -1594,6 +1740,7 @@ pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
     simd_ge(a, b)
 }
@@ -1604,6 +1751,7 @@ pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
     simd_ge(a, b)
 }
@@ -1614,6 +1762,7 @@ pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
     simd_ge(a, b)
 }
@@ -1624,6 +1773,7 @@ pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
     simd_ge(a, b)
 }
@@ -1634,6 +1784,7 @@ pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     simd_ge(a, b)
 }
@@ -1644,6 +1795,7 @@ pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     simd_ge(a, b)
 }
@@ -1654,6 +1806,7 @@ pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     simd_ge(a, b)
 }
@@ -1664,6 +1817,7 @@ pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     simd_ge(a, b)
 }
@@ -1674,6 +1828,7 @@ pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     simd_ge(a, b)
 }
@@ -1684,6 +1839,7 @@ pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     simd_ge(a, b)
 }
@@ -1694,6 +1850,7 @@ pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
     simd_ge(a, b)
 }
@@ -1704,6 +1861,7 @@ pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
     simd_ge(a, b)
 }
@@ -1714,6 +1872,7 @@ pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -1730,6 +1889,7 @@ vcls_s8_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t {
     #[allow(improper_ctypes)]
    extern "unadjusted" {
@@ -1746,6 +1906,7 @@ vclsq_s8_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -1762,6 +1923,7 @@ vcls_s16_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -1778,6 +1940,7 @@ vclsq_s16_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -1794,6 +1957,7 @@ vcls_s32_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -1810,6 +1974,7 @@ vclsq_s32_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t {
     transmute(vcls_s8(transmute(a)))
 }
@@ -1820,6 +1985,7 @@ pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t {
     transmute(vclsq_s8(transmute(a)))
 }
@@ -1830,6 +1996,7 @@ pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t {
     transmute(vcls_s16(transmute(a)))
 }
@@ -1840,6 +2007,7 @@ pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t {
     transmute(vclsq_s16(transmute(a)))
 }
@@ -1850,6 +2018,7 @@ pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t {
     transmute(vcls_s32(transmute(a)))
 }
@@ -1860,6 +2029,7 @@ pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t {
     transmute(vclsq_s32(transmute(a)))
 }
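
// Editor's note: illustrative sketch, not part of the diff. vcls_* counts
// the leading bits that match the sign bit, excluding the sign bit itself
// (so vcls_s8(-1) is 7), while vclz_* counts leading zero bits (so
// vclz_u8(1) is 7); the unsigned vcls variants just reinterpret the bits
// and reuse the signed implementation, as the transmute-based bodies above
// show. `cls_clz_demo` is a hypothetical name.
#[cfg(target_arch = "aarch64")]
fn cls_clz_demo() -> (i8, u8) {
    use core::arch::aarch64::*;
    unsafe {
        let s = vcls_s8(vdup_n_s8(-1)); // all bits match the sign bit -> 7
        let z = vclz_u8(vdup_n_u8(1)); // seven leading zeros before the 1
        (vget_lane_s8::<0>(s), vget_lane_u8::<0>(z))
    }
}
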
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { vclz_s8_(a) } @@ -1880,6 +2051,7 @@ pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { vclzq_s8_(a) } @@ -1890,6 +2062,7 @@ pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { vclz_s16_(a) } @@ -1900,6 +2073,7 @@ pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { vclzq_s16_(a) } @@ -1910,6 +2084,7 @@ pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { vclz_s32_(a) } @@ -1920,6 +2095,7 @@ pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { vclzq_s32_(a) } @@ -1930,6 +2106,7 @@ pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { transmute(vclz_s8_(transmute(a))) } @@ -1940,6 +2117,7 @@ pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { transmute(vclzq_s8_(transmute(a))) } @@ -1950,6 +2128,7 @@ pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] +#[cfg_attr(target_arch = 
"aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { transmute(vclz_s16_(transmute(a))) } @@ -1960,6 +2139,7 @@ pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { transmute(vclzq_s16_(transmute(a))) } @@ -1970,6 +2150,7 @@ pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { transmute(vclz_s32_(transmute(a))) } @@ -1980,6 +2161,7 @@ pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { transmute(vclzq_s32_(transmute(a))) } @@ -1990,6 +2172,7 @@ pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2006,6 +2189,7 @@ vcagt_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2022,6 +2206,7 @@ vcagtq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2038,6 +2223,7 @@ vcage_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2054,6 +2240,7 @@ vcageq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vacgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { vcagt_f32(b, a) } @@ -2064,6 +2251,7 @@ pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { vcagtq_f32(b, a) } @@ -2074,6 +2262,7 @@ pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { vcage_f32(b, a) } @@ -2084,6 +2273,7 @@ pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { vcageq_f32(b, a) } @@ -2094,6 +2284,7 @@ pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { transmute(a) } @@ -2104,6 +2295,7 @@ pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { transmute(a) } @@ -2114,6 +2306,7 @@ pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { transmute(a) } @@ -2124,6 +2317,7 @@ pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_s64(a: u64) -> int64x1_t { transmute(a) } @@ -2134,6 +2328,7 @@ pub unsafe fn vcreate_s64(a: u64) -> int64x1_t { #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { transmute(a) } @@ -2144,6 +2339,7 @@ pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { transmute(a) } @@ -2154,6 +2350,7 @@ pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { transmute(a) } @@ -2164,6 +2361,7 @@ pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t { transmute(a) } @@ -2174,6 +2372,7 @@ pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { transmute(a) } @@ -2184,6 +2383,7 @@ pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { transmute(a) } @@ -2194,6 +2394,7 @@ pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t { transmute(a) } @@ -2204,6 +2405,7 @@ pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { transmute(a) } @@ -2214,6 +2416,7 @@ pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(scvtf))] 
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { simd_cast(a) } @@ -2224,6 +2427,7 @@ pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(scvtf))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { simd_cast(a) } @@ -2234,6 +2438,7 @@ pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ucvtf))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { simd_cast(a) } @@ -2244,6 +2449,7 @@ pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ucvtf))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { simd_cast(a) } @@ -2252,7 +2458,7 @@ pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt, N = 2))] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); @@ -2268,8 +2474,9 @@ vcvt_n_f32_s32_(a, N) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(scvtf, N = 2))] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -2284,7 +2491,7 @@ vcvt_n_f32_s32_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt, N = 2))] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); @@ -2300,8 +2507,9 @@ vcvtq_n_f32_s32_(a, N) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(scvtf, N = 2))] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -2316,7 +2524,7 @@ vcvtq_n_f32_s32_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt, N = 2))] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vcvt_n_f32_u32(a: 
uint32x2_t) -> float32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); @@ -2332,8 +2540,9 @@ vcvt_n_f32_u32_(a, N) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ucvtf, N = 2))] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f32_u32<const N: i32>(a: uint32x2_t) -> float32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -2348,7 +2557,7 @@ vcvt_n_f32_u32_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt, N = 2))] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vcvtq_n_f32_u32<const N: i32>(a: uint32x4_t) -> float32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); @@ -2364,8 +2573,9 @@ vcvtq_n_f32_u32_(a, N) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ucvtf, N = 2))] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f32_u32<const N: i32>(a: uint32x4_t) -> float32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -2380,7 +2590,7 @@ vcvtq_n_f32_u32_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt, N = 2))] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vcvt_n_s32_f32<const N: i32>(a: float32x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); @@ -2396,8 +2606,9 @@ vcvt_n_s32_f32_(a, N) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzs, N = 2))] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_s32_f32<const N: i32>(a: float32x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -2412,7 +2623,7 @@ vcvt_n_s32_f32_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt, N = 2))] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vcvtq_n_s32_f32<const N: i32>(a: float32x4_t) -> int32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); @@ -2428,8 +2639,9 @@ vcvtq_n_s32_f32_(a, N) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzs, N = 2))] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_s32_f32<const N: i32>(a: float32x4_t) -> int32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -2444,7 +2656,7 @@ vcvtq_n_s32_f32_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt, N = 2))] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vcvt_n_u32_f32<const N: i32>(a: float32x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); 
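(Usage sketch for the vcvt_n conversions above, not part of the diff: the const parameter N is the number of fractional bits, so the conversion scales by 2^-N; target and values are illustrative.)

    #[cfg(target_arch = "aarch64")]
    unsafe fn vcvt_n_sketch() {
        use core::arch::aarch64::*;
        let q = vdupq_n_s32(12);            // 12 in every lane
        let f = vcvtq_n_f32_s32::<2>(q);    // 2 fractional bits: 12 / 2^2 == 3.0
        assert_eq!(vgetq_lane_f32::<0>(f), 3.0);
    }
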
@@ -2460,8 +2672,9 @@ vcvt_n_u32_f32_(a, N) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzu, N = 2))] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_u32_f32<const N: i32>(a: float32x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -2476,7 +2689,7 @@ vcvt_n_u32_f32_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt, N = 2))] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vcvtq_n_u32_f32<const N: i32>(a: float32x4_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); @@ -2492,8 +2705,9 @@ vcvtq_n_u32_f32_(a, N) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzu, N = 2))] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_u32_f32<const N: i32>(a: float32x4_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -2510,6 +2724,7 @@ vcvtq_n_u32_f32_(a, N) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzs))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2526,6 +2741,7 @@ vcvt_s32_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzs))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2542,6 +2758,7 @@ vcvtq_s32_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzu))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2558,6 +2775,7 @@ vcvt_u32_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzu))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2575,6 +2793,7 @@ vcvtq_u32_f32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_s8<const N: i32>(a: int8x8_t) -> int8x8_t { static_assert_imm3!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) 
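(Usage sketch for the vdup_lane family, not part of the diff: N selects the lane to broadcast; vcreate lane numbering below assumes little-endian.)

    #[cfg(target_arch = "aarch64")]
    unsafe fn vdup_lane_sketch() {
        use core::arch::aarch64::*;
        let v = vcreate_s32(0x0000_0002_0000_0001); // lanes [1, 2]
        let d = vdup_lane_s32::<1>(v);              // broadcast lane 1
        assert_eq!(vget_lane_s32::<0>(d), 2);
        assert_eq!(vget_lane_s32::<1>(d), 2);
    }
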
@@ -2587,6 +2806,7 @@ pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_s8<const N: i32>(a: int8x16_t) -> int8x16_t { static_assert_imm4!(N); simd_shuffle16!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2599,6 +2819,7 @@ pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_s16<const N: i32>(a: int16x4_t) -> int16x4_t { static_assert_imm2!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -2611,6 +2832,7 @@ pub unsafe fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_s16<const N: i32>(a: int16x8_t) -> int16x8_t { static_assert_imm3!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2623,6 +2845,7 @@ pub unsafe fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_s32<const N: i32>(a: int32x2_t) -> int32x2_t { static_assert_imm1!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -2635,6 +2858,7 @@ pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_s32<const N: i32>(a: int32x4_t) -> int32x4_t { static_assert_imm2!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -2647,6 +2871,7 @@ pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_s8<const N: i32>(a: int8x16_t) -> int8x8_t { static_assert_imm4!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2659,6 +2884,7 @@ pub unsafe fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vdup_laneq_s16<const N: i32>(a: int16x8_t) -> int16x4_t { static_assert_imm3!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -2671,6 +2897,7 @@ pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_s32<const N: i32>(a: int32x4_t) -> int32x2_t { static_assert_imm2!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -2683,6 +2910,7 @@ pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_s8<const N: i32>(a: int8x8_t) -> int8x16_t { static_assert_imm3!(N); simd_shuffle16!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2695,6 +2923,7 @@ pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_s16<const N: i32>(a: int16x4_t) -> int16x8_t { static_assert_imm2!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2707,6 +2936,7 @@ pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_s32<const N: i32>(a: int32x2_t) -> int32x4_t { static_assert_imm1!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -2719,6 +2949,7 @@ pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t { static_assert_imm3!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2731,6 +2962,7 @@ pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t { static_assert_imm4!(N); simd_shuffle16!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2743,6 +2975,7 @@ pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { static_assert_imm2!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -2755,6 +2988,7 @@ pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { static_assert_imm3!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2767,6 +3001,7 @@ pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { static_assert_imm1!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -2779,6 +3014,7 @@ pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { static_assert_imm2!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -2791,6 +3027,7 @@ pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { static_assert_imm4!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2803,6 +3040,7 @@ pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { static_assert_imm3!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -2815,6 +3053,7 @@ pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { static_assert_imm2!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -2827,6 +3066,7 @@ pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { static_assert_imm3!(N); simd_shuffle16!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2839,6 +3079,7 @@ pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { static_assert_imm2!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2851,6 +3092,7 @@ pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { static_assert_imm1!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -2863,6 +3105,7 @@ pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { static_assert_imm3!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2875,6 +3118,7 @@ pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { static_assert_imm4!(N); simd_shuffle16!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2887,6 +3131,7 @@ pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { static_assert_imm2!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -2899,6 +3144,7 @@ pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = 
"aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { static_assert_imm3!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2911,6 +3157,7 @@ pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { static_assert_imm4!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2923,6 +3170,7 @@ pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { static_assert_imm3!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -2935,6 +3183,7 @@ pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { static_assert_imm3!(N); simd_shuffle16!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2947,6 +3196,7 @@ pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { static_assert_imm2!(N); simd_shuffle8!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -2959,6 +3209,7 @@ pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { static_assert_imm1!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -2971,6 +3222,7 @@ pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 0))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { static_assert!(N : i32 where N == 0); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -2983,6 +3235,7 @@ pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { static_assert_imm1!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -2995,6 +3248,7 @@ pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 0))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { static_assert!(N : i32 where N == 0); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3007,6 +3261,7 @@ pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { static_assert_imm1!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3019,6 +3274,7 @@ pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { static_assert_imm2!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3031,6 +3287,7 @@ pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { static_assert_imm2!(N); simd_shuffle2!(a, a, [N as u32, N as u32]) @@ -3043,6 +3300,7 @@ pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { static_assert_imm1!(N); simd_shuffle4!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3055,6 +3313,7 @@ pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 0))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { static_assert!(N : i32 where N == 0); a @@ -3067,6 +3326,7 @@ pub unsafe fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 0))] 
#[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t { static_assert!(N : i32 where N == 0); a @@ -3079,6 +3339,7 @@ pub unsafe fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_s64<const N: i32>(a: int64x2_t) -> int64x1_t { static_assert_imm1!(N); transmute::<i64, _>(simd_extract(a, N as u32)) @@ -3091,6 +3352,7 @@ pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x1_t { static_assert_imm1!(N); transmute::<u64, _>(simd_extract(a, N as u32)) @@ -3103,6 +3365,7 @@ pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 4))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert_imm3!(N); match N & 0b111 { @@ -3125,6 +3388,7 @@ pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 8))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert_imm4!(N); match N & 0b1111 { @@ -3155,6 +3419,7 @@ pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_imm2!(N); match N & 0b11 { @@ -3173,6 +3438,7 @@ pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 4))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_imm3!(N); match N & 0b111 { @@ -3195,6 +3461,7 @@ pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert_imm1!(N); match N & 0b1 { 
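(Usage sketch for the vext family, not part of the diff: the match arms above lower N to a compile-time shuffle that takes lanes of the concatenation a:b starting at index N; vcreate lane numbering assumes little-endian.)

    #[cfg(target_arch = "aarch64")]
    unsafe fn vext_sketch() {
        use core::arch::aarch64::*;
        let a = vcreate_u32(0x0000_0002_0000_0001); // [1, 2]
        let b = vcreate_u32(0x0000_0004_0000_0003); // [3, 4]
        let r = vext_u32::<1>(a, b);                // a:b from index 1 -> [2, 3]
        assert_eq!(vget_lane_u32::<0>(r), 2);
        assert_eq!(vget_lane_u32::<1>(r), 3);
    }
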
@@ -3211,6 +3478,7 @@ pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert_imm2!(N); match N & 0b11 { @@ -3229,6 +3497,7 @@ pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 4))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { static_assert_imm3!(N); match N & 0b111 { @@ -3251,6 +3520,7 @@ pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 8))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { static_assert_imm4!(N); match N & 0b1111 { @@ -3281,6 +3551,7 @@ pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert_imm2!(N); match N & 0b11 { @@ -3299,6 +3570,7 @@ pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 4))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { static_assert_imm3!(N); match N & 0b111 { @@ -3321,6 +3593,7 @@ pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { static_assert_imm1!(N); match N & 0b1 { @@ -3337,6 +3610,7 @@ pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { static_assert_imm2!(N); match N & 0b11 { @@ -3355,6 +3629,7 @@ pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 4))] 
#[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { static_assert_imm3!(N); match N & 0b111 { @@ -3377,6 +3652,7 @@ pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 8))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { static_assert_imm4!(N); match N & 0b1111 { @@ -3407,6 +3683,7 @@ pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { static_assert_imm2!(N); match N & 0b11 { @@ -3425,6 +3702,7 @@ pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 4))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { static_assert_imm3!(N); match N & 0b111 { @@ -3447,6 +3725,7 @@ pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert_imm1!(N); match N & 0b1 { @@ -3463,6 +3742,7 @@ pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { static_assert_imm1!(N); match N & 0b1 { @@ -3479,6 +3759,7 @@ pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_f32<const N: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t { static_assert_imm1!(N); match N & 0b1 { @@ -3495,6 +3776,7 @@ pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_f32<const N: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t { static_assert_imm2!(N); match N & 0b11 
{ @@ -3512,6 +3794,7 @@ pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { simd_add(a, simd_mul(b, c)) } @@ -3522,6 +3805,7 @@ pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { simd_add(a, simd_mul(b, c)) } @@ -3532,6 +3816,7 @@ pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { simd_add(a, simd_mul(b, c)) } @@ -3542,6 +3827,7 @@ pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { simd_add(a, simd_mul(b, c)) } @@ -3552,6 +3838,7 @@ pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { simd_add(a, simd_mul(b, c)) } @@ -3562,6 +3849,7 @@ pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { simd_add(a, simd_mul(b, c)) } @@ -3572,6 +3860,7 @@ pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { simd_add(a, simd_mul(b, c)) } @@ -3582,6 +3871,7 @@ pub unsafe 
fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { simd_add(a, simd_mul(b, c)) } @@ -3592,6 +3882,7 @@ pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { simd_add(a, simd_mul(b, c)) } @@ -3602,6 +3893,7 @@ pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { simd_add(a, simd_mul(b, c)) } @@ -3612,6 +3904,7 @@ pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { simd_add(a, simd_mul(b, c)) } @@ -3622,6 +3915,7 @@ pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { simd_add(a, simd_mul(b, c)) } @@ -3632,6 +3926,7 @@ pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { simd_add(a, simd_mul(b, c)) } @@ -3642,6 +3937,7 @@ pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { simd_add(a, simd_mul(b, c)) } 
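(Aside, not part of the diff: the hunks above only attach stability metadata; the multiply-accumulate semantics are unchanged. A minimal sketch of what these intrinsics compute, assuming an AArch64 target where NEON is always available; the function name `mla_demo` and the sample values are illustrative, not from stdarch.)

```rust
// Sketch only: exercises vmlaq_s16 and vmlaq_f32 from core::arch::aarch64.
#[cfg(target_arch = "aarch64")]
fn mla_demo() {
    use core::arch::aarch64::*;
    // SAFETY: NEON is mandatory on AArch64, so these intrinsics are callable.
    unsafe {
        let a = vdupq_n_s16(10); // accumulator, every lane = 10
        let b = vdupq_n_s16(3);
        let c = vdupq_n_s16(4);
        // vmlaq_s16 computes a + b * c per lane (the `mla` instruction).
        let r = vmlaq_s16(a, b, c);
        assert_eq!(vgetq_lane_s16::<0>(r), 22);

        // The f32 form is expressed as a separate multiply and add, which is
        // why the test attributes above assert `fmul` rather than a fused op.
        let f = vmlaq_f32(vdupq_n_f32(1.0), vdupq_n_f32(2.0), vdupq_n_f32(0.5));
        assert_eq!(vgetq_lane_f32::<0>(f), 2.0);
    }
}
```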
@@ -3652,6 +3948,7 @@ pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { vmla_s16(a, b, vdup_n_s16(c)) } @@ -3662,6 +3959,7 @@ pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { vmlaq_s16(a, b, vdupq_n_s16(c)) } @@ -3672,6 +3970,7 @@ pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { vmla_s32(a, b, vdup_n_s32(c)) } @@ -3682,6 +3981,7 @@ pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { vmlaq_s32(a, b, vdupq_n_s32(c)) } @@ -3692,6 +3992,7 @@ pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { vmla_u16(a, b, vdup_n_u16(c)) } @@ -3702,6 +4003,7 @@ pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { vmlaq_u16(a, b, vdupq_n_u16(c)) } @@ -3712,6 +4014,7 @@ pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { vmla_u32(a, b, vdup_n_u32(c)) } @@ -3722,6 +4025,7 @@ 
pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { vmlaq_u32(a, b, vdupq_n_u32(c)) } @@ -3732,6 +4036,7 @@ pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vmla_f32(a, b, vdup_n_f32(c)) } @@ -3742,6 +4047,7 @@ pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vmlaq_f32(a, b, vdupq_n_f32(c)) } @@ -3753,6 +4059,7 @@ pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); vmla_s16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -3765,6 +4072,7 @@ pub unsafe fn vmla_lane_s16(a: int16x4_t, b: int16x4_t, c: int1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { static_assert_imm3!(LANE); vmla_s16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -3777,6 +4085,7 @@ pub unsafe fn vmla_laneq_s16(a: int16x4_t, b: int16x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { static_assert_imm2!(LANE); vmlaq_s16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -3789,6 +4098,7 @@ pub unsafe fn vmlaq_lane_s16(a: int16x8_t, b: int16x8_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); vmlaq_s16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -3801,6 +4111,7 @@ pub unsafe fn vmlaq_laneq_s16(a: int16x8_t, b: int16x8_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); vmla_s32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -3813,6 +4124,7 @@ pub unsafe fn vmla_lane_s32(a: int32x2_t, b: int32x2_t, c: int3 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { static_assert_imm2!(LANE); vmla_s32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -3825,6 +4137,7 @@ pub unsafe fn vmla_laneq_s32(a: int32x2_t, b: int32x2_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { static_assert_imm1!(LANE); vmlaq_s32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -3837,6 +4150,7 @@ pub unsafe fn vmlaq_lane_s32(a: int32x4_t, b: int32x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmlaq_s32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -3849,6 +4163,7 @@ pub unsafe fn vmlaq_laneq_s32(a: int32x4_t, b: int32x4_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { static_assert_imm2!(LANE); vmla_u16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -3861,6 +4176,7 @@ pub unsafe fn vmla_lane_u16(a: uint16x4_t, b: uint16x4_t, c: ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature =
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t { static_assert_imm3!(LANE); vmla_u16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -3873,6 +4189,7 @@ pub unsafe fn vmla_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { static_assert_imm2!(LANE); vmlaq_u16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -3885,6 +4202,7 @@ pub unsafe fn vmlaq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { static_assert_imm3!(LANE); vmlaq_u16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -3897,6 +4215,7 @@ pub unsafe fn vmlaq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { static_assert_imm1!(LANE); vmla_u32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -3909,6 +4228,7 @@ pub unsafe fn vmla_lane_u32(a: uint32x2_t, b: uint32x2_t, c: ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t { static_assert_imm2!(LANE); vmla_u32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -3921,6 +4241,7 @@ pub unsafe fn vmla_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t { static_assert_imm1!(LANE); vmlaq_u32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -3933,6 +4254,7 @@ pub unsafe fn vmlaq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since =
"1.59.0"))] pub unsafe fn vmlaq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmlaq_u32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -3945,6 +4267,7 @@ pub unsafe fn vmlaq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { static_assert_imm1!(LANE); vmla_f32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -3957,6 +4280,7 @@ pub unsafe fn vmla_lane_f32(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { static_assert_imm2!(LANE); vmla_f32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -3969,6 +4293,7 @@ pub unsafe fn vmla_laneq_f32(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { static_assert_imm1!(LANE); vmlaq_f32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -3981,6 +4306,7 @@ pub unsafe fn vmlaq_lane_f32(a: float32x4_t, b: float32x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { static_assert_imm2!(LANE); vmlaq_f32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -3992,6 +4318,7 @@ pub unsafe fn vmlaq_laneq_f32(a: float32x4_t, b: float32x4_t, c #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { simd_add(a, vmull_s8(b, c)) } @@ -4002,6 +4329,7 @@ pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { simd_add(a, vmull_s16(b, c)) } @@ -4012,6 +4340,7 @@ pub unsafe fn vmlal_s16(a:
int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { simd_add(a, vmull_s32(b, c)) } @@ -4022,6 +4351,7 @@ pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { simd_add(a, vmull_u8(b, c)) } @@ -4032,6 +4362,7 @@ pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { simd_add(a, vmull_u16(b, c)) } @@ -4042,6 +4373,7 @@ pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { simd_add(a, vmull_u32(b, c)) } @@ -4052,6 +4384,7 @@ pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vmlal_s16(a, b, vdup_n_s16(c)) } @@ -4062,6 +4395,7 @@ pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vmlal_s32(a, b, vdup_n_s32(c)) } @@ -4072,6 +4406,7 @@ pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { vmlal_u16(a, b, vdup_n_u16(c)) } @@ -4082,6 +4417,7 @@ 
pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { vmlal_u32(a, b, vdup_n_u32(c)) } @@ -4093,6 +4429,7 @@ pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmlal_s16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4105,6 +4442,7 @@ pub unsafe fn vmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { static_assert_imm3!(LANE); vmlal_s16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4117,6 +4455,7 @@ pub unsafe fn vmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { static_assert_imm1!(LANE); vmlal_s32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4129,6 +4468,7 @@ pub unsafe fn vmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { static_assert_imm2!(LANE); vmlal_s32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4141,6 +4481,7 @@ pub unsafe fn vmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmlal_u16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4153,6 +4494,7 @@ pub unsafe fn vmlal_lane_u16(a: uint32x4_t, b: uint16x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] #[cfg_attr(all(test, target_arch =
"aarch64"), assert_instr(umlal, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t { static_assert_imm3!(LANE); vmlal_u16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4165,6 +4507,7 @@ pub unsafe fn vmlal_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { static_assert_imm1!(LANE); vmlal_u32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4177,6 +4520,7 @@ pub unsafe fn vmlal_lane_u32(a: uint64x2_t, b: uint32x2_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { static_assert_imm2!(LANE); vmlal_u32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4188,6 +4532,7 @@ pub unsafe fn vmlal_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { simd_sub(a, simd_mul(b, c)) } @@ -4198,6 +4543,7 @@ pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { simd_sub(a, simd_mul(b, c)) } @@ -4208,6 +4554,7 @@ pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { simd_sub(a, simd_mul(b, c)) } @@ -4218,6 +4565,7 @@ pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { simd_sub(a, simd_mul(b, c)) } @@ -4228,6 +4576,7 @@ pub unsafe fn vmlsq_s16(a: int16x8_t, b:
int16x8_t, c: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { simd_sub(a, simd_mul(b, c)) } @@ -4238,6 +4587,7 @@ pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { simd_sub(a, simd_mul(b, c)) } @@ -4248,6 +4598,7 @@ pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { simd_sub(a, simd_mul(b, c)) } @@ -4258,6 +4609,7 @@ pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { simd_sub(a, simd_mul(b, c)) } @@ -4268,6 +4620,7 @@ pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { simd_sub(a, simd_mul(b, c)) } @@ -4278,6 +4631,7 @@ pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { simd_sub(a, simd_mul(b, c)) } @@ -4288,6 +4642,7 @@ pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { simd_sub(a, simd_mul(b, c)) } @@ -4298,6 +4653,7 @@ pub unsafe fn vmls_u32(a: 
uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { simd_sub(a, simd_mul(b, c)) } @@ -4308,6 +4664,7 @@ pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { simd_sub(a, simd_mul(b, c)) } @@ -4318,6 +4675,7 @@ pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { simd_sub(a, simd_mul(b, c)) } @@ -4328,6 +4686,7 @@ pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { vmls_s16(a, b, vdup_n_s16(c)) } @@ -4338,6 +4697,7 @@ pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { vmlsq_s16(a, b, vdupq_n_s16(c)) } @@ -4348,6 +4708,7 @@ pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { vmls_s32(a, b, vdup_n_s32(c)) } @@ -4358,6 +4719,7 @@ pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { vmlsq_s32(a, b, vdupq_n_s32(c)) } @@ -4368,6 +4730,7 @@ pub unsafe fn 
vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { vmls_u16(a, b, vdup_n_u16(c)) } @@ -4378,6 +4741,7 @@ pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { vmlsq_u16(a, b, vdupq_n_u16(c)) } @@ -4388,6 +4752,7 @@ pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { vmls_u32(a, b, vdup_n_u32(c)) } @@ -4398,6 +4763,7 @@ pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { vmlsq_u32(a, b, vdupq_n_u32(c)) } @@ -4408,6 +4774,7 @@ pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vmls_f32(a, b, vdup_n_f32(c)) } @@ -4418,6 +4785,7 @@ pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vmlsq_f32(a, b, vdupq_n_f32(c)) } @@ -4429,6 +4797,7 @@ pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); vmls_s16(a, b, simd_shuffle4!(c, c,
[LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4441,6 +4810,7 @@ pub unsafe fn vmls_lane_s16(a: int16x4_t, b: int16x4_t, c: int1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { static_assert_imm3!(LANE); vmls_s16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4453,6 +4823,7 @@ pub unsafe fn vmls_laneq_s16(a: int16x4_t, b: int16x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { static_assert_imm2!(LANE); vmlsq_s16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4465,6 +4836,7 @@ pub unsafe fn vmlsq_lane_s16(a: int16x8_t, b: int16x8_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); vmlsq_s16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4477,6 +4849,7 @@ pub unsafe fn vmlsq_laneq_s16(a: int16x8_t, b: int16x8_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); vmls_s32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4489,6 +4862,7 @@ pub unsafe fn vmls_lane_s32(a: int32x2_t, b: int32x2_t, c: int3 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { static_assert_imm2!(LANE); vmls_s32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4501,6 +4875,7 @@ pub unsafe fn vmls_laneq_s32(a: int32x2_t, b: int32x2_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { static_assert_imm1!(LANE); vmlsq_s32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
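(Aside, not part of the diff: each `_lane`/`_laneq` intrinsic above takes its lane index through the `<const LANE: i32>` parameter, range-checked at compile time by the `static_assert_imm*!` macros, while `#[rustc_legacy_const_generics(3)]` additionally lets older callers pass the index as a fourth call argument. A minimal usage sketch, assuming an AArch64 target; `mls_lane_demo` and the sample values are illustrative, not from stdarch.)

```rust
// Sketch only: vmls_lane_s16 subtracts b * c[LANE] from each lane of a.
#[cfg(target_arch = "aarch64")]
fn mls_lane_demo() {
    use core::arch::aarch64::*;
    // SAFETY: NEON is mandatory on AArch64, so these intrinsics are callable.
    unsafe {
        let a = vdup_n_s16(100);
        let b = vdup_n_s16(2);
        let lanes = [1i16, 2, 3, 4];
        let c = vld1_s16(lanes.as_ptr()); // c = [1, 2, 3, 4]
        // Broadcast lane 3 of c and fold it in: 100 - 2 * 4 = 92 per lane.
        let r = vmls_lane_s16::<3>(a, b, c);
        assert_eq!(vget_lane_s16::<0>(r), 92);
        // An out-of-range index such as vmls_lane_s16::<4> is rejected at
        // compile time by static_assert_imm2!(LANE).
    }
}
```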
@@ -4513,6 +4888,7 @@ pub unsafe fn vmlsq_lane_s32(a: int32x4_t, b: int32x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmlsq_s32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4525,6 +4901,7 @@ pub unsafe fn vmlsq_laneq_s32(a: int32x4_t, b: int32x4_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { static_assert_imm2!(LANE); vmls_u16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4537,6 +4914,7 @@ pub unsafe fn vmls_lane_u16(a: uint16x4_t, b: uint16x4_t, c: ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t { static_assert_imm3!(LANE); vmls_u16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4549,6 +4927,7 @@ pub unsafe fn vmls_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { static_assert_imm2!(LANE); vmlsq_u16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4561,6 +4940,7 @@ pub unsafe fn vmlsq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { static_assert_imm3!(LANE); vmlsq_u16(a, b, simd_shuffle8!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4573,6 +4953,7 @@ pub unsafe fn vmlsq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { static_assert_imm1!(LANE); vmls_u32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4585,6
+4966,7 @@ pub unsafe fn vmls_lane_u32(a: uint32x2_t, b: uint32x2_t, c: ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t { static_assert_imm2!(LANE); vmls_u32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4597,6 +4979,7 @@ pub unsafe fn vmls_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t { static_assert_imm1!(LANE); vmlsq_u32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4609,6 +4992,7 @@ pub unsafe fn vmlsq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmlsq_u32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4621,6 +5005,7 @@ pub unsafe fn vmlsq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { static_assert_imm1!(LANE); vmls_f32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4633,6 +5018,7 @@ pub unsafe fn vmls_lane_f32(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { static_assert_imm2!(LANE); vmls_f32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4645,6 +5031,7 @@ pub unsafe fn vmls_laneq_f32(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { static_assert_imm1!(LANE); vmlsq_f32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4657,6 +5044,7 @@ pub unsafe fn vmlsq_lane_f32(a: float32x4_t, b: float32x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE
= 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { static_assert_imm2!(LANE); vmlsq_f32(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4668,6 +5056,7 @@ pub unsafe fn vmlsq_laneq_f32(a: float32x4_t, b: float32x4_t, c #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { simd_sub(a, vmull_s8(b, c)) } @@ -4678,6 +5067,7 @@ pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { simd_sub(a, vmull_s16(b, c)) } @@ -4688,6 +5078,7 @@ pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { simd_sub(a, vmull_s32(b, c)) } @@ -4698,6 +5089,7 @@ pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { simd_sub(a, vmull_u8(b, c)) } @@ -4708,6 +5100,7 @@ pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { simd_sub(a, vmull_u16(b, c)) } @@ -4718,6 +5111,7 @@ pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { simd_sub(a, vmull_u32(b, c)) } @@ -4728,6 +5122,7 @@ pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t,
c: uint32x2_t) -> uint64x2 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vmlsl_s16(a, b, vdup_n_s16(c)) } @@ -4738,6 +5133,7 @@ pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vmlsl_s32(a, b, vdup_n_s32(c)) } @@ -4748,6 +5144,7 @@ pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { vmlsl_u16(a, b, vdup_n_u16(c)) } @@ -4758,6 +5155,7 @@ pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { vmlsl_u32(a, b, vdup_n_u32(c)) } @@ -4769,6 +5167,7 @@ pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmlsl_s16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4781,6 +5180,7 @@ pub unsafe fn vmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { static_assert_imm3!(LANE); vmlsl_s16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4793,6 +5193,7 @@ pub unsafe fn vmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_lane_s32(a: int64x2_t, b: 
int32x2_t, c: int32x2_t) -> int64x2_t { static_assert_imm1!(LANE); vmlsl_s32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4805,6 +5206,7 @@ pub unsafe fn vmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { static_assert_imm2!(LANE); vmlsl_s32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4817,6 +5219,7 @@ pub unsafe fn vmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmlsl_u16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4829,6 +5232,7 @@ pub unsafe fn vmlsl_lane_u16(a: uint32x4_t, b: uint16x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t { static_assert_imm3!(LANE); vmlsl_u16(a, b, simd_shuffle4!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4841,6 +5245,7 @@ pub unsafe fn vmlsl_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { static_assert_imm1!(LANE); vmlsl_u32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4853,6 +5258,7 @@ pub unsafe fn vmlsl_lane_u32(a: uint64x2_t, b: uint32x2_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { static_assert_imm2!(LANE); vmlsl_u32(a, b, simd_shuffle2!(c, c, [LANE as u32, LANE as u32])) @@ -4864,6 +5270,7 @@ pub unsafe fn vmlsl_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { simd_neg(a) } @@ -4874,6 +5281,7 @@ pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
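// ---- Editorial aside, not part of the patch: a minimal usage sketch for the
// vmlsl_lane_* family stabilized above (multiply-subtract long by lane). The
// demo function name, input values, and chosen LANE are illustrative
// assumptions, not taken from the diff.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn vmlsl_lane_demo() {
    use core::arch::aarch64::*;
    let acc = vdupq_n_s32(100); // a: 32-bit accumulator lanes
    let b = vdup_n_s16(3);      // b: 16-bit multiplicand lanes
    let c = vdup_n_s16(4);      // c: vector supplying the scalar lane
    // r = acc - widen(b) * widen(c[LANE]); LANE = 1 selects lane 1 of c.
    let r: int32x4_t = vmlsl_lane_s16::<1>(acc, b, c);
    assert_eq!(vgetq_lane_s32::<0>(r), 100 - 3 * 4);
}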
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { simd_neg(a) } @@ -4884,6 +5292,7 @@ pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { simd_neg(a) } @@ -4894,6 +5303,7 @@ pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { simd_neg(a) } @@ -4904,6 +5314,7 @@ pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { simd_neg(a) } @@ -4914,6 +5325,7 @@ pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { simd_neg(a) } @@ -4924,6 +5336,7 @@ pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fneg))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { simd_neg(a) } @@ -4934,6 +5347,7 @@ pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fneg))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { simd_neg(a) } @@ -4944,6 +5358,7 @@ pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4960,6 +5375,7 @@ vqneg_s8_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vqneg.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4976,6 +5392,7 @@ vqnegq_s8_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -4992,6 +5409,7 @@ vqneg_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5008,6 +5426,7 @@ vqnegq_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5024,6 +5443,7 @@ vqneg_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5040,6 +5460,7 @@ vqnegq_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5056,6 +5477,7 @@ vqsub_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5072,6 +5494,7 @@ vqsubq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5088,6 +5511,7 @@ vqsub_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vqsub.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5104,6 +5528,7 @@ vqsubq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5120,6 +5545,7 @@ vqsub_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5136,6 +5562,7 @@ vqsubq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5152,6 +5579,7 @@ vqsub_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5168,6 +5596,7 @@ vqsubq_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5184,6 +5613,7 @@ vqsub_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5200,6 +5630,7 @@ vqsubq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5216,6 +5647,7 @@ vqsub_s16_(a, b) #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5232,6 +5664,7 @@ vqsubq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5248,6 +5681,7 @@ vqsub_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5264,6 +5698,7 @@ vqsubq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5280,6 +5715,7 @@ vqsub_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5296,6 +5732,7 @@ vqsubq_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5312,6 +5749,7 @@ vhadd_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5328,6 +5766,7 @@ vhaddq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ 
-5344,6 +5783,7 @@ vhadd_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5360,6 +5800,7 @@ vhaddq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5376,6 +5817,7 @@ vhadd_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5392,6 +5834,7 @@ vhaddq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5408,6 +5851,7 @@ vhadd_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5424,6 +5868,7 @@ vhaddq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5440,6 +5885,7 @@ vhadd_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5456,6 +5902,7 @@ vhaddq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> 
int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5472,6 +5919,7 @@ vhadd_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5488,6 +5936,7 @@ vhaddq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5504,6 +5953,7 @@ vrhadd_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5520,6 +5970,7 @@ vrhaddq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5536,6 +5987,7 @@ vrhadd_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5552,6 +6004,7 @@ vrhaddq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5568,6 +6021,7 @@ vrhadd_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5584,6 +6038,7 @@ vrhaddq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature 
= "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5600,6 +6055,7 @@ vrhadd_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5616,6 +6072,7 @@ vrhaddq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5632,6 +6089,7 @@ vrhadd_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5648,6 +6106,7 @@ vrhaddq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5664,6 +6123,7 @@ vrhadd_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5680,6 +6140,7 @@ vrhaddq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frintn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5696,6 +6157,7 @@ vrndn_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frintn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5712,6 +6174,7 @@ vrndnq_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(uqadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5728,6 +6191,7 @@ vqadd_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5744,6 +6208,7 @@ vqaddq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5760,6 +6225,7 @@ vqadd_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5776,6 +6242,7 @@ vqaddq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5792,6 +6259,7 @@ vqadd_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5808,6 +6276,7 @@ vqaddq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5824,6 +6293,7 @@ vqadd_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5840,6 +6310,7 @@ vqaddq_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vqadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5856,6 +6327,7 @@ vqadd_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5872,6 +6344,7 @@ vqaddq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5888,6 +6361,7 @@ vqadd_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5904,6 +6378,7 @@ vqaddq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5920,6 +6395,7 @@ vqadd_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5936,6 +6412,7 @@ vqaddq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5952,6 +6429,7 @@ vqadd_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5968,6 +6446,7 @@ vqaddq_s64_(a, b) #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -5984,6 +6463,7 @@ vld1_s8_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6000,6 +6480,7 @@ vld1_s16_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6016,6 +6497,7 @@ vld1_s32_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6032,6 +6514,7 @@ vld1_s64_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6048,6 +6531,7 @@ vld1q_s8_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6064,6 +6548,7 @@ vld1q_s16_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6080,6 +6565,7 @@ vld1q_s32_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6096,6 +6582,7 @@ vld1q_s64_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] 
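// ---- Editorial aside, not part of the patch: the vld1*_x2/_x3/_x4 intrinsics
// stabilized here load two, three, or four consecutive registers from a single
// pointer. A hedged sketch of the two-register form; the buffer contents and
// the demo function name are illustrative assumptions.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn vld1_x2_demo() {
    use core::arch::aarch64::*;
    let data: [i8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
    // One ld1 fills both 8-lane d-registers from data[0..8] and data[8..16].
    let pair: int8x8x2_t = vld1_s8_x2(data.as_ptr());
    assert_eq!(vget_lane_s8::<0>(pair.0), 0);
    assert_eq!(vget_lane_s8::<0>(pair.1), 8);
}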
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6112,6 +6599,7 @@ vld1_s8_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6128,6 +6616,7 @@ vld1_s16_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6144,6 +6633,7 @@ vld1_s32_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6160,6 +6650,7 @@ vld1_s64_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6176,6 +6667,7 @@ vld1q_s8_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6192,6 +6684,7 @@ vld1q_s16_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6208,6 +6701,7 @@ vld1q_s32_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6224,6 +6718,7 @@ vld1q_s64_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = 
"aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6240,6 +6735,7 @@ vld1_s8_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6256,6 +6752,7 @@ vld1_s16_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6272,6 +6769,7 @@ vld1_s32_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6288,6 +6786,7 @@ vld1_s64_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6304,6 +6803,7 @@ vld1q_s8_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6320,6 +6820,7 @@ vld1q_s16_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6336,6 +6837,7 @@ vld1q_s32_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6352,6 +6854,7 @@ vld1q_s64_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u8_x2(a: 
*const u8) -> uint8x8x2_t { transmute(vld1_s8_x2(transmute(a))) } @@ -6362,6 +6865,7 @@ pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { transmute(vld1_s16_x2(transmute(a))) } @@ -6372,6 +6876,7 @@ pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { transmute(vld1_s32_x2(transmute(a))) } @@ -6382,6 +6887,7 @@ pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { transmute(vld1_s64_x2(transmute(a))) } @@ -6392,6 +6898,7 @@ pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { transmute(vld1q_s8_x2(transmute(a))) } @@ -6402,6 +6909,7 @@ pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { transmute(vld1q_s16_x2(transmute(a))) } @@ -6412,6 +6920,7 @@ pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { transmute(vld1q_s32_x2(transmute(a))) } @@ -6422,6 +6931,7 @@ pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { transmute(vld1q_s64_x2(transmute(a))) } @@ -6432,6 +6942,7 @@ pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] 
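// ---- Editorial aside, not part of the patch: as the hunks above show, the
// unsigned _x2/_x3/_x4 loads are thin wrappers that transmute the signed
// load's result, since the load itself just moves bytes. A hedged sketch of
// calling the widest four-register form; the helper name is hypothetical.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn load_64_bytes(src: &[u8; 64]) -> core::arch::aarch64::uint8x16x4_t {
    // Four q-registers (64 bytes) filled by a single intrinsic call.
    core::arch::aarch64::vld1q_u8_x4(src.as_ptr())
}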
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { transmute(vld1_s8_x3(transmute(a))) } @@ -6442,6 +6953,7 @@ pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { transmute(vld1_s16_x3(transmute(a))) } @@ -6452,6 +6964,7 @@ pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { transmute(vld1_s32_x3(transmute(a))) } @@ -6462,6 +6975,7 @@ pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { transmute(vld1_s64_x3(transmute(a))) } @@ -6472,6 +6986,7 @@ pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { transmute(vld1q_s8_x3(transmute(a))) } @@ -6482,6 +6997,7 @@ pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { transmute(vld1q_s16_x3(transmute(a))) } @@ -6492,6 +7008,7 @@ pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { transmute(vld1q_s32_x3(transmute(a))) } @@ -6502,6 +7019,7 @@ pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { transmute(vld1q_s64_x3(transmute(a))) } @@ -6512,6 +7030,7 @@ pub 
unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { transmute(vld1_s8_x4(transmute(a))) } @@ -6522,6 +7041,7 @@ pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { transmute(vld1_s16_x4(transmute(a))) } @@ -6532,6 +7052,7 @@ pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { transmute(vld1_s32_x4(transmute(a))) } @@ -6542,6 +7063,7 @@ pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { transmute(vld1_s64_x4(transmute(a))) } @@ -6552,6 +7074,7 @@ pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { transmute(vld1q_s8_x4(transmute(a))) } @@ -6562,6 +7085,7 @@ pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { transmute(vld1q_s16_x4(transmute(a))) } @@ -6572,6 +7096,7 @@ pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { transmute(vld1q_s32_x4(transmute(a))) } @@ -6582,6 +7107,7 @@ pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { transmute(vld1q_s64_x4(transmute(a))) } @@ -6592,6 +7118,7 @@ pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { transmute(vld1_s8_x2(transmute(a))) } @@ -6602,6 +7129,7 @@ pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { transmute(vld1_s8_x3(transmute(a))) } @@ -6612,6 +7140,7 @@ pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { transmute(vld1_s8_x4(transmute(a))) } @@ -6622,6 +7151,7 @@ pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { transmute(vld1q_s8_x2(transmute(a))) } @@ -6632,6 +7162,7 @@ pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { transmute(vld1q_s8_x3(transmute(a))) } @@ -6642,6 +7173,7 @@ pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { transmute(vld1q_s8_x4(transmute(a))) } @@ -6652,6 +7184,7 @@ pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { transmute(vld1_s16_x2(transmute(a))) } @@ -6662,6 +7195,7 @@ pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
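// ---- Editorial aside, not part of the patch: per the attributes above, the
// poly64 loads carry an extra "aes" feature gate on arm. A hedged sketch of
// calling one on aarch64; the exact feature set enabled here is an assumption
// based on those attributes, and the wrapper name is hypothetical.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon,aes")]
unsafe fn vld1_p64_x2_demo(p: *const u64) -> core::arch::aarch64::poly64x1x2_t {
    // p64 is an alias for u64, so a *const u64 is accepted directly.
    core::arch::aarch64::vld1_p64_x2(p)
}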
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { transmute(vld1_s16_x3(transmute(a))) } @@ -6672,6 +7206,7 @@ pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { transmute(vld1_s16_x4(transmute(a))) } @@ -6682,6 +7217,7 @@ pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { transmute(vld1q_s16_x2(transmute(a))) } @@ -6692,6 +7228,7 @@ pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { transmute(vld1q_s16_x3(transmute(a))) } @@ -6702,6 +7239,7 @@ pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { transmute(vld1q_s16_x4(transmute(a))) } @@ -6712,6 +7250,7 @@ pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { transmute(vld1_s64_x2(transmute(a))) } @@ -6722,6 +7261,7 @@ pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { transmute(vld1_s64_x3(transmute(a))) } @@ -6732,6 +7272,7 @@ pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_p64_x4(a: *const p64) 
-> poly64x1x4_t { transmute(vld1_s64_x4(transmute(a))) } @@ -6742,6 +7283,7 @@ pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { transmute(vld1q_s64_x2(transmute(a))) } @@ -6752,6 +7294,7 @@ pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { transmute(vld1q_s64_x3(transmute(a))) } @@ -6762,6 +7305,7 @@ pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { transmute(vld1q_s64_x4(transmute(a))) } @@ -6772,6 +7316,7 @@ pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6788,6 +7333,7 @@ vld1_f32_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6804,6 +7350,7 @@ vld1q_f32_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6820,6 +7367,7 @@ vld1_f32_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6836,6 +7384,7 @@ vld1q_f32_x3_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6852,6 +7401,7 @@ vld1_f32_x4_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6866,7 +7416,7 @@ vld1q_f32_x4_(a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6880,7 +7430,8 @@ vld2_s8_(a as *const i8, 1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(test, assert_instr(ld2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6894,7 +7445,7 @@ vld2_s8_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6908,7 +7459,8 @@ vld2_s16_(a as *const i8, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(test, assert_instr(ld2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6922,7 +7474,7 @@ vld2_s16_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6936,7 +7488,8 @@ vld2_s32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(test, assert_instr(ld2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6950,7 +7503,7 @@ vld2_s32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6964,7 +7517,8 @@ vld2q_s8_(a as *const i8, 1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(test, assert_instr(ld2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6978,7 +7532,7 @@ vld2q_s8_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = 
"neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6992,7 +7546,8 @@ vld2q_s16_(a as *const i8, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(test, assert_instr(ld2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7006,7 +7561,7 @@ vld2q_s16_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7020,7 +7575,8 @@ vld2q_s32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(test, assert_instr(ld2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7034,7 +7590,7 @@ vld2q_s32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7048,7 +7604,8 @@ vld2_s64_(a as *const i8, 8) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7064,6 +7621,7 @@ vld2_s64_(a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { transmute(vld2_s8(transmute(a))) } @@ -7074,6 +7632,7 @@ pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { transmute(vld2_s16(transmute(a))) } @@ -7084,6 +7643,7 @@ pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { transmute(vld2_s32(transmute(a))) } @@ -7094,6 +7654,7 @@ pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { transmute(vld2q_s8(transmute(a))) } @@ -7104,6 +7665,7 @@ pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { transmute(vld2q_s16(transmute(a))) } @@ -7114,6 +7676,7 @@ pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { transmute(vld2q_s32(transmute(a))) } @@ -7124,6 +7687,7 @@ pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { transmute(vld2_s8(transmute(a))) } @@ -7134,6 +7698,7 @@ pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { transmute(vld2_s16(transmute(a))) } @@ -7144,6 +7709,7 @@ pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { transmute(vld2q_s8(transmute(a))) } @@ -7154,6 +7720,7 @@ pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { transmute(vld2q_s16(transmute(a))) } @@ -7164,6 +7731,7 @@ pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t { transmute(vld2_s64(transmute(a))) } @@ -7174,6 +7742,7 @@ pub unsafe fn vld2_u64(a: *const 
u64) -> uint64x1x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t { transmute(vld2_s64(transmute(a))) } @@ -7182,7 +7751,7 @@ pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7196,7 +7765,8 @@ vld2_f32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(test, assert_instr(ld2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7210,7 +7780,7 @@ vld2_f32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7224,7 +7794,8 @@ vld2q_f32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] +#[cfg_attr(test, assert_instr(ld2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7238,7 +7809,7 @@ vld2q_f32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7252,7 +7823,8 @@ vld2_dup_s8_(a as *const i8, 1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(test, assert_instr(ld2r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7266,7 +7838,7 @@ vld2_dup_s8_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7280,7 +7852,8 @@ vld2_dup_s16_(a as *const i8, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(test, assert_instr(ld2r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7294,7 +7867,7 @@ vld2_dup_s16_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(test, 
assert_instr(vld2))] pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7308,7 +7881,8 @@ vld2_dup_s32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(test, assert_instr(ld2r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7322,7 +7896,7 @@ vld2_dup_s32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7336,7 +7910,8 @@ vld2q_dup_s8_(a as *const i8, 1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(test, assert_instr(ld2r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7350,7 +7925,7 @@ vld2q_dup_s8_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7364,7 +7939,8 @@ vld2q_dup_s16_(a as *const i8, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(test, assert_instr(ld2r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7378,7 +7954,7 @@ vld2q_dup_s16_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7392,7 +7968,8 @@ vld2q_dup_s32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(test, assert_instr(ld2r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7406,7 +7983,7 @@ vld2q_dup_s32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7420,7 +7997,8 @@ vld2_dup_s64_(a as *const i8, 8) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(test, assert_instr(ld2r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7436,6 +8014,7 @@ vld2_dup_s64_(a as _) #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { transmute(vld2_dup_s8(transmute(a))) } @@ -7446,6 +8025,7 @@ pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { transmute(vld2_dup_s16(transmute(a))) } @@ -7456,6 +8036,7 @@ pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { transmute(vld2_dup_s32(transmute(a))) } @@ -7466,6 +8047,7 @@ pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { transmute(vld2q_dup_s8(transmute(a))) } @@ -7476,6 +8058,7 @@ pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { transmute(vld2q_dup_s16(transmute(a))) } @@ -7486,6 +8069,7 @@ pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { transmute(vld2q_dup_s32(transmute(a))) } @@ -7496,6 +8080,7 @@ pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { transmute(vld2_dup_s8(transmute(a))) } @@ -7506,6 +8091,7 @@ pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub 
unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { transmute(vld2_dup_s16(transmute(a))) } @@ -7516,6 +8102,7 @@ pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { transmute(vld2q_dup_s8(transmute(a))) } @@ -7526,6 +8113,7 @@ pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { transmute(vld2q_dup_s16(transmute(a))) } @@ -7536,6 +8124,7 @@ pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t { transmute(vld2_dup_s64(transmute(a))) } @@ -7546,6 +8135,7 @@ pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t { transmute(vld2_dup_s64(transmute(a))) } @@ -7554,7 +8144,7 @@ pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7568,7 +8158,8 @@ vld2_dup_f32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(test, assert_instr(ld2r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7582,7 +8173,7 @@ vld2_dup_f32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7596,7 +8187,8 @@ vld2q_dup_f32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] +#[cfg_attr(test, assert_instr(ld2r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7610,7 +8202,7 @@ vld2q_dup_f32_(a as _) 
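//
// A minimal usage sketch (illustrative only, not part of the patch): vld2*
// performs a de-interleaving load (LD2 on AArch64, VLD2 on ARM), sending
// even-indexed elements to the first result vector and odd-indexed elements
// to the second, while vld2*_dup (LD2R) loads one pair and replicates it
// across all lanes of both vectors. Assumes aarch64; the helper name is
// hypothetical.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn split_stereo(samples: *const f32) -> core::arch::aarch64::float32x4x2_t {
    // samples = L0 R0 L1 R1 L2 R2 L3 R3  ->  .0 = L0..L3, .1 = R0..R3
    core::arch::aarch64::vld2q_f32(samples)
}
//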
#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld2_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { static_assert_imm3!(LANE);
@@ -7626,8 +8218,9 @@ vld2_lane_s8_(a as _, b.0, b.1, LANE, 1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { static_assert_imm3!(LANE); #[allow(improper_ctypes)]
@@ -7642,7 +8235,7 @@ vld2_lane_s8_(b.0, b.1, LANE as i64, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld2_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { static_assert_imm2!(LANE);
@@ -7658,8 +8251,9 @@ vld2_lane_s16_(a as _, b.0, b.1, LANE, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { static_assert_imm2!(LANE); #[allow(improper_ctypes)]
@@ -7674,7 +8268,7 @@ vld2_lane_s16_(b.0, b.1, LANE as i64, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld2_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { static_assert_imm1!(LANE);
@@ -7690,8 +8284,9 @@ vld2_lane_s32_(a as _, b.0, b.1, LANE, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { static_assert_imm1!(LANE); #[allow(improper_ctypes)]
@@ -7706,7 +8301,7 @@ vld2_lane_s32_(b.0, b.1, LANE as i64, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld2q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { static_assert_imm3!(LANE);
@@ -7722,8 +8317,9 @@ vld2q_lane_s16_(a as _, b.0, b.1, LANE, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { static_assert_imm3!(LANE); #[allow(improper_ctypes)]
@@ -7738,7 +8334,7 @@ vld2q_lane_s16_(b.0, b.1, LANE as i64, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld2q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { static_assert_imm2!(LANE);
@@ -7754,8 +8350,9 @@ vld2q_lane_s32_(a as _, b.0, b.1, LANE, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { static_assert_imm2!(LANE); #[allow(improper_ctypes)]
@@ -7773,6 +8370,7 @@ vld2q_lane_s32_(b.0, b.1, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t { static_assert_imm3!(LANE); transmute(vld2_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -7785,6 +8383,7 @@ pub unsafe fn vld2_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x2_t) -> uin #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x2_t) -> uint16x4x2_t { static_assert_imm2!(LANE); transmute(vld2_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -7797,6 +8396,7 @@ pub unsafe fn vld2_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x2_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x2_t) -> uint32x2x2_t { static_assert_imm1!(LANE); transmute(vld2_lane_s32::<LANE>(transmute(a), transmute(b)))
@@ -7809,6 +8409,7 @@ pub unsafe fn vld2_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x2_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x2_t) -> uint16x8x2_t { static_assert_imm3!(LANE); transmute(vld2q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -7821,6 +8422,7 @@ pub unsafe fn vld2q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x2_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x2_t) -> uint32x4x2_t { static_assert_imm2!(LANE); transmute(vld2q_lane_s32::<LANE>(transmute(a), transmute(b)))
@@ -7833,6 +8435,7 @@ pub unsafe fn vld2q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x2_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t { static_assert_imm3!(LANE); transmute(vld2_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -7845,6 +8448,7 @@ pub unsafe fn vld2_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x2_t) -> pol #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x2_t) -> poly16x4x2_t { static_assert_imm2!(LANE); transmute(vld2_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -7857,6 +8461,7 @@ pub unsafe fn vld2_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x2_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x2_t) -> poly16x8x2_t { static_assert_imm3!(LANE); transmute(vld2q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -7866,7 +8471,7 @@ pub unsafe fn vld2q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x2_t) -> #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld2_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { static_assert_imm1!(LANE);
@@ -7882,8 +8487,9 @@ vld2_lane_f32_(a as _, b.0, b.1, LANE, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { static_assert_imm1!(LANE); #[allow(improper_ctypes)]
@@ -7898,7 +8504,7 @@ vld2_lane_f32_(b.0, b.1, LANE as i64, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld2q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { static_assert_imm2!(LANE);
@@ -7914,8 +8520,9 @@ vld2q_lane_f32_(a as _, b.0, b.1, LANE, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { static_assert_imm2!(LANE); #[allow(improper_ctypes)]
@@ -7930,7 +8537,7 @@ vld2q_lane_f32_(b.0, b.1, LANE as i64, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(test, assert_instr(vld3))] pub unsafe fn
vld3_s8(a: *const i8) -> int8x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7944,7 +8551,8 @@ vld3_s8_(a as *const i8, 1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(test, assert_instr(ld3))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7958,7 +8566,7 @@ vld3_s8_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7972,7 +8580,8 @@ vld3_s16_(a as *const i8, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(test, assert_instr(ld3))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7986,7 +8595,7 @@ vld3_s16_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8000,7 +8609,8 @@ vld3_s32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(test, assert_instr(ld3))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8014,7 +8624,7 @@ vld3_s32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8028,7 +8638,8 @@ vld3q_s8_(a as *const i8, 1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(test, assert_instr(ld3))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8042,7 +8653,7 @@ vld3q_s8_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8056,7 +8667,8 @@ vld3q_s16_(a as *const i8, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(test, assert_instr(ld3))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8070,7 +8682,7 @@ vld3q_s16_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(test, 
assert_instr(vld3))] pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8084,7 +8696,8 @@ vld3q_s32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(test, assert_instr(ld3))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8098,7 +8711,7 @@ vld3q_s32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8112,7 +8725,8 @@ vld3_s64_(a as *const i8, 8) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8128,6 +8742,7 @@ vld3_s64_(a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { transmute(vld3_s8(transmute(a))) } @@ -8138,6 +8753,7 @@ pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { transmute(vld3_s16(transmute(a))) } @@ -8148,6 +8764,7 @@ pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { transmute(vld3_s32(transmute(a))) } @@ -8158,6 +8775,7 @@ pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { transmute(vld3q_s8(transmute(a))) } @@ -8168,6 +8786,7 @@ pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { transmute(vld3q_s16(transmute(a))) } @@ -8178,6 +8797,7 @@ pub unsafe fn vld3q_u16(a: *const u16) -> 
uint16x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { transmute(vld3q_s32(transmute(a))) } @@ -8188,6 +8808,7 @@ pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { transmute(vld3_s8(transmute(a))) } @@ -8198,6 +8819,7 @@ pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { transmute(vld3_s16(transmute(a))) } @@ -8208,6 +8830,7 @@ pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { transmute(vld3q_s8(transmute(a))) } @@ -8218,6 +8841,7 @@ pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { transmute(vld3q_s16(transmute(a))) } @@ -8228,6 +8852,7 @@ pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { transmute(vld3_s64(transmute(a))) } @@ -8238,6 +8863,7 @@ pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { transmute(vld3_s64(transmute(a))) } @@ -8246,7 +8872,7 @@ pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8260,7 +8886,8 @@ vld3_f32_(a as *const i8, 
4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(test, assert_instr(ld3))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8274,7 +8901,7 @@ vld3_f32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8288,7 +8915,8 @@ vld3q_f32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] +#[cfg_attr(test, assert_instr(ld3))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8302,7 +8930,7 @@ vld3q_f32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8316,7 +8944,8 @@ vld3_dup_s8_(a as *const i8, 1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] +#[cfg_attr(test, assert_instr(ld3r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8330,7 +8959,7 @@ vld3_dup_s8_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8344,7 +8973,8 @@ vld3_dup_s16_(a as *const i8, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] +#[cfg_attr(test, assert_instr(ld3r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8358,7 +8988,7 @@ vld3_dup_s16_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8372,7 +9002,8 @@ vld3_dup_s32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] +#[cfg_attr(test, assert_instr(ld3r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8386,7 +9017,7 @@ vld3_dup_s32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { 
#[allow(improper_ctypes)] extern "unadjusted" { @@ -8400,7 +9031,8 @@ vld3q_dup_s8_(a as *const i8, 1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] +#[cfg_attr(test, assert_instr(ld3r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8414,7 +9046,7 @@ vld3q_dup_s8_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8428,7 +9060,8 @@ vld3q_dup_s16_(a as *const i8, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] +#[cfg_attr(test, assert_instr(ld3r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8442,7 +9075,7 @@ vld3q_dup_s16_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8456,7 +9089,8 @@ vld3q_dup_s32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] +#[cfg_attr(test, assert_instr(ld3r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8470,7 +9104,7 @@ vld3q_dup_s32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8484,7 +9118,8 @@ vld3_dup_s64_(a as *const i8, 8) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] +#[cfg_attr(test, assert_instr(ld3r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8500,6 +9135,7 @@ vld3_dup_s64_(a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { transmute(vld3_dup_s8(transmute(a))) } @@ -8510,6 +9146,7 @@ pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { 
transmute(vld3_dup_s16(transmute(a))) } @@ -8520,6 +9157,7 @@ pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { transmute(vld3_dup_s32(transmute(a))) } @@ -8530,6 +9168,7 @@ pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { transmute(vld3q_dup_s8(transmute(a))) } @@ -8540,6 +9179,7 @@ pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { transmute(vld3q_dup_s16(transmute(a))) } @@ -8550,6 +9190,7 @@ pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { transmute(vld3q_dup_s32(transmute(a))) } @@ -8560,6 +9201,7 @@ pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { transmute(vld3_dup_s8(transmute(a))) } @@ -8570,6 +9212,7 @@ pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { transmute(vld3_dup_s16(transmute(a))) } @@ -8580,6 +9223,7 @@ pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { transmute(vld3q_dup_s8(transmute(a))) } @@ -8590,6 +9234,7 @@ pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] 
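
// --- Illustrative aside (not part of the generated diff): the vld3*_dup_*
// intrinsics above load a single 3-element structure and broadcast each
// element across every lane of the corresponding output register. Sketch
// only; `splat_rgb` is a hypothetical helper.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn splat_rgb(rgb: *const u8) -> core::arch::aarch64::uint8x8x3_t {
    // .0 holds eight copies of rgb[0], .1 of rgb[1], .2 of rgb[2].
    core::arch::aarch64::vld3_dup_u8(rgb)
}
// --- end aside ---
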
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t {
transmute(vld3q_dup_s16(transmute(a)))
}
@@ -8600,6 +9245,7 @@ pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t {
transmute(vld3_dup_s64(transmute(a)))
}
@@ -8610,6 +9256,7 @@ pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t {
transmute(vld3_dup_s64(transmute(a)))
}
@@ -8618,7 +9265,7 @@ pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t {
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
+#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
@@ -8632,7 +9279,8 @@ vld3_dup_f32_(a as *const i8, 4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
+#[cfg_attr(test, assert_instr(ld3r))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
@@ -8646,7 +9294,7 @@ vld3_dup_f32_(a as _)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
+#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
@@ -8660,7 +9308,8 @@ vld3q_dup_f32_(a as *const i8, 4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
+#[cfg_attr(test, assert_instr(ld3r))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
@@ -8674,7 +9323,7 @@ vld3q_dup_f32_(a as _)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x3_t) -> int8x8x3_t {
static_assert_imm3!(LANE);
@@ -8690,8 +9339,9 @@ vld3_lane_s8_(a as _, b.0, b.1, b.2, LANE, 1)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
+#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x3_t) -> int8x8x3_t {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
@@ -8706,7 +9356,7 @@ vld3_lane_s8_(b.0, b.1, b.2, LANE as i64, a as _)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x3_t) -> int16x4x3_t {
static_assert_imm2!(LANE);
@@ -8722,8 +9372,9 @@ vld3_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
+#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x3_t) -> int16x4x3_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
@@ -8738,7 +9389,7 @@ vld3_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x3_t) -> int32x2x3_t {
static_assert_imm1!(LANE);
@@ -8754,8 +9405,9 @@ vld3_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
+#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x3_t) -> int32x2x3_t {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
@@ -8770,7 +9422,7 @@ vld3_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x3_t) -> int16x8x3_t {
static_assert_imm3!(LANE);
@@ -8786,8 +9438,9 @@ vld3q_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
+#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x3_t) -> int16x8x3_t {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
@@ -8802,7 +9455,7 @@ vld3q_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x3_t) -> int32x4x3_t {
static_assert_imm2!(LANE);
@@ -8818,8 +9471,9 @@ vld3q_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
+#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x3_t) -> int32x4x3_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
@@ -8837,6 +9491,7 @@ vld3q_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t {
static_assert_imm3!(LANE);
transmute(vld3_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -8849,6 +9504,7 @@ pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uin
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t {
static_assert_imm2!(LANE);
transmute(vld3_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -8861,6 +9517,7 @@ pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t {
static_assert_imm1!(LANE);
transmute(vld3_lane_s32::<LANE>(transmute(a), transmute(b)))
@@ -8873,6 +9530,7 @@ pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t {
static_assert_imm3!(LANE);
transmute(vld3q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -8885,6 +9543,7 @@ pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t {
static_assert_imm2!(LANE);
transmute(vld3q_lane_s32::<LANE>(transmute(a), transmute(b)))
@@ -8897,6 +9556,7 @@ pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t {
static_assert_imm3!(LANE);
transmute(vld3_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -8909,6 +9569,7 @@ pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> pol
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t {
static_assert_imm2!(LANE);
transmute(vld3_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -8921,6 +9582,7 @@ pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t {
static_assert_imm3!(LANE);
transmute(vld3q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -8930,7 +9592,7 @@ pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) ->
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x3_t) -> float32x2x3_t {
static_assert_imm1!(LANE);
@@ -8946,8 +9608,9 @@ vld3_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
+#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x3_t) -> float32x2x3_t {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
@@ -8962,7 +9625,7 @@ vld3_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x3_t) -> float32x4x3_t {
static_assert_imm2!(LANE);
@@ -8978,8 +9641,9 @@ vld3q_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
+#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x3_t) -> float32x4x3_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
@@ -8994,7 +9658,7 @@ vld3q_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
+#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
@@ -9008,7 +9672,8 @@ vld4_s8_(a as *const i8, 1)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
+#[cfg_attr(test, assert_instr(ld4))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
@@ -9022,7 +9687,7 @@ vld4_s8_(a as _)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
+#[cfg_attr(test, assert_instr(vld4))]
pub unsafe 
fn vld4_s16(a: *const i16) -> int16x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9036,7 +9701,8 @@ vld4_s16_(a as *const i8, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(test, assert_instr(ld4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9050,7 +9716,7 @@ vld4_s16_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9064,7 +9730,8 @@ vld4_s32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(test, assert_instr(ld4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9078,7 +9745,7 @@ vld4_s32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9092,7 +9759,8 @@ vld4q_s8_(a as *const i8, 1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(test, assert_instr(ld4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9106,7 +9774,7 @@ vld4q_s8_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9120,7 +9788,8 @@ vld4q_s16_(a as *const i8, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(test, assert_instr(ld4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9134,7 +9803,7 @@ vld4q_s16_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9148,7 +9817,8 @@ vld4q_s32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(test, assert_instr(ld4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9162,7 +9832,7 @@ vld4q_s32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] 
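
// --- Illustrative aside (not part of the generated diff): the vld3_lane_*
// and vld4_lane_* intrinsics take the lane index as a const generic (the
// `<const LANE: i32>` parameter restored in the signatures above), and the
// static_assert_imm*! macros reject out-of-range indices at compile time;
// #[rustc_legacy_const_generics(2)] additionally accepts the index as a
// trailing third argument for C-style calls. `reload_lane1` is hypothetical.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn reload_lane1(
    src: *const f32,
    acc: core::arch::aarch64::float32x2x3_t,
) -> core::arch::aarch64::float32x2x3_t {
    use core::arch::aarch64::*;
    // Replaces lane 1 of each of the three registers with the three
    // consecutive f32s at `src`; lane 0 of each register is untouched.
    vld3_lane_f32::<1>(src, acc)
}
// --- end aside ---
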
+#[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9176,7 +9846,8 @@ vld4_s64_(a as *const i8, 8) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9192,6 +9863,7 @@ vld4_s64_(a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { transmute(vld4_s8(transmute(a))) } @@ -9202,6 +9874,7 @@ pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { transmute(vld4_s16(transmute(a))) } @@ -9212,6 +9885,7 @@ pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { transmute(vld4_s32(transmute(a))) } @@ -9222,6 +9896,7 @@ pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { transmute(vld4q_s8(transmute(a))) } @@ -9232,6 +9907,7 @@ pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { transmute(vld4q_s16(transmute(a))) } @@ -9242,6 +9918,7 @@ pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { transmute(vld4q_s32(transmute(a))) } @@ -9252,6 +9929,7 @@ pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(target_arch = "aarch64", 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { transmute(vld4_s8(transmute(a))) } @@ -9262,6 +9940,7 @@ pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { transmute(vld4_s16(transmute(a))) } @@ -9272,6 +9951,7 @@ pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { transmute(vld4q_s8(transmute(a))) } @@ -9282,6 +9962,7 @@ pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { transmute(vld4q_s16(transmute(a))) } @@ -9292,6 +9973,7 @@ pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { transmute(vld4_s64(transmute(a))) } @@ -9302,6 +9984,7 @@ pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { transmute(vld4_s64(transmute(a))) } @@ -9310,7 +9993,7 @@ pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9324,7 +10007,8 @@ vld4_f32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(test, assert_instr(ld4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9338,7 +10022,7 @@ vld4_f32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9352,7 +10036,8 @@ 
vld4q_f32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] +#[cfg_attr(test, assert_instr(ld4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9366,7 +10051,7 @@ vld4q_f32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9380,7 +10065,8 @@ vld4_dup_s8_(a as *const i8, 1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9394,7 +10080,7 @@ vld4_dup_s8_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9408,7 +10094,8 @@ vld4_dup_s16_(a as *const i8, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9422,7 +10109,7 @@ vld4_dup_s16_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9436,7 +10123,8 @@ vld4_dup_s32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9450,7 +10138,7 @@ vld4_dup_s32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9464,7 +10152,8 @@ vld4q_dup_s8_(a as *const i8, 1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9478,7 +10167,7 @@ vld4q_dup_s8_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(test, assert_instr(vld4))] pub unsafe fn 
vld4q_dup_s16(a: *const i16) -> int16x8x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9492,7 +10181,8 @@ vld4q_dup_s16_(a as *const i8, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9506,7 +10196,7 @@ vld4q_dup_s16_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9520,7 +10210,8 @@ vld4q_dup_s32_(a as *const i8, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9534,7 +10225,7 @@ vld4q_dup_s32_(a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9548,7 +10239,8 @@ vld4_dup_s64_(a as *const i8, 8) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9564,6 +10256,7 @@ vld4_dup_s64_(a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { transmute(vld4_dup_s8(transmute(a))) } @@ -9574,6 +10267,7 @@ pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { transmute(vld4_dup_s16(transmute(a))) } @@ -9584,6 +10278,7 @@ pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { transmute(vld4_dup_s32(transmute(a))) } @@ -9594,6 +10289,7 @@ pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { transmute(vld4q_dup_s8(transmute(a))) } @@ -9604,6 +10300,7 @@ pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { transmute(vld4q_dup_s16(transmute(a))) } @@ -9614,6 +10311,7 @@ pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { transmute(vld4q_dup_s32(transmute(a))) } @@ -9624,6 +10322,7 @@ pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { transmute(vld4_dup_s8(transmute(a))) } @@ -9634,6 +10333,7 @@ pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { transmute(vld4_dup_s16(transmute(a))) } @@ -9644,6 +10344,7 @@ pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { transmute(vld4q_dup_s8(transmute(a))) } @@ -9654,6 +10355,7 @@ pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { transmute(vld4q_dup_s16(transmute(a))) } @@ -9664,6 +10366,7 @@ pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { 
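
// --- Illustrative aside (not part of the generated diff): once these
// intrinsics carry #[stable(..., since = "1.59.0")], callers on stable Rust
// can gate on NEON at runtime rather than requiring nightly. Sketch only;
// `load_rgba8` is a hypothetical helper, and vld4_u8 (annotated earlier in
// this diff) de-interleaves eight RGBA pixels into four planar registers.
#[cfg(target_arch = "aarch64")]
fn load_rgba8(px: &[u8; 32]) -> Option<core::arch::aarch64::uint8x8x4_t> {
    if std::arch::is_aarch64_feature_detected!("neon") {
        // The feature check makes the unsafe call sound on this target.
        Some(unsafe { core::arch::aarch64::vld4_u8(px.as_ptr()) })
    } else {
        None
    }
}
// --- end aside ---
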
transmute(vld4_dup_s64(transmute(a)))
}
@@ -9674,6 +10377,7 @@ pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t {
transmute(vld4_dup_s64(transmute(a)))
}
@@ -9682,7 +10386,7 @@ pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t {
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
+#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
@@ -9696,7 +10400,8 @@ vld4_dup_f32_(a as *const i8, 4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
+#[cfg_attr(test, assert_instr(ld4r))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
@@ -9710,7 +10415,7 @@ vld4_dup_f32_(a as _)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
+#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
@@ -9724,7 +10429,8 @@ vld4q_dup_f32_(a as *const i8, 4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
+#[cfg_attr(test, assert_instr(ld4r))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
@@ -9738,7 +10444,7 @@ vld4q_dup_f32_(a as _)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
+#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x4_t) -> int8x8x4_t {
static_assert_imm3!(LANE);
@@ -9754,8 +10460,9 @@ vld4_lane_s8_(a as _, b.0, b.1, b.2, b.3, LANE, 1)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
+#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x4_t) -> int8x8x4_t {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
@@ -9770,7 +10477,7 @@ vld4_lane_s8_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
+#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x4_t) -> int16x4x4_t {
static_assert_imm2!(LANE);
@@ -9786,8 +10493,9 @@ vld4_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
+#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x4_t) -> int16x4x4_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
@@ -9802,7 +10510,7 @@ vld4_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
+#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x4_t) -> int32x2x4_t {
static_assert_imm1!(LANE);
@@ -9818,8 +10526,9 @@ vld4_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
+#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x4_t) -> int32x2x4_t {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
@@ -9834,7 +10543,7 @@ vld4_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
+#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x4_t) -> int16x8x4_t {
static_assert_imm3!(LANE);
@@ -9850,8 +10559,9 @@ vld4q_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
+#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x4_t) -> int16x8x4_t {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
@@ -9866,7 +10576,7 @@ vld4q_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
+#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x4_t) -> int32x4x4_t {
static_assert_imm2!(LANE);
@@ -9882,8 +10592,9 @@ vld4q_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
+#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x4_t) -> int32x4x4_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
@@ -9901,6 +10612,7 @@ vld4q_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t {
static_assert_imm3!(LANE);
transmute(vld4_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -9913,6 +10625,7 @@ pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uin
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t {
static_assert_imm2!(LANE);
transmute(vld4_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -9925,6 +10638,7 @@ pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t {
static_assert_imm1!(LANE);
transmute(vld4_lane_s32::<LANE>(transmute(a), transmute(b)))
@@ -9937,6 +10651,7 @@ pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t {
static_assert_imm3!(LANE);
transmute(vld4q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -9949,6 +10664,7 @@ pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t {
static_assert_imm2!(LANE);
transmute(vld4q_lane_s32::<LANE>(transmute(a), transmute(b)))
@@ -9961,6 +10677,7 @@ pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t {
static_assert_imm3!(LANE);
transmute(vld4_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -9973,6 +10690,7 @@ pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> pol
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t {
static_assert_imm2!(LANE);
transmute(vld4_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -9985,6 +10703,7 @@ pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t {
static_assert_imm3!(LANE);
transmute(vld4q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -9994,7 +10713,7 @@ pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) ->
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
+#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x4_t) -> float32x2x4_t {
static_assert_imm1!(LANE);
@@ -10010,8 +10729,9 @@ vld4_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
+#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x4_t) -> float32x2x4_t {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
@@ -10026,7 +10746,7 @@ vld4_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
+#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x4_t) -> float32x4x4_t {
static_assert_imm2!(LANE);
@@ -10042,8 +10762,9 @@ vld4q_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
+#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x4_t) -> float32x4x4_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
@@ -10061,6 +10782,7 @@ vld4q_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10073,6 +10795,7 @@ pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10085,6 +10808,7 @@ pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10097,6 +10821,7 @@ pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1_t) {
static_assert!(LANE : i32 where LANE == 0);
*a = simd_extract(b, LANE as u32);
@@ -10109,6 +10834,7 @@ pub unsafe fn vst1_lane_s64(a: *mut i64, b: int64x1_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16_t) {
static_assert_imm4!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10121,6 +10847,7 @@ pub unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10133,6 +10860,7 @@ pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10145,6 +10873,7 @@ pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10157,6 +10886,7 @@ pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10169,6 +10899,7 @@ pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10181,6 +10912,7 @@ pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10193,6 +10925,7 @@ pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1_t) {
static_assert!(LANE : i32 where LANE == 0);
*a = simd_extract(b, LANE as u32);
@@ -10205,6 +10938,7 @@ pub unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16_t) {
static_assert_imm4!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10217,6 +10951,7 @@ pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10229,6 +10964,7 @@ pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10241,6 +10977,7 @@ pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10253,6 +10990,7 @@ pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10265,6 +11003,7 @@ pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10277,6 +11016,7 @@ pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16_t) {
static_assert_imm4!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10289,6 +11029,7 @@ pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10301,6 +11042,7 @@ pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1_t) {
static_assert!(LANE : i32 where LANE == 0);
*a = simd_extract(b, LANE as u32);
@@ -10313,6 +11055,7 @@ pub unsafe fn vst1_lane_p64(a: *mut p64, b: poly64x1_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10325,6 +11068,7 @@ pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10337,6 +11081,7 @@ pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -10346,7 +11091,7 @@ pub unsafe fn vst1q_lane_f32(a: *mut f32, b: float32x4_t) {
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
+#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) {
#[allow(improper_ctypes)] extern "unadjusted" { @@ -10374,7 +11120,7 @@ vst1_s8_x2_(b.0, b.1, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10388,7 +11134,8 @@ vst1_s16_x2_(a, b.0, b.1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10402,7 +11149,7 @@ vst1_s16_x2_(b.0, b.1, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10416,7 +11163,8 @@ vst1_s32_x2_(a, b.0, b.1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10430,7 +11178,7 @@ vst1_s32_x2_(b.0, b.1, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10444,7 +11192,8 @@ vst1_s64_x2_(a, b.0, b.1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10458,7 +11207,7 @@ vst1_s64_x2_(b.0, b.1, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10472,7 +11221,8 @@ vst1q_s8_x2_(a, b.0, b.1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10486,7 +11236,7 @@ vst1q_s8_x2_(b.0, b.1, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10500,7 +11250,8 @@ vst1q_s16_x2_(a, b.0, b.1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] 
+#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10514,7 +11265,7 @@ vst1q_s16_x2_(b.0, b.1, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10528,7 +11279,8 @@ vst1q_s32_x2_(a, b.0, b.1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10542,7 +11294,7 @@ vst1q_s32_x2_(b.0, b.1, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10556,7 +11308,8 @@ vst1q_s64_x2_(a, b.0, b.1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10570,7 +11323,7 @@ vst1q_s64_x2_(b.0, b.1, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10584,7 +11337,8 @@ vst1_s8_x3_(a, b.0, b.1, b.2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10598,7 +11352,7 @@ vst1_s8_x3_(b.0, b.1, b.2, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10612,7 +11366,8 @@ vst1_s16_x3_(a, b.0, b.1, b.2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10626,7 +11381,7 @@ vst1_s16_x3_(b.0, b.1, b.2, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10640,7 +11395,8 @@ vst1_s32_x3_(a, b.0, b.1, b.2) #[inline] #[cfg(target_arch = "aarch64")] 
#[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10654,7 +11410,7 @@ vst1_s32_x3_(b.0, b.1, b.2, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10668,7 +11424,8 @@ vst1_s64_x3_(a, b.0, b.1, b.2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10682,7 +11439,7 @@ vst1_s64_x3_(b.0, b.1, b.2, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10696,7 +11453,8 @@ vst1q_s8_x3_(a, b.0, b.1, b.2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10710,7 +11468,7 @@ vst1q_s8_x3_(b.0, b.1, b.2, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10724,7 +11482,8 @@ vst1q_s16_x3_(a, b.0, b.1, b.2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10738,7 +11497,7 @@ vst1q_s16_x3_(b.0, b.1, b.2, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10752,7 +11511,8 @@ vst1q_s32_x3_(a, b.0, b.1, b.2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10766,7 +11526,7 @@ vst1q_s32_x3_(b.0, b.1, b.2, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x3(a: *mut i64, b: 
int64x2x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10780,7 +11540,8 @@ vst1q_s64_x3_(a, b.0, b.1, b.2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10794,7 +11555,7 @@ vst1q_s64_x3_(b.0, b.1, b.2, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10808,7 +11569,8 @@ vst1_s8_x4_(a, b.0, b.1, b.2, b.3) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10822,7 +11584,7 @@ vst1_s8_x4_(b.0, b.1, b.2, b.3, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10836,7 +11598,8 @@ vst1_s16_x4_(a, b.0, b.1, b.2, b.3) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10850,7 +11613,7 @@ vst1_s16_x4_(b.0, b.1, b.2, b.3, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10864,7 +11627,8 @@ vst1_s32_x4_(a, b.0, b.1, b.2, b.3) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10878,7 +11642,7 @@ vst1_s32_x4_(b.0, b.1, b.2, b.3, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10892,7 +11656,8 @@ vst1_s64_x4_(a, b.0, b.1, b.2, b.3) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10906,7 +11671,7 @@ vst1_s64_x4_(b.0, b.1, b.2, b.3, a) #[inline] #[cfg(target_arch = 
"arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10920,7 +11685,8 @@ vst1q_s8_x4_(a, b.0, b.1, b.2, b.3) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10934,7 +11700,7 @@ vst1q_s8_x4_(b.0, b.1, b.2, b.3, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10948,7 +11714,8 @@ vst1q_s16_x4_(a, b.0, b.1, b.2, b.3) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10962,7 +11729,7 @@ vst1q_s16_x4_(b.0, b.1, b.2, b.3, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10976,7 +11743,8 @@ vst1q_s32_x4_(a, b.0, b.1, b.2, b.3) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10990,7 +11758,7 @@ vst1q_s32_x4_(b.0, b.1, b.2, b.3, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11004,7 +11772,8 @@ vst1q_s64_x4_(a, b.0, b.1, b.2, b.3) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11020,6 +11789,7 @@ vst1q_s64_x4_(b.0, b.1, b.2, b.3, a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { vst1_s8_x2(transmute(a), transmute(b)) } @@ -11030,6 +11800,7 @@ pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { vst1_s16_x2(transmute(a), transmute(b)) } @@ -11040,6 +11811,7 @@ pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { vst1_s32_x2(transmute(a), transmute(b)) } @@ -11050,6 +11822,7 @@ pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { vst1_s64_x2(transmute(a), transmute(b)) } @@ -11060,6 +11833,7 @@ pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { vst1q_s8_x2(transmute(a), transmute(b)) } @@ -11070,6 +11844,7 @@ pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { vst1q_s16_x2(transmute(a), transmute(b)) } @@ -11080,6 +11855,7 @@ pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { vst1q_s32_x2(transmute(a), transmute(b)) } @@ -11090,6 +11866,7 @@ pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { vst1q_s64_x2(transmute(a), transmute(b)) } @@ -11100,6 +11877,7 @@ pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { 
vst1_s8_x3(transmute(a), transmute(b)) } @@ -11110,6 +11888,7 @@ pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { vst1_s16_x3(transmute(a), transmute(b)) } @@ -11120,6 +11899,7 @@ pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { vst1_s32_x3(transmute(a), transmute(b)) } @@ -11130,6 +11910,7 @@ pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { vst1_s64_x3(transmute(a), transmute(b)) } @@ -11140,6 +11921,7 @@ pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { vst1q_s8_x3(transmute(a), transmute(b)) } @@ -11150,6 +11932,7 @@ pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { vst1q_s16_x3(transmute(a), transmute(b)) } @@ -11160,6 +11943,7 @@ pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { vst1q_s32_x3(transmute(a), transmute(b)) } @@ -11170,6 +11954,7 @@ pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { vst1q_s64_x3(transmute(a), transmute(b)) } @@ -11180,6 +11965,7 @@ pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { vst1_s8_x4(transmute(a), transmute(b)) } @@ -11190,6 +11976,7 @@ pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { vst1_s16_x4(transmute(a), transmute(b)) } @@ -11200,6 +11987,7 @@ pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { vst1_s32_x4(transmute(a), transmute(b)) } @@ -11210,6 +11998,7 @@ pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { vst1_s64_x4(transmute(a), transmute(b)) } @@ -11220,6 +12009,7 @@ pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { vst1q_s8_x4(transmute(a), transmute(b)) } @@ -11230,6 +12020,7 @@ pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { vst1q_s16_x4(transmute(a), transmute(b)) } @@ -11240,6 +12031,7 @@ pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { vst1q_s32_x4(transmute(a), transmute(b)) } @@ -11250,6 +12042,7 @@ pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { vst1q_s64_x4(transmute(a), transmute(b)) } @@ 
-11260,6 +12053,7 @@ pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { vst1_s8_x2(transmute(a), transmute(b)) } @@ -11270,6 +12064,7 @@ pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { vst1_s8_x3(transmute(a), transmute(b)) } @@ -11280,6 +12075,7 @@ pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { vst1_s8_x4(transmute(a), transmute(b)) } @@ -11290,6 +12086,7 @@ pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { vst1q_s8_x2(transmute(a), transmute(b)) } @@ -11300,6 +12097,7 @@ pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { vst1q_s8_x3(transmute(a), transmute(b)) } @@ -11310,6 +12108,7 @@ pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { vst1q_s8_x4(transmute(a), transmute(b)) } @@ -11320,6 +12119,7 @@ pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { vst1_s16_x2(transmute(a), transmute(b)) } @@ -11330,6 +12130,7 @@ pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] 
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { vst1_s16_x3(transmute(a), transmute(b)) } @@ -11340,6 +12141,7 @@ pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { vst1_s16_x4(transmute(a), transmute(b)) } @@ -11350,6 +12152,7 @@ pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { vst1q_s16_x2(transmute(a), transmute(b)) } @@ -11360,6 +12163,7 @@ pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { vst1q_s16_x3(transmute(a), transmute(b)) } @@ -11370,6 +12174,7 @@ pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { vst1q_s16_x4(transmute(a), transmute(b)) } @@ -11380,6 +12185,7 @@ pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) { vst1_s64_x2(transmute(a), transmute(b)) } @@ -11390,6 +12196,7 @@ pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) { vst1_s64_x3(transmute(a), transmute(b)) } @@ -11400,6 +12207,7 @@ pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) { vst1_s64_x4(transmute(a), transmute(b)) } @@ -11410,6 +12218,7 @@ pub unsafe fn vst1_p64_x4(a: 
*mut p64, b: poly64x1x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { vst1q_s64_x2(transmute(a), transmute(b)) } @@ -11420,6 +12229,7 @@ pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { vst1q_s64_x3(transmute(a), transmute(b)) } @@ -11430,6 +12240,7 @@ pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { vst1q_s64_x4(transmute(a), transmute(b)) } @@ -11438,7 +12249,7 @@ pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11452,7 +12263,8 @@ vst1_f32_x2_(a, b.0, b.1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11466,7 +12278,7 @@ vst1_f32_x2_(b.0, b.1, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11480,7 +12292,8 @@ vst1q_f32_x2_(a, b.0, b.1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11494,7 +12307,7 @@ vst1q_f32_x2_(b.0, b.1, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11508,7 +12321,8 @@ vst1_f32_x3_(a, b.0, b.1, b.2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f32_x3(a: *mut f32, 
b: float32x2x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11522,7 +12336,7 @@ vst1_f32_x3_(b.0, b.1, b.2, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11536,7 +12350,8 @@ vst1q_f32_x3_(a, b.0, b.1, b.2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11550,7 +12365,7 @@ vst1q_f32_x3_(b.0, b.1, b.2, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11564,7 +12379,8 @@ vst1_f32_x4_(a, b.0, b.1, b.2, b.3) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11578,7 +12394,7 @@ vst1_f32_x4_(b.0, b.1, b.2, b.3, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11592,7 +12408,8 @@ vst1q_f32_x4_(a, b.0, b.1, b.2, b.3) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11606,7 +12423,7 @@ vst1q_f32_x4_(b.0, b.1, b.2, b.3, a) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11620,7 +12437,8 @@ vst2_s8_(a as _, b.0, b.1, 1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(test, assert_instr(st2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11634,7 +12452,7 @@ vst2_s8_(b.0, b.1, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11648,7 +12466,8 @@ vst2_s16_(a as _, b.0, b.1, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(test, assert_instr(st2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11662,7 +12481,7 @@ vst2_s16_(b.0, b.1, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11676,7 +12495,8 @@ vst2_s32_(a as _, b.0, b.1, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(test, assert_instr(st2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11690,7 +12510,7 @@ vst2_s32_(b.0, b.1, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11704,7 +12524,8 @@ vst2q_s8_(a as _, b.0, b.1, 1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(test, assert_instr(st2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11718,7 +12539,7 @@ vst2q_s8_(b.0, b.1, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11732,7 +12553,8 @@ vst2q_s16_(a as _, b.0, b.1, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(test, assert_instr(st2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11746,7 +12568,7 @@ vst2q_s16_(b.0, b.1, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11760,7 +12582,8 @@ vst2q_s32_(a as _, b.0, b.1, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(test, assert_instr(st2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11774,7 +12597,7 @@ vst2q_s32_(b.0, b.1, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11788,7 +12611,8 @@ vst2_s64_(a as _, 
b.0, b.1, 8) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11804,6 +12628,7 @@ vst2_s64_(b.0, b.1, a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { transmute(vst2_s8(transmute(a), transmute(b))) } @@ -11814,6 +12639,7 @@ pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { transmute(vst2_s16(transmute(a), transmute(b))) } @@ -11824,6 +12650,7 @@ pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { transmute(vst2_s32(transmute(a), transmute(b))) } @@ -11834,6 +12661,7 @@ pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { transmute(vst2q_s8(transmute(a), transmute(b))) } @@ -11844,6 +12672,7 @@ pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { transmute(vst2q_s16(transmute(a), transmute(b))) } @@ -11854,6 +12683,7 @@ pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { transmute(vst2q_s32(transmute(a), transmute(b))) } @@ -11864,6 +12694,7 @@ pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_p8(a: *mut 
p8, b: poly8x8x2_t) { transmute(vst2_s8(transmute(a), transmute(b))) } @@ -11874,6 +12705,7 @@ pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { transmute(vst2_s16(transmute(a), transmute(b))) } @@ -11884,6 +12716,7 @@ pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { transmute(vst2q_s8(transmute(a), transmute(b))) } @@ -11894,6 +12727,7 @@ pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { transmute(vst2q_s16(transmute(a), transmute(b))) } @@ -11904,6 +12738,7 @@ pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { transmute(vst2_s64(transmute(a), transmute(b))) } @@ -11914,6 +12749,7 @@ pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { transmute(vst2_s64(transmute(a), transmute(b))) } @@ -11922,7 +12758,7 @@ pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11936,7 +12772,8 @@ vst2_f32_(a as _, b.0, b.1, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(test, assert_instr(st2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11950,7 +12787,7 @@ vst2_f32_(b.0, b.1, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11964,7 +12801,8 @@ 
vst2q_f32_(a as _, b.0, b.1, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] +#[cfg_attr(test, assert_instr(st2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11978,7 +12816,7 @@ vst2q_f32_(b.0, b.1, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { static_assert_imm3!(LANE); @@ -11994,8 +12832,9 @@ vst2_lane_s8_(a as _, b.0, b.1, LANE, 1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { static_assert_imm3!(LANE); #[allow(improper_ctypes)] @@ -12010,7 +12849,7 @@ vst2_lane_s8_(b.0, b.1, LANE as i64, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { static_assert_imm2!(LANE); @@ -12026,8 +12865,9 @@ vst2_lane_s16_(a as _, b.0, b.1, LANE, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { static_assert_imm2!(LANE); #[allow(improper_ctypes)] @@ -12042,7 +12882,7 @@ vst2_lane_s16_(b.0, b.1, LANE as i64, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { static_assert_imm1!(LANE); @@ -12058,8 +12898,9 @@ vst2_lane_s32_(a as _, b.0, b.1, LANE, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { static_assert_imm1!(LANE); #[allow(improper_ctypes)] @@ -12074,7 +12915,7 @@ vst2_lane_s32_(b.0, b.1, LANE as i64, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { static_assert_imm3!(LANE); @@ -12090,8 +12931,9 @@ vst2q_lane_s16_(a as _, b.0, b.1, LANE, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] 
+#[cfg_attr(test, assert_instr(st2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst2q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x2_t) {
     static_assert_imm3!(LANE);
     #[allow(improper_ctypes)]
@@ -12106,7 +12948,7 @@ vst2q_lane_s16_(b.0, b.1, LANE as i64, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vst2q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x2_t) {
     static_assert_imm2!(LANE);
@@ -12122,8 +12964,9 @@ vst2q_lane_s32_(a as _, b.0, b.1, LANE, 4)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
+#[cfg_attr(test, assert_instr(st2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst2q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x2_t) {
     static_assert_imm2!(LANE);
     #[allow(improper_ctypes)]
@@ -12141,6 +12984,7 @@ vst2q_lane_s32_(b.0, b.1, LANE as i64, a as _)
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst2_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x2_t) {
     static_assert_imm3!(LANE);
     transmute(vst2_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -12153,6 +12997,7 @@ pub unsafe fn vst2_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x2_t) {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst2_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x2_t) {
     static_assert_imm2!(LANE);
     transmute(vst2_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -12165,6 +13010,7 @@ pub unsafe fn vst2_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x2_t) {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst2_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x2_t) {
     static_assert_imm1!(LANE);
     transmute(vst2_lane_s32::<LANE>(transmute(a), transmute(b)))
@@ -12177,6 +13023,7 @@ pub unsafe fn vst2_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x2_t) {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst2q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x2_t) {
     static_assert_imm3!(LANE);
     transmute(vst2q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -12189,6 +13036,7 @@ pub unsafe fn vst2q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x2_t) {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst2q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x2_t) {
     static_assert_imm2!(LANE);
     transmute(vst2q_lane_s32::<LANE>(transmute(a), transmute(b)))
@@ -12201,6 +13049,7 @@ pub unsafe fn vst2q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x2_t) {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst2_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x2_t) {
     static_assert_imm3!(LANE);
     transmute(vst2_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -12213,6 +13062,7 @@ pub unsafe fn vst2_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x2_t) {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst2_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x2_t) {
     static_assert_imm2!(LANE);
     transmute(vst2_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -12225,6 +13075,7 @@ pub unsafe fn vst2_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x2_t) {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst2q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x2_t) {
     static_assert_imm3!(LANE);
     transmute(vst2q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -12234,7 +13085,7 @@ pub unsafe fn vst2q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x2_t) {
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vst2_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x2_t) {
     static_assert_imm1!(LANE);
@@ -12250,8 +13101,9 @@ vst2_lane_f32_(a as _, b.0, b.1, LANE, 4)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
+#[cfg_attr(test, assert_instr(st2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst2_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x2_t) {
     static_assert_imm1!(LANE);
     #[allow(improper_ctypes)]
@@ -12266,7 +13118,7 @@ vst2_lane_f32_(b.0, b.1, LANE as i64, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vst2q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x2_t) {
     static_assert_imm2!(LANE);
@@ -12282,8 +13134,9 @@ vst2q_lane_f32_(a as _, b.0, b.1, LANE, 4)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
+#[cfg_attr(test, assert_instr(st2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst2q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x2_t) {
     static_assert_imm2!(LANE);
     #[allow(improper_ctypes)]
@@ -12298,7 +13151,7 @@ vst2q_lane_f32_(b.0, b.1, LANE as i64, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
+#[cfg_attr(test, assert_instr(vst3))]
 pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12312,7 +13165,8 @@ vst3_s8_(a as _, b.0, b.1, b.2, 1)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(test, assert_instr(st3))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12326,7 +13180,7 @@ vst3_s8_(b.0, b.1, b.2, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
+#[cfg_attr(test, assert_instr(vst3))]
 pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12340,7 +13194,8 @@ vst3_s16_(a as _, b.0, b.1, b.2, 2)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(test, assert_instr(st3))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12354,7 +13209,7 @@ vst3_s16_(b.0, b.1, b.2, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
+#[cfg_attr(test, assert_instr(vst3))]
 pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12368,7 +13223,8 @@ vst3_s32_(a as _, b.0, b.1, b.2, 4)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(test, assert_instr(st3))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12382,7 +13238,7 @@ vst3_s32_(b.0, b.1, b.2, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
+#[cfg_attr(test, assert_instr(vst3))]
 pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12396,7 +13252,8 @@ vst3q_s8_(a as _, b.0, b.1, b.2, 1)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(test, assert_instr(st3))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12410,7 +13267,7 @@ vst3q_s8_(b.0, b.1, b.2, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
+#[cfg_attr(test, assert_instr(vst3))]
 pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12424,7 +13281,8 @@ vst3q_s16_(a as _, b.0, b.1, b.2, 2)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(test, assert_instr(st3))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12438,7 +13296,7 @@ vst3q_s16_(b.0, b.1, b.2, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
+#[cfg_attr(test, assert_instr(vst3))]
 pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12452,7 +13310,8 @@ vst3q_s32_(a as _, b.0, b.1, b.2, 4)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(test, assert_instr(st3))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12466,7 +13325,7 @@ vst3q_s32_(b.0, b.1, b.2, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(test, assert_instr(nop))]
 pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12480,7 +13339,8 @@ vst3_s64_(a as _, b.0, b.1, b.2, 8)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
+#[cfg_attr(test, assert_instr(nop))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12496,6 +13356,7 @@ vst3_s64_(b.0, b.1, b.2, a as _)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) {
     transmute(vst3_s8(transmute(a), transmute(b)))
 }
@@ -12506,6 +13367,7 @@ pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) {
     transmute(vst3_s16(transmute(a), transmute(b)))
 }
@@ -12516,6 +13378,7 @@ pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) {
     transmute(vst3_s32(transmute(a), transmute(b)))
 }
@@ -12526,6 +13389,7 @@ pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) {
     transmute(vst3q_s8(transmute(a), transmute(b)))
 }
@@ -12536,6 +13400,7 @@ pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) {
     transmute(vst3q_s16(transmute(a), transmute(b)))
 }
@@ -12546,6 +13411,7 @@ pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) {
     transmute(vst3q_s32(transmute(a), transmute(b)))
 }
@@ -12556,6 +13422,7 @@ pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) {
     transmute(vst3_s8(transmute(a), transmute(b)))
 }
@@ -12566,6 +13433,7 @@ pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) {
     transmute(vst3_s16(transmute(a), transmute(b)))
 }
@@ -12576,6 +13444,7 @@ pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) {
     transmute(vst3q_s8(transmute(a), transmute(b)))
 }
@@ -12586,6 +13455,7 @@ pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) {
     transmute(vst3q_s16(transmute(a), transmute(b)))
 }
@@ -12596,6 +13466,7 @@ pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) {
     transmute(vst3_s64(transmute(a), transmute(b)))
 }
@@ -12606,6 +13477,7 @@ pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) {
     transmute(vst3_s64(transmute(a), transmute(b)))
 }
@@ -12614,7 +13486,7 @@ pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) {
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
+#[cfg_attr(test, assert_instr(vst3))]
 pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12628,7 +13500,8 @@ vst3_f32_(a as _, b.0, b.1, b.2, 4)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(test, assert_instr(st3))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) {
     #[allow(improper_ctypes)]
    extern "unadjusted" {
@@ -12642,7 +13515,7 @@ vst3_f32_(b.0, b.1, b.2, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
+#[cfg_attr(test, assert_instr(vst3))]
 pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12656,7 +13529,8 @@ vst3q_f32_(a as _, b.0, b.1, b.2, 4)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
+#[cfg_attr(test, assert_instr(st3))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -12670,7 +13544,7 @@ vst3q_f32_(b.0, b.1, b.2, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vst3_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x3_t) {
     static_assert_imm3!(LANE);
@@ -12686,8 +13560,9 @@ vst3_lane_s8_(a as _, b.0, b.1, b.2, LANE, 1)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))]
+#[cfg_attr(test, assert_instr(st3, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst3_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x3_t) {
     static_assert_imm3!(LANE);
     #[allow(improper_ctypes)]
@@ -12702,7 +13577,7 @@ vst3_lane_s8_(b.0, b.1, b.2, LANE as i64, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vst3_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x3_t) {
     static_assert_imm2!(LANE);
@@ -12718,8 +13593,9 @@ vst3_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))]
+#[cfg_attr(test, assert_instr(st3, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst3_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x3_t) {
     static_assert_imm2!(LANE);
     #[allow(improper_ctypes)]
@@ -12734,7 +13610,7 @@ vst3_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vst3_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x3_t) {
     static_assert_imm1!(LANE);
@@ -12750,8 +13626,9 @@ vst3_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))]
+#[cfg_attr(test, assert_instr(st3, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst3_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x3_t) {
     static_assert_imm1!(LANE);
     #[allow(improper_ctypes)]
@@ -12766,7 +13643,7 @@ vst3_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vst3q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x3_t) {
     static_assert_imm3!(LANE);
@@ -12782,8 +13659,9 @@ vst3q_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))]
+#[cfg_attr(test, assert_instr(st3, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst3q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x3_t) {
     static_assert_imm3!(LANE);
     #[allow(improper_ctypes)]
@@ -12798,7 +13676,7 @@ vst3q_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vst3q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x3_t) {
     static_assert_imm2!(LANE);
@@ -12814,8 +13692,9 @@ vst3q_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))]
+#[cfg_attr(test, assert_instr(st3, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst3q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x3_t) {
     static_assert_imm2!(LANE);
     #[allow(improper_ctypes)]
@@ -12833,6 +13712,7 @@ vst3q_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst3_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x3_t) {
     static_assert_imm3!(LANE);
     transmute(vst3_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -12845,6 +13725,7 @@ pub unsafe fn vst3_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x3_t) {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst3_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x3_t) {
     static_assert_imm2!(LANE);
     transmute(vst3_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -12857,6 +13738,7 @@ pub unsafe fn vst3_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x3_t) {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { static_assert_imm1!(LANE); transmute(vst3_lane_s32::(transmute(a), transmute(b))) @@ -12869,6 +13751,7 @@ pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { static_assert_imm3!(LANE); transmute(vst3q_lane_s16::(transmute(a), transmute(b))) @@ -12881,6 +13764,7 @@ pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { static_assert_imm2!(LANE); transmute(vst3q_lane_s32::(transmute(a), transmute(b))) @@ -12893,6 +13777,7 @@ pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { static_assert_imm3!(LANE); transmute(vst3_lane_s8::(transmute(a), transmute(b))) @@ -12905,6 +13790,7 @@ pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { static_assert_imm2!(LANE); transmute(vst3_lane_s16::(transmute(a), transmute(b))) @@ -12917,6 +13803,7 @@ pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { static_assert_imm3!(LANE); transmute(vst3q_lane_s16::(transmute(a), transmute(b))) @@ -12926,7 +13813,7 @@ pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { static_assert_imm1!(LANE); @@ -12942,8 +13829,9 @@ vst3_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn 
vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { static_assert_imm1!(LANE); #[allow(improper_ctypes)] @@ -12958,7 +13846,7 @@ vst3_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { static_assert_imm2!(LANE); @@ -12974,8 +13862,9 @@ vst3q_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { static_assert_imm2!(LANE); #[allow(improper_ctypes)] @@ -12990,7 +13879,7 @@ vst3q_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13004,7 +13893,8 @@ vst4_s8_(a as _, b.0, b.1, b.2, b.3, 1) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] +#[cfg_attr(test, assert_instr(st4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13018,7 +13908,7 @@ vst4_s8_(b.0, b.1, b.2, b.3, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13032,7 +13922,8 @@ vst4_s16_(a as _, b.0, b.1, b.2, b.3, 2) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] +#[cfg_attr(test, assert_instr(st4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13046,7 +13937,7 @@ vst4_s16_(b.0, b.1, b.2, b.3, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13060,7 +13951,8 @@ vst4_s32_(a as _, b.0, b.1, b.2, b.3, 4) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] +#[cfg_attr(test, assert_instr(st4))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13074,7 +13966,7 @@ vst4_s32_(b.0, b.1, b.2, b.3, a as _) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { 
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -13088,7 +13980,8 @@ vst4q_s8_(a as _, b.0, b.1, b.2, b.3, 1)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
+#[cfg_attr(test, assert_instr(st4))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -13102,7 +13995,7 @@ vst4q_s8_(b.0, b.1, b.2, b.3, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
+#[cfg_attr(test, assert_instr(vst4))]
 pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -13116,7 +14009,8 @@ vst4q_s16_(a as _, b.0, b.1, b.2, b.3, 2)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
+#[cfg_attr(test, assert_instr(st4))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -13130,7 +14024,7 @@ vst4q_s16_(b.0, b.1, b.2, b.3, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
+#[cfg_attr(test, assert_instr(vst4))]
 pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -13144,7 +14038,8 @@ vst4q_s32_(a as _, b.0, b.1, b.2, b.3, 4)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
+#[cfg_attr(test, assert_instr(st4))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -13158,7 +14053,7 @@ vst4q_s32_(b.0, b.1, b.2, b.3, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(test, assert_instr(nop))]
 pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -13172,7 +14067,8 @@ vst4_s64_(a as _, b.0, b.1, b.2, b.3, 8)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
+#[cfg_attr(test, assert_instr(nop))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -13188,6 +14084,7 @@ vst4_s64_(b.0, b.1, b.2, b.3, a as _)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) {
     transmute(vst4_s8(transmute(a), transmute(b)))
 }
@@ -13198,6 +14095,7 @@ pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) {
     transmute(vst4_s16(transmute(a), transmute(b)))
 }
@@ -13208,6 +14106,7 @@ pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) {
     transmute(vst4_s32(transmute(a), transmute(b)))
 }
@@ -13218,6 +14117,7 @@ pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) {
     transmute(vst4q_s8(transmute(a), transmute(b)))
 }
@@ -13228,6 +14128,7 @@ pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) {
     transmute(vst4q_s16(transmute(a), transmute(b)))
 }
@@ -13238,6 +14139,7 @@ pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) {
     transmute(vst4q_s32(transmute(a), transmute(b)))
 }
@@ -13248,6 +14150,7 @@ pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) {
     transmute(vst4_s8(transmute(a), transmute(b)))
 }
@@ -13258,6 +14161,7 @@ pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) {
     transmute(vst4_s16(transmute(a), transmute(b)))
 }
@@ -13268,6 +14172,7 @@ pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) {
     transmute(vst4q_s8(transmute(a), transmute(b)))
 }
@@ -13278,6 +14183,7 @@ pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) {
     transmute(vst4q_s16(transmute(a), transmute(b)))
 }
@@ -13288,6 +14194,7 @@ pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) {
     transmute(vst4_s64(transmute(a), transmute(b)))
 }
@@ -13298,6 +14205,7 @@ pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) {
     transmute(vst4_s64(transmute(a), transmute(b)))
 }
@@ -13306,7 +14214,7 @@ pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) {
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
+#[cfg_attr(test, assert_instr(vst4))]
 pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -13320,7 +14228,8 @@ vst4_f32_(a as _, b.0, b.1, b.2, b.3, 4)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
+#[cfg_attr(test, assert_instr(st4))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -13334,7 +14243,7 @@ vst4_f32_(b.0, b.1, b.2, b.3, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
+#[cfg_attr(test, assert_instr(vst4))]
 pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -13348,7 +14257,8 @@ vst4q_f32_(a as _, b.0, b.1, b.2, b.3, 4)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
+#[cfg_attr(test, assert_instr(st4))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -13362,7 +14272,7 @@ vst4q_f32_(b.0, b.1, b.2, b.3, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vst4_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x4_t) {
     static_assert_imm3!(LANE);
@@ -13378,8 +14288,9 @@ vst4_lane_s8_(a as _, b.0, b.1, b.2, b.3, LANE, 1)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
+#[cfg_attr(test, assert_instr(st4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst4_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x4_t) {
     static_assert_imm3!(LANE);
     #[allow(improper_ctypes)]
@@ -13394,7 +14305,7 @@ vst4_lane_s8_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vst4_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x4_t) {
     static_assert_imm2!(LANE);
@@ -13410,8 +14321,9 @@ vst4_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
+#[cfg_attr(test, assert_instr(st4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst4_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x4_t) {
     static_assert_imm2!(LANE);
     #[allow(improper_ctypes)]
@@ -13426,7 +14338,7 @@ vst4_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vst4_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x4_t) {
     static_assert_imm1!(LANE);
@@ -13442,8 +14354,9 @@ vst4_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
+#[cfg_attr(test, assert_instr(st4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst4_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x4_t) {
     static_assert_imm1!(LANE);
     #[allow(improper_ctypes)]
@@ -13458,7 +14371,7 @@ vst4_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vst4q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x4_t) {
     static_assert_imm3!(LANE);
@@ -13474,8 +14387,9 @@ vst4q_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
+#[cfg_attr(test, assert_instr(st4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst4q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x4_t) {
     static_assert_imm3!(LANE);
     #[allow(improper_ctypes)]
@@ -13490,7 +14404,7 @@ vst4q_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vst4q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x4_t) {
     static_assert_imm2!(LANE);
@@ -13506,8 +14420,9 @@ vst4q_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
+#[cfg_attr(test, assert_instr(st4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst4q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x4_t) {
     static_assert_imm2!(LANE);
     #[allow(improper_ctypes)]
@@ -13525,6 +14440,7 @@ vst4q_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x4_t) {
     static_assert_imm3!(LANE);
     transmute(vst4_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -13537,6 +14453,7 @@ pub unsafe fn vst4_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x4_t) {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x4_t) {
     static_assert_imm2!(LANE);
     transmute(vst4_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -13549,6 +14466,7 @@ pub unsafe fn vst4_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x4_t) {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x4_t) {
     static_assert_imm1!(LANE);
     transmute(vst4_lane_s32::<LANE>(transmute(a), transmute(b)))
@@ -13561,6 +14479,7 @@ pub unsafe fn vst4_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x4_t) {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x4_t) {
     static_assert_imm3!(LANE);
     transmute(vst4q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -13573,6 +14492,7 @@ pub unsafe fn vst4q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x4_t) {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x4_t) {
     static_assert_imm2!(LANE);
     transmute(vst4q_lane_s32::<LANE>(transmute(a), transmute(b)))
@@ -13585,6 +14505,7 @@ pub unsafe fn vst4q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x4_t) {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x4_t) {
     static_assert_imm3!(LANE);
     transmute(vst4_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -13597,6 +14518,7 @@ pub unsafe fn vst4_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x4_t) {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x4_t) {
     static_assert_imm2!(LANE);
     transmute(vst4_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -13609,6 +14531,7 @@ pub unsafe fn vst4_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x4_t) {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vst4q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x4_t) {
     static_assert_imm3!(LANE);
     transmute(vst4q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -13618,7 +14541,7 @@ pub unsafe fn vst4q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x4_t) {
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vst4_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x4_t) {
     static_assert_imm1!(LANE);
@@ -13634,8 +14557,9 @@ vst4_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
+#[cfg_attr(test, assert_instr(st4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst4_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x4_t) {
     static_assert_imm1!(LANE);
     #[allow(improper_ctypes)]
@@ -13650,7 +14574,7 @@ vst4_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vst4q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x4_t) {
     static_assert_imm2!(LANE);
@@ -13666,8 +14590,9 @@ vst4q_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
+#[cfg_attr(test, assert_instr(st4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vst4q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x4_t) {
     static_assert_imm2!(LANE);
     #[allow(improper_ctypes)]
@@ -13684,6 +14609,7 @@ vst4q_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     simd_mul(a, b)
 }
@@ -13694,6 +14620,7 @@ pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     simd_mul(a, b)
 }
@@ -13704,6 +14631,7 @@ pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     simd_mul(a, b)
 }
@@ -13714,6 +14642,7 @@ pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     simd_mul(a, b)
 }
@@ -13724,6 +14653,7 @@ pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     simd_mul(a, b)
 }
@@ -13734,6 +14664,7 @@ pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     simd_mul(a, b)
 }
@@ -13744,6 +14675,7 @@ pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     simd_mul(a, b)
 }
@@ -13754,6 +14686,7 @@ pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     simd_mul(a, b)
 }
@@ -13764,6 +14697,7 @@ pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     simd_mul(a, b)
 }
@@ -13774,6 +14708,7 @@ pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     simd_mul(a, b)
 }
@@ -13784,6 +14719,7 @@ pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_mul(a, b) } @@ -13794,6 +14730,7 @@ pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_mul(a, b) } @@ -13804,6 +14741,7 @@ pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13820,6 +14758,7 @@ vmul_p8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13836,6 +14775,7 @@ vmulq_p8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_mul(a, b) } @@ -13846,6 +14786,7 @@ pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_mul(a, b) } @@ -13856,6 +14797,7 @@ pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { simd_mul(a, vdup_n_s16(b)) } @@ -13866,6 +14808,7 @@ pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub 
unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { simd_mul(a, vdupq_n_s16(b)) } @@ -13876,6 +14819,7 @@ pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { simd_mul(a, vdup_n_s32(b)) } @@ -13886,6 +14830,7 @@ pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { simd_mul(a, vdupq_n_s32(b)) } @@ -13896,6 +14841,7 @@ pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { simd_mul(a, vdup_n_u16(b)) } @@ -13906,6 +14852,7 @@ pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { simd_mul(a, vdupq_n_u16(b)) } @@ -13916,6 +14863,7 @@ pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { simd_mul(a, vdup_n_u32(b)) } @@ -13926,6 +14874,7 @@ pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { simd_mul(a, vdupq_n_u32(b)) } @@ -13936,6 +14885,7 @@ pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { simd_mul(a, vdup_n_f32(b)) } @@ -13946,6 +14896,7 @@ pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { simd_mul(a, vdupq_n_f32(b)) } @@ -13957,6 +14908,7 @@ pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -13969,6 +14921,7 @@ pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { static_assert_imm3!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -13981,6 +14934,7 @@ pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle8!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -13993,6 +14947,7 @@ pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); simd_mul(a, simd_shuffle8!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -14005,6 +14960,7 @@ pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> in #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); simd_mul(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -14017,6 +14973,7 @@ pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int3 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_s32(a: int32x2_t, 
b: int32x4_t) -> int32x2_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -14029,6 +14986,7 @@ pub unsafe fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { static_assert_imm1!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -14041,6 +14999,7 @@ pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -14053,6 +15012,7 @@ pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> in #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -14065,6 +15025,7 @@ pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t { static_assert_imm3!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -14077,6 +15038,7 @@ pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> u #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle8!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -14089,6 +15051,7 @@ pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> u #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { static_assert_imm3!(LANE); simd_mul(a, simd_shuffle8!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) 
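A minimal usage sketch (not part of the patch): with the `stable(feature = "neon_intrinsics", since = "1.59.0")` attributes above applied, lane-multiply intrinsics such as `vmul_laneq_s16` become callable from stable Rust 1.59.0+ on aarch64 with no feature gate. Intrinsic names, signatures, and lane bounds are taken from the diff itself; the `mul_by_lane_demo` harness and its values are illustrative assumptions.

#[cfg(target_arch = "aarch64")]
fn mul_by_lane_demo() {
    // NEON is a baseline feature of the aarch64 std targets, so the
    // intrinsics need only an `unsafe` block, not runtime feature detection.
    use std::arch::aarch64::*;
    unsafe {
        let a: int16x4_t = vdup_n_s16(3); // [3, 3, 3, 3]
        let b: int16x8_t = vdupq_n_s16(7); // lane 5 is 7, like every other lane
        // `rustc_legacy_const_generics(2)` is what lets the lane index be
        // written as a const generic; `static_assert_imm3!` bounds it to 0..=7.
        let r = vmul_laneq_s16::<5>(a, b); // each lane of `a` times lane 5 of `b`
        assert_eq!(vget_lane_s16::<0>(r), 21);
    }
}

fn main() {
    #[cfg(target_arch = "aarch64")]
    mul_by_lane_demo();
}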
@@ -14101,6 +15064,7 @@ pub unsafe fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { static_assert_imm1!(LANE); simd_mul(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -14113,6 +15077,7 @@ pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -14125,6 +15090,7 @@ pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> u #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t { static_assert_imm1!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -14137,6 +15103,7 @@ pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> u #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -14149,6 +15116,7 @@ pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { static_assert_imm1!(LANE); simd_mul(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -14161,6 +15129,7 @@ pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -14173,6 +15142,7 @@ pub unsafe fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { static_assert_imm1!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -14185,6 +15155,7 @@ pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -14196,6 +15167,7 @@ pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) - #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14212,6 +15184,7 @@ vmull_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14228,6 +15201,7 @@ vmull_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14244,6 +15218,7 @@ vmull_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14260,6 +15235,7 @@ vmull_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14276,6 +15252,7 @@ vmull_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { #[allow(improper_ctypes)] 
extern "unadjusted" { @@ -14292,6 +15269,7 @@ vmull_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.p8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmull))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14308,6 +15286,7 @@ vmull_p8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { vmull_s16(a, vdup_n_s16(b)) } @@ -14318,6 +15297,7 @@ pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { vmull_s32(a, vdup_n_s32(b)) } @@ -14328,6 +15308,7 @@ pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { vmull_u16(a, vdup_n_u16(b)) } @@ -14338,6 +15319,7 @@ pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { vmull_u32(a, vdup_n_u32(b)) } @@ -14349,6 +15331,7 @@ pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmull_s16(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -14361,6 +15344,7 @@ pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { static_assert_imm3!(LANE); vmull_s16(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -14373,6 +15357,7 @@ pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> in #[cfg_attr(all(test, target_arch 
= "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { static_assert_imm1!(LANE); vmull_s32(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -14385,6 +15370,7 @@ pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { static_assert_imm2!(LANE); vmull_s32(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -14397,6 +15383,7 @@ pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> in #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmull_u16(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -14409,6 +15396,7 @@ pub unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> u #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint32x4_t { static_assert_imm3!(LANE); vmull_u16(a, simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -14421,6 +15409,7 @@ pub unsafe fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { static_assert_imm1!(LANE); vmull_u32(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -14433,6 +15422,7 @@ pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> u #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint64x2_t { static_assert_imm2!(LANE); vmull_u32(a, simd_shuffle2!(b, b, [LANE as u32, LANE as u32])) @@ -14444,6 +15434,7 @@ pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfma_f32(a: float32x2_t, b: 
float32x2_t, c: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14460,6 +15451,7 @@ vfma_f32_(b, c, a) #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14476,6 +15468,7 @@ vfmaq_f32_(b, c, a) #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vfma_f32(a, b, vdup_n_f32_vfp4(c)) } @@ -14486,6 +15479,7 @@ pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vfmaq_f32(a, b, vdupq_n_f32_vfp4(c)) } @@ -14496,6 +15490,7 @@ pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { let b: float32x2_t = simd_neg(b); vfma_f32(a, b, c) @@ -14507,6 +15502,7 @@ pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { let b: float32x4_t = simd_neg(b); vfmaq_f32(a, b, c) @@ -14518,6 +15514,7 @@ pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vfms_f32(a, b, vdup_n_f32_vfp4(c)) } @@ -14528,6 +15525,7 @@ pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfmsq_n_f32(a: 
float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vfmsq_f32(a, b, vdupq_n_f32_vfp4(c)) } @@ -14538,6 +15536,7 @@ pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_sub(a, b) } @@ -14548,6 +15547,7 @@ pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_sub(a, b) } @@ -14558,6 +15558,7 @@ pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_sub(a, b) } @@ -14568,6 +15569,7 @@ pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_sub(a, b) } @@ -14578,6 +15580,7 @@ pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_sub(a, b) } @@ -14588,6 +15591,7 @@ pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_sub(a, b) } @@ -14598,6 +15602,7 @@ pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_sub(a, b) } @@ -14608,6 +15613,7 @@ pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch 
= "arm"), assert_instr("vsub.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_sub(a, b) } @@ -14618,6 +15624,7 @@ pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_sub(a, b) } @@ -14628,6 +15635,7 @@ pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_sub(a, b) } @@ -14638,6 +15646,7 @@ pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_sub(a, b) } @@ -14648,6 +15657,7 @@ pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_sub(a, b) } @@ -14658,6 +15668,7 @@ pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_sub(a, b) } @@ -14668,6 +15679,7 @@ pub unsafe fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_sub(a, b) } @@ -14678,6 +15690,7 @@ pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) 
-> uint64x1_t { simd_sub(a, b) } @@ -14688,6 +15701,7 @@ pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_sub(a, b) } @@ -14698,6 +15712,7 @@ pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_sub(a, b) } @@ -14708,6 +15723,7 @@ pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_sub(a, b) } @@ -14718,6 +15734,7 @@ pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_xor(a, b) } @@ -14728,6 +15745,7 @@ pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_xor(a, b) } @@ -14738,6 +15756,7 @@ pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { simd_xor(a, b) } @@ -14748,6 +15767,7 @@ pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_xor(a, b) } @@ -14758,6 +15778,7 @@ pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] 
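// Annotation (not patch content): polynomial "addition" over GF(2) is carry-less,
// i.e. a lane-wise XOR — hence the `simd_xor` (or plain `^` for p128) bodies of
// the vadd_p* intrinsics that follow.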
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { simd_xor(a, b) } @@ -14768,6 +15789,7 @@ pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_xor(a, b) } @@ -14778,6 +15800,7 @@ pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 { a ^ b } @@ -14788,6 +15811,7 @@ pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { let c: i16x8 = i16x8::new(8, 8, 8, 8, 8, 8, 8, 8); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -14799,6 +15823,7 @@ pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { let c: i32x4 = i32x4::new(16, 16, 16, 16); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -14810,6 +15835,7 @@ pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { let c: i64x2 = i64x2::new(32, 32); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -14821,6 +15847,7 @@ pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { let c: u16x8 = u16x8::new(8, 8, 8, 8, 8, 8, 8, 8); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -14832,6 +15859,7 @@ pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] 
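// Annotation (not patch content): the vsubhn_* family computes subtract-high-narrow:
// (a - b) is shifted right by half the lane width (the constant 16 in the body below)
// and then narrowed, keeping only the high half of each difference.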
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { let c: u32x4 = u32x4::new(16, 16, 16, 16); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -14843,6 +15871,7 @@ pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { let c: u64x2 = u64x2::new(32, 32); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -14854,6 +15883,7 @@ pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { let d: int8x8_t = vsubhn_s16(b, c); simd_shuffle16!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -14865,6 +15895,7 @@ pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x1 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { let d: int16x4_t = vsubhn_s32(b, c); simd_shuffle8!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -14876,6 +15907,7 @@ pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { let d: int32x2_t = vsubhn_s64(b, c); simd_shuffle4!(a, d, [0, 1, 2, 3]) @@ -14887,6 +15919,7 @@ pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { let d: uint8x8_t = vsubhn_u16(b, c); simd_shuffle16!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -14898,6 +15931,7 @@ pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uin #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { let d: uint16x4_t = vsubhn_u32(b, c); simd_shuffle8!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -14909,6 +15943,7 @@ pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> ui #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { let d: uint32x2_t = vsubhn_u64(b, c); simd_shuffle4!(a, d, [0, 1, 2, 3]) @@ -14920,6 +15955,7 @@ pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> ui #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14936,6 +15972,7 @@ vhsub_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14952,6 +15989,7 @@ vhsubq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14968,6 +16006,7 @@ vhsub_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14984,6 +16023,7 @@ vhsubq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15000,6 +16040,7 @@ vhsub_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] 
extern "unadjusted" { @@ -15016,6 +16057,7 @@ vhsubq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15032,6 +16074,7 @@ vhsub_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15048,6 +16091,7 @@ vhsubq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15064,6 +16108,7 @@ vhsub_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15080,6 +16125,7 @@ vhsubq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15096,6 +16142,7 @@ vhsub_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15112,6 +16159,7 @@ vhsubq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubw))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { simd_sub(a, simd_cast(b)) } @@ -15122,6 +16170,7 @@ pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubw))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe 
fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { simd_sub(a, simd_cast(b)) } @@ -15132,6 +16181,7 @@ pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubw))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { simd_sub(a, simd_cast(b)) } @@ -15142,6 +16192,7 @@ pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubw))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { simd_sub(a, simd_cast(b)) } @@ -15152,6 +16203,7 @@ pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubw))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { simd_sub(a, simd_cast(b)) } @@ -15162,6 +16214,7 @@ pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubw))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { simd_sub(a, simd_cast(b)) } @@ -15172,6 +16225,7 @@ pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { let c: int16x8_t = simd_cast(a); let d: int16x8_t = simd_cast(b); @@ -15184,6 +16238,7 @@ pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { let c: int32x4_t = simd_cast(a); let d: int32x4_t = simd_cast(b); @@ -15196,6 +16251,7 @@ pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { let c: int64x2_t = simd_cast(a); let d: int64x2_t = simd_cast(b); @@ 
-15208,6 +16264,7 @@ pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { let c: uint16x8_t = simd_cast(a); let d: uint16x8_t = simd_cast(b); @@ -15220,6 +16277,7 @@ pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { let c: uint32x4_t = simd_cast(a); let d: uint32x4_t = simd_cast(b); @@ -15232,6 +16290,7 @@ pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { let c: uint64x2_t = simd_cast(a); let d: uint64x2_t = simd_cast(b); @@ -15244,6 +16303,7 @@ pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15260,6 +16320,7 @@ vmax_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15276,6 +16337,7 @@ vmaxq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15292,6 +16354,7 @@ vmax_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15308,6 +16371,7 @@ vmaxq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] 
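// Annotation (not patch content): lane-wise signed maximum; the assert_instr
// attributes check that this lowers to a single `vmax` on arm and `smax` on aarch64.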
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15324,6 +16388,7 @@ vmax_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15340,6 +16405,7 @@ vmaxq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15356,6 +16422,7 @@ vmax_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15372,6 +16439,7 @@ vmaxq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15388,6 +16456,7 @@ vmax_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15404,6 +16473,7 @@ vmaxq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15420,6 +16490,7 @@ vmax_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15436,6 +16507,7 @@ vmaxq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmax))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15452,6 +16524,7 @@ vmax_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmax))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15468,6 +16541,7 @@ vmaxq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmaxnm))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15484,6 +16558,7 @@ vmaxnm_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmaxnm))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15500,6 +16575,7 @@ vmaxnmq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15516,6 +16592,7 @@ vmin_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15532,6 +16609,7 @@ vminq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15548,6 +16626,7 @@ vmin_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15564,6 +16643,7 @@ vminq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15580,6 +16660,7 @@ vmin_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15596,6 +16677,7 @@ vminq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15612,6 +16694,7 @@ vmin_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15628,6 +16711,7 @@ vminq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15644,6 +16728,7 @@ vmin_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15660,6 +16745,7 @@ vminq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15676,6 +16762,7 @@ vmin_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15692,6 +16779,7 @@ vminq_u32_(a, b) #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmin))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15708,6 +16796,7 @@ vmin_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmin))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15724,6 +16813,7 @@ vminq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fminnm))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15740,6 +16830,7 @@ vminnm_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fminnm))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15756,6 +16847,7 @@ vminnmq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(faddp))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15772,6 +16864,7 @@ vpadd_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15788,6 +16881,7 @@ vqdmull_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15804,6 +16898,7 @@ vqdmull_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { vqdmull_s16(a, vdup_n_s16(b)) 
} @@ -15814,6 +16909,7 @@ pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { vqdmull_s32(a, vdup_n_s32(b)) } @@ -15825,6 +16921,7 @@ pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmull_lane_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int32x4_t { static_assert_imm2!(N); let b: int16x4_t = simd_shuffle4!(b, b, [N as u32, N as u32, N as u32, N as u32]); @@ -15838,6 +16935,7 @@ pub unsafe fn vqdmull_lane_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int3 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull, N = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmull_lane_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int64x2_t { static_assert_imm1!(N); let b: int32x2_t = simd_shuffle2!(b, b, [N as u32, N as u32]); @@ -15850,6 +16948,7 @@ pub unsafe fn vqdmull_lane_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int6 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { vqaddq_s32(a, vqdmull_s16(b, c)) } @@ -15860,6 +16959,7 @@ pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { vqaddq_s64(a, vqdmull_s32(b, c)) } @@ -15870,6 +16970,7 @@ pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vqaddq_s32(a, vqdmull_n_s16(b, c)) } @@ -15880,6 +16981,7 @@ pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t,
c: i32) -> int64x2_t { vqaddq_s64(a, vqdmull_n_s32(b, c)) } @@ -15891,6 +16993,7 @@ pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal, N = 2))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlal_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { static_assert_imm2!(N); vqaddq_s32(a, vqdmull_lane_s16::<N>(b, c)) @@ -15903,6 +17006,7 @@ pub unsafe fn vqdmlal_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal, N = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlal_lane_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { static_assert_imm1!(N); vqaddq_s64(a, vqdmull_lane_s32::<N>(b, c)) @@ -15914,6 +17018,7 @@ pub unsafe fn vqdmlal_lane_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int3 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { vqsubq_s32(a, vqdmull_s16(b, c)) } @@ -15924,6 +17029,7 @@ pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { vqsubq_s64(a, vqdmull_s32(b, c)) } @@ -15934,6 +17040,7 @@ pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vqsubq_s32(a, vqdmull_n_s16(b, c)) } @@ -15944,6 +17051,7 @@ pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vqsubq_s64(a, vqdmull_n_s32(b, c)) } @@ -15955,6 +17063,7 @@ pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl, N = 2))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub
unsafe fn vqdmlsl_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { static_assert_imm2!(N); vqsubq_s32(a, vqdmull_lane_s16::<N>(b, c)) @@ -15967,6 +17076,7 @@ pub unsafe fn vqdmlsl_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl, N = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlsl_lane_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { static_assert_imm1!(N); vqsubq_s64(a, vqdmull_lane_s32::<N>(b, c)) @@ -15978,6 +17088,7 @@ pub unsafe fn vqdmlsl_lane_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int3 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15994,6 +17105,7 @@ vqdmulh_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16010,6 +17122,7 @@ vqdmulhq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16026,6 +17139,7 @@ vqdmulh_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16042,6 +17156,7 @@ vqdmulhq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { let b: int16x4_t = vdup_n_s16(b); vqdmulh_s16(a, b) @@ -16053,6 +17168,7 @@ pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { let b: int32x2_t = vdup_n_s32(b); vqdmulh_s32(a, b) @@ -16064,6 +17180,7 @@ pub unsafe fn vqdmulh_n_s32(a:
int32x2_t, b: i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { let b: int16x8_t = vdupq_n_s16(b); vqdmulhq_s16(a, b) @@ -16075,6 +17192,7 @@ pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { let b: int32x4_t = vdupq_n_s32(b); vqdmulhq_s32(a, b) @@ -16087,6 +17205,7 @@ pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); vqdmulhq_s16(a, vdupq_n_s16(simd_extract(b, LANE as u32))) @@ -16099,6 +17218,7 @@ pub unsafe fn vqdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t { static_assert_imm3!(LANE); vqdmulh_s16(a, vdup_n_s16(simd_extract(b, LANE as u32))) @@ -16111,6 +17231,7 @@ pub unsafe fn vqdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); vqdmulhq_s32(a, vdupq_n_s32(simd_extract(b, LANE as u32))) @@ -16123,6 +17244,7 @@ pub unsafe fn vqdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t { static_assert_imm2!(LANE); vqdmulh_s32(a, vdup_n_s32(simd_extract(b, LANE as u32))) @@ -16134,6 +17256,7 @@ pub unsafe fn vqdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern
"unadjusted" { @@ -16150,6 +17273,7 @@ vqmovn_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16166,6 +17290,7 @@ vqmovn_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16182,6 +17307,7 @@ vqmovn_s64_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16198,6 +17324,7 @@ vqmovn_u16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16214,6 +17341,7 @@ vqmovn_u32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16230,6 +17358,7 @@ vqmovn_u64_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16246,6 +17375,7 @@ vqmovun_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16262,6 +17392,7 @@ vqmovun_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16278,6 +17409,7 @@ vqmovun_s64_(a) 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16294,6 +17426,7 @@ vqrdmulh_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16310,6 +17443,7 @@ vqrdmulhq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16326,6 +17460,7 @@ vqrdmulh_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16342,6 +17477,7 @@ vqrdmulhq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { vqrdmulh_s16(a, vdup_n_s16(b)) } @@ -16352,6 +17488,7 @@ pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { vqrdmulhq_s16(a, vdupq_n_s16(b)) } @@ -16362,6 +17499,7 @@ pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { vqrdmulh_s32(a, vdup_n_s32(b)) } @@ -16372,6 +17510,7 @@ pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] +#[cfg_attr(target_arch = "aarch64", 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { vqrdmulhq_s32(a, vdupq_n_s32(b)) } @@ -16383,6 +17522,7 @@ pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); let b: int16x4_t = simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -16396,6 +17536,7 @@ pub unsafe fn vqrdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t { static_assert_imm3!(LANE); let b: int16x4_t = simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -16409,6 +17550,7 @@ pub unsafe fn vqrdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t { static_assert_imm2!(LANE); let b: int16x8_t = simd_shuffle8!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -16422,6 +17564,7 @@ pub unsafe fn vqrdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); let b: int16x8_t = simd_shuffle8!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -16435,6 +17578,7 @@ pub unsafe fn vqrdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) - #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); let b: int32x2_t = simd_shuffle2!(b, b, [LANE as u32, LANE as u32]); @@ -16448,6 +17592,7 @@ pub unsafe fn vqrdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t {
static_assert_imm2!(LANE); let b: int32x2_t = simd_shuffle2!(b, b, [LANE as u32, LANE as u32]); @@ -16461,6 +17606,7 @@ pub unsafe fn vqrdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t { static_assert_imm1!(LANE); let b: int32x4_t = simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -16474,6 +17620,7 @@ pub unsafe fn vqrdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); let b: int32x4_t = simd_shuffle4!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -16486,6 +17633,7 @@ pub unsafe fn vqrdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) - #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { vqsub_s16(a, vqrdmulh_s16(b, c)) } @@ -16496,6 +17644,7 @@ pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { vqsubq_s16(a, vqrdmulhq_s16(b, c)) } @@ -16506,6 +17655,7 @@ pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { vqsub_s32(a, vqrdmulh_s32(b, c)) } @@ -16516,6 +17666,7 @@ pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { vqsubq_s32(a, vqrdmulhq_s32(b, c)) } @@ -16527,6 +17678,7 @@ pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"),
assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); vqsub_s16(a, vqrdmulh_lane_s16::<LANE>(b, c)) @@ -16539,6 +17691,7 @@ pub unsafe fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { static_assert_imm3!(LANE); vqsub_s16(a, vqrdmulh_laneq_s16::<LANE>(b, c)) @@ -16551,6 +17704,7 @@ pub unsafe fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { static_assert_imm2!(LANE); vqsubq_s16(a, vqrdmulhq_lane_s16::<LANE>(b, c)) @@ -16563,6 +17717,7 @@ pub unsafe fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); vqsubq_s16(a, vqrdmulhq_laneq_s16::<LANE>(b, c)) @@ -16575,6 +17730,7 @@ pub unsafe fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); vqsub_s32(a, vqrdmulh_lane_s32::<LANE>(b, c)) @@ -16587,6 +17743,7 @@ pub unsafe fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { static_assert_imm2!(LANE); vqsub_s32(a, vqrdmulh_laneq_s32::<LANE>(b, c)) @@ -16599,6 +17756,7 @@ pub unsafe fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { static_assert_imm1!(LANE); vqsubq_s32(a, vqrdmulhq_lane_s32::<LANE>(b, c)) @@ -16611,6 +17769,7 @@ pub unsafe fn
vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); vqsubq_s32(a, vqrdmulhq_laneq_s32::<LANE>(b, c)) @@ -16622,6 +17781,7 @@ pub unsafe fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16638,6 +17798,7 @@ vqrshl_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16654,6 +17815,7 @@ vqrshlq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16670,6 +17832,7 @@ vqrshl_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16686,6 +17849,7 @@ vqrshlq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16702,6 +17866,7 @@ vqrshl_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16718,6 +17883,7 @@ vqrshlq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature =
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16734,6 +17900,7 @@ vqrshl_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16750,6 +17917,7 @@ vqrshlq_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16766,6 +17934,7 @@ vqrshl_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16782,6 +17951,7 @@ vqrshlq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16798,6 +17968,7 @@ vqrshl_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16814,6 +17985,7 @@ vqrshlq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16830,6 +18002,7 @@ vqrshl_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16846,6 +18019,7 @@ vqrshlq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(uqrshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16862,6 +18036,7 @@ vqrshl_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16876,7 +18051,7 @@ vqrshlq_u64_(a, b) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshrn, N = 2))] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); @@ -16892,8 +18067,9 @@ vqrshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i1 #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshrn, N = 2))] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); #[allow(improper_ctypes)] @@ -16908,7 +18084,7 @@ vqrshrn_n_s16_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshrn, N = 2))] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); @@ -16924,8 +18100,9 @@ vqrshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshrn, N = 2))] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); #[allow(improper_ctypes)] @@ -16940,7 +18117,7 @@ vqrshrn_n_s32_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshrn, N = 2))] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); @@ -16956,8 +18133,9 @@ vqrshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshrn, N = 2))] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -16972,7 +18150,7 @@ vqrshrn_n_s64_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vqrshrn, N = 2))] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); @@ -16988,8 +18166,9 @@ vqrshrn_n_u16_(a, uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshrn, N = 2))] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); #[allow(improper_ctypes)] @@ -17004,7 +18183,7 @@ vqrshrn_n_u16_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshrn, N = 2))] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); @@ -17020,8 +18199,9 @@ vqrshrn_n_u32_(a, uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshrn, N = 2))] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); #[allow(improper_ctypes)] @@ -17036,7 +18216,7 @@ vqrshrn_n_u32_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshrn, N = 2))] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); @@ -17052,8 +18232,9 @@ vqrshrn_n_u64_(a, uint64x2_t(-N as u64, -N as u64)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshrn, N = 2))] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -17068,7 +18249,7 @@ vqrshrn_n_u64_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshrun, N = 2))] +#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); @@ -17084,8 +18265,9 @@ vqrshrun_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshrun, N = 2))] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); #[allow(improper_ctypes)] @@ -17100,7 +18282,7 @@ vqrshrun_n_s16_(a, N) 
#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshrun, N = 2))] +#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); @@ -17116,8 +18298,9 @@ vqrshrun_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshrun, N = 2))] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); #[allow(improper_ctypes)] @@ -17132,7 +18315,7 @@ vqrshrun_n_s32_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshrun, N = 2))] +#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); @@ -17148,8 +18331,9 @@ vqrshrun_n_s64_(a, int64x2_t(-N as i64, -N as i64)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshrun, N = 2))] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -17166,6 +18350,7 @@ vqrshrun_n_s64_(a, N) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17182,6 +18367,7 @@ vqshl_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17198,6 +18384,7 @@ vqshlq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17214,6 +18401,7 @@ vqshl_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern 
"unadjusted" { @@ -17230,6 +18418,7 @@ vqshlq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17246,6 +18435,7 @@ vqshl_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17262,6 +18452,7 @@ vqshlq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17278,6 +18469,7 @@ vqshl_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17294,6 +18486,7 @@ vqshlq_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17310,6 +18503,7 @@ vqshl_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17326,6 +18520,7 @@ vqshlq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17342,6 +18537,7 @@ vqshl_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { 
#[allow(improper_ctypes)] extern "unadjusted" { @@ -17358,6 +18554,7 @@ vqshlq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17374,6 +18571,7 @@ vqshl_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17390,6 +18588,7 @@ vqshlq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17406,6 +18605,7 @@ vqshl_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17423,6 +18623,7 @@ vqshlq_u64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t { static_assert_imm3!(N); vqshl_s8(a, vdup_n_s8(N as _)) @@ -17435,6 +18636,7 @@ pub unsafe fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t { static_assert_imm3!(N); vqshlq_s8(a, vdupq_n_s8(N as _)) @@ -17447,6 +18649,7 @@ pub unsafe fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t { static_assert_imm4!(N); vqshl_s16(a, vdup_n_s16(N as _)) @@ -17459,6 +18662,7 @@ pub unsafe fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
pub unsafe fn vqshlq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t { static_assert_imm4!(N); vqshlq_s16(a, vdupq_n_s16(N as _)) @@ -17471,6 +18675,7 @@ pub unsafe fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t { static_assert_imm5!(N); vqshl_s32(a, vdup_n_s32(N as _)) @@ -17483,6 +18688,7 @@ pub unsafe fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t { static_assert_imm5!(N); vqshlq_s32(a, vdupq_n_s32(N as _)) @@ -17495,6 +18701,7 @@ pub unsafe fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t { static_assert_imm6!(N); vqshl_s64(a, vdup_n_s64(N as _)) @@ -17507,6 +18714,7 @@ pub unsafe fn vqshl_n_s64(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t { static_assert_imm6!(N); vqshlq_s64(a, vdupq_n_s64(N as _)) @@ -17519,6 +18727,7 @@ pub unsafe fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t { static_assert_imm3!(N); vqshl_u8(a, vdup_n_s8(N as _)) @@ -17531,6 +18740,7 @@ pub unsafe fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t { static_assert_imm3!(N); vqshlq_u8(a, vdupq_n_s8(N as _)) @@ -17543,6 +18753,7 @@ pub unsafe fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t { static_assert_imm4!(N); vqshl_u16(a, vdup_n_s16(N as _)) @@ -17555,6 +18766,7 @@ pub unsafe fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t { static_assert_imm4!(N); vqshlq_u16(a, vdupq_n_s16(N as _)) @@ -17567,6 +18779,7 @@ pub unsafe fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t { static_assert_imm5!(N); vqshl_u32(a, vdup_n_s32(N as _)) @@ -17579,6 +18792,7 @@ pub unsafe fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t { static_assert_imm5!(N); vqshlq_u32(a, vdupq_n_s32(N as _)) @@ -17591,6 +18805,7 @@ pub unsafe fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t { static_assert_imm6!(N); vqshl_u64(a, vdup_n_s64(N as _)) @@ -17603,6 +18818,7 @@ pub unsafe fn vqshl_n_u64(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t { static_assert_imm6!(N); vqshlq_u64(a, vdupq_n_s64(N as _)) @@ -17612,7 +18828,7 @@ pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshlu, N = 2))] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t { static_assert_imm3!(N); @@ -17628,8 +18844,9 @@ vqshlu_n_s8_(a, int8x8_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshlu, N = 2))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t { static_assert_imm3!(N); #[allow(improper_ctypes)] @@ -17644,7 +18861,7 @@ vqshlu_n_s8_(a, int8x8_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshlu, N = 2))] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshlu_n_s16<const N: i32>(a: int16x4_t) -> uint16x4_t { static_assert_imm4!(N); @@ -17660,8 +18877,9 @@ 
vqshlu_n_s16_(a, int16x4_t(N as i16, N as i16, N as i16, N as i16)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshlu, N = 2))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s16<const N: i32>(a: int16x4_t) -> uint16x4_t { static_assert_imm4!(N); #[allow(improper_ctypes)] @@ -17676,7 +18894,7 @@ vqshlu_n_s16_(a, int16x4_t(N as i16, N as i16, N as i16, N as i16)) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshlu, N = 2))] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshlu_n_s32<const N: i32>(a: int32x2_t) -> uint32x2_t { static_assert_imm5!(N); @@ -17692,8 +18910,9 @@ vqshlu_n_s32_(a, int32x2_t(N as i32, N as i32)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshlu, N = 2))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s32<const N: i32>(a: int32x2_t) -> uint32x2_t { static_assert_imm5!(N); #[allow(improper_ctypes)] @@ -17708,7 +18927,7 @@ vqshlu_n_s32_(a, int32x2_t(N as i32, N as i32)) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshlu, N = 2))] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshlu_n_s64<const N: i32>(a: int64x1_t) -> uint64x1_t { static_assert_imm6!(N); @@ -17724,8 +18943,9 @@ vqshlu_n_s64_(a, int64x1_t(N as i64)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshlu, N = 2))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s64<const N: i32>(a: int64x1_t) -> uint64x1_t { static_assert_imm6!(N); #[allow(improper_ctypes)] @@ -17740,7 +18960,7 @@ vqshlu_n_s64_(a, int64x1_t(N as i64)) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshlu, N = 2))] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshluq_n_s8<const N: i32>(a: int8x16_t) -> uint8x16_t { static_assert_imm3!(N); @@ -17756,8 +18976,9 @@ vqshluq_n_s8_(a, int8x16_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshlu, N = 2))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s8<const N: i32>(a: int8x16_t) -> uint8x16_t { static_assert_imm3!(N); #[allow(improper_ctypes)] @@ -17772,7 +18993,7 @@ vqshluq_n_s8_(a, int8x16_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshlu, N = 2))] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshluq_n_s16<const N: i32>(a: int16x8_t) -> uint16x8_t { static_assert_imm4!(N); @@ -17788,8 +19009,9 @@ vqshluq_n_s16_(a, int16x8_t(N 
as i16, N as i16, N as i16, N as i16, N as i16, N #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshlu, N = 2))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s16<const N: i32>(a: int16x8_t) -> uint16x8_t { static_assert_imm4!(N); #[allow(improper_ctypes)] @@ -17804,7 +19026,7 @@ vqshluq_n_s16_(a, int16x8_t(N as i16, N as i16, N as i16, N as i16, N as i16, N #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshlu, N = 2))] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshluq_n_s32<const N: i32>(a: int32x4_t) -> uint32x4_t { static_assert_imm5!(N); @@ -17820,8 +19042,9 @@ vqshluq_n_s32_(a, int32x4_t(N as i32, N as i32, N as i32, N as i32)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshlu, N = 2))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s32<const N: i32>(a: int32x4_t) -> uint32x4_t { static_assert_imm5!(N); #[allow(improper_ctypes)] @@ -17836,7 +19059,7 @@ vqshluq_n_s32_(a, int32x4_t(N as i32, N as i32, N as i32, N as i32)) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshlu, N = 2))] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshluq_n_s64<const N: i32>(a: int64x2_t) -> uint64x2_t { static_assert_imm6!(N); @@ -17852,8 +19075,9 @@ vqshluq_n_s64_(a, int64x2_t(N as i64, N as i64)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshlu, N = 2))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s64<const N: i32>(a: int64x2_t) -> uint64x2_t { static_assert_imm6!(N); #[allow(improper_ctypes)] @@ -17868,7 +19092,7 @@ vqshluq_n_s64_(a, int64x2_t(N as i64, N as i64)) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshrn, N = 2))] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); @@ -17884,8 +19108,9 @@ vqshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16 #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshrn, N = 2))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); #[allow(improper_ctypes)] @@ -17900,7 +19125,7 @@ vqshrn_n_s16_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshrn, N = 2))] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 
&& N <= 16); @@ -17916,8 +19141,9 @@ vqshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshrn, N = 2))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); #[allow(improper_ctypes)] @@ -17932,7 +19158,7 @@ vqshrn_n_s32_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshrn, N = 2))] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); @@ -17948,8 +19174,9 @@ vqshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshrn, N = 2))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -17964,7 +19191,7 @@ vqshrn_n_s64_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshrn, N = 2))] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); @@ -17980,8 +19207,9 @@ vqshrn_n_u16_(a, uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u1 #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshrn, N = 2))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); #[allow(improper_ctypes)] @@ -17996,7 +19224,7 @@ vqshrn_n_u16_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshrn, N = 2))] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); @@ -18012,8 +19240,9 @@ vqshrn_n_u32_(a, uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshrn, N = 2))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); #[allow(improper_ctypes)] @@ -18028,7 +19257,7 @@ vqshrn_n_u32_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshrn, N = 2))] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshrn_n_u64<const N: i32>(a: uint64x2_t) 
-> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); @@ -18044,8 +19273,9 @@ vqshrn_n_u64_(a, uint64x2_t(-N as u64, -N as u64)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshrn, N = 2))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -18060,7 +19290,7 @@ vqshrn_n_u64_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshrun, N = 2))] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); @@ -18076,8 +19306,9 @@ vqshrun_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i1 #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshrun, N = 2))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); #[allow(improper_ctypes)] @@ -18092,7 +19323,7 @@ vqshrun_n_s16_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshrun, N = 2))] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); @@ -18108,8 +19339,9 @@ vqshrun_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshrun, N = 2))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); #[allow(improper_ctypes)] @@ -18124,7 +19356,7 @@ vqshrun_n_s32_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshrun, N = 2))] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); @@ -18140,8 +19372,9 @@ vqshrun_n_s64_(a, int64x2_t(-N as i64, -N as i64)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshrun, N = 2))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -18158,6 +19391,7 @@ vqshrun_n_s64_(a, N) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrte))] 
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18174,6 +19408,7 @@ vrsqrte_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrte))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18190,6 +19425,7 @@ vrsqrteq_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursqrte))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18206,6 +19442,7 @@ vrsqrte_u32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursqrte))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18222,6 +19459,7 @@ vrsqrteq_u32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrts))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18238,6 +19476,7 @@ vrsqrts_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrts))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18254,6 +19493,7 @@ vrsqrtsq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecpe))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18270,6 +19510,7 @@ vrecpe_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecpe))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18286,6 +19527,7 @@ vrecpeq_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(urecpe))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18302,6 +19544,7 @@ vrecpe_u32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urecpe))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18318,6 +19561,7 @@ vrecpeq_u32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecps))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18334,6 +19578,7 @@ vrecps_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecps))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18350,6 +19595,7 @@ vrecpsq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { transmute(a) } @@ -18360,6 +19606,7 @@ pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { transmute(a) } @@ -18370,6 +19617,7 @@ pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { transmute(a) } @@ -18380,6 +19628,7 @@ pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { transmute(a) } @@ -18390,6 +19639,7 @@ pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { transmute(a) } @@ -18400,6 +19650,7 @@ pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { transmute(a) } @@ -18410,6 +19661,7 @@ pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { transmute(a) } @@ -18420,6 +19672,7 @@ pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { transmute(a) } @@ -18430,6 +19683,7 @@ pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { transmute(a) } @@ -18440,6 +19694,7 @@ pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { transmute(a) } @@ -18450,6 +19705,7 @@ pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { transmute(a) } @@ -18460,6 +19716,7 @@ pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { transmute(a) } @@ -18470,6 +19727,7 @@ pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { transmute(a) } @@ -18480,6 +19738,7 @@ pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { transmute(a) } @@ -18490,6 +19749,7 @@ pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { transmute(a) } @@ -18500,6 +19760,7 @@ pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { transmute(a) } @@ -18510,6 +19771,7 @@ pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { transmute(a) } @@ -18520,6 +19782,7 @@ pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { transmute(a) } @@ -18530,6 +19793,7 @@ pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { transmute(a) } @@ -18540,6 +19804,7 @@ pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { transmute(a) } @@ -18550,6 +19815,7 @@ pub 
unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { transmute(a) } @@ -18560,6 +19826,7 @@ pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { transmute(a) } @@ -18570,6 +19837,7 @@ pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { transmute(a) } @@ -18580,6 +19848,7 @@ pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { transmute(a) } @@ -18590,6 +19859,7 @@ pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { transmute(a) } @@ -18600,6 +19870,7 @@ pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { transmute(a) } @@ -18610,6 +19881,7 @@ pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { transmute(a) } @@ -18620,6 +19892,7 @@ pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { transmute(a) } @@ -18630,6 +19903,7 @@ pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { transmute(a) } @@ -18640,6 +19914,7 @@ pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { transmute(a) } @@ -18650,6 +19925,7 @@ pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { transmute(a) } @@ -18660,6 +19936,7 @@ pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { transmute(a) } @@ -18670,6 +19947,7 @@ pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { transmute(a) } @@ -18680,6 +19958,7 @@ pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { transmute(a) } @@ -18690,6 +19969,7 @@ pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { transmute(a) } @@ -18700,6 +19980,7 @@ pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] 
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { transmute(a) } @@ -18710,6 +19991,7 @@ pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { transmute(a) } @@ -18720,6 +20002,7 @@ pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { transmute(a) } @@ -18730,6 +20013,7 @@ pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { transmute(a) } @@ -18740,6 +20024,7 @@ pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { transmute(a) } @@ -18750,6 +20035,7 @@ pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { transmute(a) } @@ -18760,6 +20046,7 @@ pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { transmute(a) } @@ -18770,6 +20057,7 @@ pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { transmute(a) } @@ -18780,6 +20068,7 @@ pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { transmute(a) } @@ -18790,6 +20079,7 @@ pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { transmute(a) } @@ -18800,6 +20090,7 @@ pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { transmute(a) } @@ -18810,6 +20101,7 @@ pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { transmute(a) } @@ -18820,6 +20112,7 @@ pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { transmute(a) } @@ -18830,6 +20123,7 @@ pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { transmute(a) } @@ -18840,6 +20134,7 @@ pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { transmute(a) } @@ -18850,6 +20145,7 @@ pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { transmute(a) } @@ -18860,6 +20156,7 @@ pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { transmute(a) } @@ -18870,6 +20167,7 @@ pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { transmute(a) } @@ -18880,6 +20178,7 @@ pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { transmute(a) } @@ -18890,6 +20189,7 @@ pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { transmute(a) } @@ -18900,6 +20200,7 @@ pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { transmute(a) } @@ -18910,6 +20211,7 @@ pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { transmute(a) } @@ -18920,6 +20222,7 @@ pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { transmute(a) } @@ -18930,6 +20233,7 @@ pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t 
{ transmute(a) } @@ -18940,6 +20244,7 @@ pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { transmute(a) } @@ -18950,6 +20255,7 @@ pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { transmute(a) } @@ -18960,6 +20266,7 @@ pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { transmute(a) } @@ -18970,6 +20277,7 @@ pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { transmute(a) } @@ -18980,6 +20288,7 @@ pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { transmute(a) } @@ -18990,6 +20299,7 @@ pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { transmute(a) } @@ -19000,6 +20310,7 @@ pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { transmute(a) } @@ -19010,6 +20321,7 @@ pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { transmute(a) } @@ -19020,6 +20332,7 @@ pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { transmute(a) } @@ -19030,6 +20343,7 @@ pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { transmute(a) } @@ -19040,6 +20354,7 @@ pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { transmute(a) } @@ -19050,6 +20365,7 @@ pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { transmute(a) } @@ -19060,6 +20376,7 @@ pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { transmute(a) } @@ -19070,6 +20387,7 @@ pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { transmute(a) } @@ -19080,6 +20398,7 @@ pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { transmute(a) } @@ -19090,6 +20409,7 @@ pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { transmute(a) } @@ -19100,6 +20420,7 @@ pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { transmute(a) } @@ -19110,6 +20431,7 @@ pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { transmute(a) } @@ -19120,6 +20442,7 @@ pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { transmute(a) } @@ -19130,6 +20453,7 @@ pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { transmute(a) } @@ -19140,6 +20464,7 @@ pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { transmute(a) } @@ -19150,6 +20475,7 @@ pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { transmute(a) } @@ -19160,6 +20486,7 @@ pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { transmute(a) } @@ -19170,6 +20497,7 @@ pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { transmute(a) } @@ -19180,6 +20508,7 @@ pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { transmute(a) } @@ -19190,6 +20519,7 @@ pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { transmute(a) } @@ -19200,6 +20530,7 @@ pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { transmute(a) } @@ -19210,6 +20541,7 @@ pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { transmute(a) } @@ -19220,6 +20552,7 @@ pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { transmute(a) } @@ -19230,6 +20563,7 @@ pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { transmute(a) } @@ -19240,6 +20574,7 @@ pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { transmute(a) } @@ -19250,6 +20585,7 @@ pub 
unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { transmute(a) } @@ -19260,6 +20596,7 @@ pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { transmute(a) } @@ -19270,6 +20607,7 @@ pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { transmute(a) } @@ -19280,6 +20618,7 @@ pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { transmute(a) } @@ -19290,6 +20629,7 @@ pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { transmute(a) } @@ -19300,6 +20640,7 @@ pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { transmute(a) } @@ -19310,6 +20651,7 @@ pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { transmute(a) } @@ -19320,6 +20662,7 @@ pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { transmute(a) } @@ -19330,6 +20673,7 @@ pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { transmute(a) } @@ -19340,6 +20684,7 @@ pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { transmute(a) } @@ -19350,6 +20695,7 @@ pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { transmute(a) } @@ -19360,6 +20706,7 @@ pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { transmute(a) } @@ -19370,6 +20717,7 @@ pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { transmute(a) } @@ -19380,6 +20728,7 @@ pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { transmute(a) } @@ -19390,6 +20739,7 @@ pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { transmute(a) } @@ -19400,6 +20750,7 @@ pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] 
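// ---- editorial note (not part of the diff) ----
// The added attribute is wrapped in `cfg_attr` because this generated file is
// compiled for both `arm` and `aarch64`, and only the aarch64 NEON API is
// being stabilized here (compare the bare `#[stable(...)]` lines in the
// aarch64-only file earlier in this patch). On an aarch64 build, each `+` line
// below expands to exactly
//
//     #[stable(feature = "neon_intrinsics", since = "1.59.0")]
//
// while on arm it expands to nothing, so the intrinsic stays unstable there.
// ---- end editorial note ----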
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { transmute(a) } @@ -19410,6 +20761,7 @@ pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { transmute(a) } @@ -19420,6 +20772,7 @@ pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { transmute(a) } @@ -19430,6 +20783,7 @@ pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { transmute(a) } @@ -19440,6 +20794,7 @@ pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { transmute(a) } @@ -19450,6 +20805,7 @@ pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { transmute(a) } @@ -19460,6 +20816,7 @@ pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { transmute(a) } @@ -19470,6 +20827,7 @@ pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { transmute(a) } @@ -19480,6 +20838,7 @@ pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { transmute(a) } @@ -19490,6 +20849,7 @@ pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { transmute(a) } @@ -19500,6 +20860,7 @@ pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { transmute(a) } @@ -19510,6 +20871,7 @@ pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { transmute(a) } @@ -19520,6 +20882,7 @@ pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { transmute(a) } @@ -19530,6 +20893,7 @@ pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { transmute(a) } @@ -19540,6 +20904,7 @@ pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { transmute(a) } @@ -19550,6 +20915,7 @@ pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { transmute(a) } @@ -19560,6 +20926,7 @@ pub unsafe fn 
vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { transmute(a) } @@ -19570,6 +20937,7 @@ pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { transmute(a) } @@ -19580,6 +20948,7 @@ pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { transmute(a) } @@ -19590,6 +20959,7 @@ pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { transmute(a) } @@ -19600,6 +20970,7 @@ pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { transmute(a) } @@ -19610,6 +20981,7 @@ pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { transmute(a) } @@ -19620,6 +20992,7 @@ pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { transmute(a) } @@ -19630,6 +21003,7 @@ pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_s64(a: 
int64x2_t) -> int16x8_t { transmute(a) } @@ -19640,6 +21014,7 @@ pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { transmute(a) } @@ -19650,6 +21025,7 @@ pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { transmute(a) } @@ -19660,6 +21036,7 @@ pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { transmute(a) } @@ -19670,6 +21047,7 @@ pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { transmute(a) } @@ -19680,6 +21058,7 @@ pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { transmute(a) } @@ -19690,6 +21069,7 @@ pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { transmute(a) } @@ -19700,6 +21080,7 @@ pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { transmute(a) } @@ -19710,6 +21091,7 @@ pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { transmute(a) } @@ -19720,6 +21102,7 @@ pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { transmute(a) } @@ -19730,6 +21113,7 @@ pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { transmute(a) } @@ -19740,6 +21124,7 @@ pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { transmute(a) } @@ -19750,6 +21135,7 @@ pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { transmute(a) } @@ -19760,6 +21146,7 @@ pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { transmute(a) } @@ -19770,6 +21157,7 @@ pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { transmute(a) } @@ -19780,6 +21168,7 @@ pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { transmute(a) } @@ -19790,6 +21179,7 @@ pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] 
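// ---- editorial note (not part of the diff) ----
// The `assert_instr(nop)` context lines surrounding each change encode the
// key property of these intrinsics: a reinterpret cast is expected to emit no
// instruction of its own; the register is simply read under a different lane
// type. A rough scalar analogy (hypothetical helper, not part of the patched
// file):
fn reinterpret_u64_as_u16x4(x: u64) -> [u16; 4] {
    // Same 64 bits, relabelled; u64 and [u16; 4] have identical size, so
    // this transmute is valid and typically compiles to no code at all.
    unsafe { core::mem::transmute(x) }
}
// ---- end editorial note ----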
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { transmute(a) } @@ -19800,6 +21190,7 @@ pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { transmute(a) } @@ -19810,6 +21201,7 @@ pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { transmute(a) } @@ -19820,6 +21212,7 @@ pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { transmute(a) } @@ -19830,6 +21223,7 @@ pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { transmute(a) } @@ -19840,6 +21234,7 @@ pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { transmute(a) } @@ -19850,6 +21245,7 @@ pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { transmute(a) } @@ -19860,6 +21256,7 @@ pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { transmute(a) } @@ -19870,6 +21267,7 @@ pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> 
poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { transmute(a) } @@ -19880,6 +21278,7 @@ pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { transmute(a) } @@ -19890,6 +21289,7 @@ pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { transmute(a) } @@ -19900,6 +21300,7 @@ pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { transmute(a) } @@ -19910,6 +21311,7 @@ pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { transmute(a) } @@ -19920,6 +21322,7 @@ pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { transmute(a) } @@ -19930,6 +21333,7 @@ pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { transmute(a) } @@ -19940,6 +21344,7 @@ pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { transmute(a) } @@ 
-19950,6 +21355,7 @@ pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { transmute(a) } @@ -19960,6 +21366,7 @@ pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { transmute(a) } @@ -19970,6 +21377,7 @@ pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { transmute(a) } @@ -19980,6 +21388,7 @@ pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { transmute(a) } @@ -19990,6 +21399,7 @@ pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { transmute(a) } @@ -20000,6 +21410,7 @@ pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { transmute(a) } @@ -20010,6 +21421,7 @@ pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { transmute(a) } @@ -20020,6 +21432,7 @@ pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = 
"1.59.0"))] pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { transmute(a) } @@ -20030,6 +21443,7 @@ pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { transmute(a) } @@ -20040,6 +21454,7 @@ pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { transmute(a) } @@ -20050,6 +21465,7 @@ pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { transmute(a) } @@ -20060,6 +21476,7 @@ pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { transmute(a) } @@ -20070,6 +21487,7 @@ pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { transmute(a) } @@ -20080,6 +21498,7 @@ pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { transmute(a) } @@ -20090,6 +21509,7 @@ pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { transmute(a) } @@ -20100,6 +21520,7 @@ pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { transmute(a) } @@ -20110,6 +21531,7 @@ pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { transmute(a) } @@ -20120,6 +21542,7 @@ pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { transmute(a) } @@ -20130,6 +21553,7 @@ pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { transmute(a) } @@ -20140,6 +21564,7 @@ pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { transmute(a) } @@ -20150,6 +21575,7 @@ pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { transmute(a) } @@ -20160,6 +21586,7 @@ pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { transmute(a) } @@ -20170,6 +21597,7 @@ pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { transmute(a) } @@ -20180,6 +21608,7 @@ pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", 
target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { transmute(a) } @@ -20190,6 +21619,7 @@ pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { transmute(a) } @@ -20200,6 +21630,7 @@ pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { transmute(a) } @@ -20210,6 +21641,7 @@ pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { transmute(a) } @@ -20220,6 +21652,7 @@ pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { transmute(a) } @@ -20230,6 +21663,7 @@ pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { transmute(a) } @@ -20240,6 +21674,7 @@ pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { transmute(a) } @@ -20250,6 +21685,7 @@ pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { transmute(a) } @@ -20260,6 +21696,7 @@ pub unsafe fn 
vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { transmute(a) } @@ -20270,6 +21707,7 @@ pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { transmute(a) } @@ -20280,6 +21718,7 @@ pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { transmute(a) } @@ -20290,6 +21729,7 @@ pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { transmute(a) } @@ -20300,6 +21740,7 @@ pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { transmute(a) } @@ -20310,6 +21751,7 @@ pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { transmute(a) } @@ -20320,6 +21762,7 @@ pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { transmute(a) } @@ -20330,6 +21773,7 @@ pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { transmute(a) } @@ -20340,6 +21784,7 @@ pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { transmute(a) } @@ -20350,6 +21795,7 @@ pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { transmute(a) } @@ -20360,6 +21806,7 @@ pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { transmute(a) } @@ -20370,6 +21817,7 @@ pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { transmute(a) } @@ -20380,6 +21828,7 @@ pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { transmute(a) } @@ -20390,6 +21839,7 @@ pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { transmute(a) } @@ -20400,6 +21850,7 @@ pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { transmute(a) } @@ -20410,6 +21861,7 @@ pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] 
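// Illustrative sketch (not part of the generated diff): every
// vreinterpret_*/vreinterpretq_* intrinsic stabilized in these hunks is a
// pure bit reinterpretation -- the generated body is just `transmute(a)`,
// and the assert_instr(nop) attributes check that no instruction is emitted
// for it. For example, viewing eight u8 lanes as one u64 lane leaves the
// underlying 64 bits untouched:
#[cfg(target_arch = "aarch64")]
unsafe fn reinterpret_demo() {
    use core::arch::aarch64::*;
    let bytes: uint8x8_t = vdup_n_u8(0x80);
    let wide: uint64x1_t = vreinterpret_u64_u8(bytes);
    assert_eq!(vget_lane_u64::<0>(wide), 0x8080_8080_8080_8080);
}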
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { transmute(a) } @@ -20420,6 +21872,7 @@ pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { transmute(a) } @@ -20430,6 +21883,7 @@ pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { transmute(a) } @@ -20440,6 +21894,7 @@ pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { transmute(a) } @@ -20450,6 +21905,7 @@ pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { transmute(a) } @@ -20460,6 +21916,7 @@ pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { transmute(a) } @@ -20470,6 +21927,7 @@ pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { transmute(a) } @@ -20480,6 +21938,7 @@ pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { transmute(a) } @@ -20490,6 +21949,7 @@ pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] 
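// Illustrative sketch (uses only intrinsics that appear in this diff):
// because both directions are transmutes, a reinterpret followed by its
// inverse is a bit-exact round trip that the compiler folds away entirely:
#[cfg(target_arch = "aarch64")]
mod round_trip_sketch {
    use core::arch::aarch64::*;
    // int64x2_t -> uint8x16_t -> int64x2_t compiles to nothing but moves.
    pub unsafe fn round_trip(x: int64x2_t) -> int64x2_t {
        vreinterpretq_s64_u8(vreinterpretq_u8_s64(x))
    }
}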
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { transmute(a) } @@ -20500,6 +21960,7 @@ pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { transmute(a) } @@ -20510,6 +21971,7 @@ pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { transmute(a) } @@ -20520,6 +21982,7 @@ pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { transmute(a) } @@ -20530,6 +21993,7 @@ pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { transmute(a) } @@ -20540,6 +22004,7 @@ pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { transmute(a) } @@ -20550,6 +22015,7 @@ pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { transmute(a) } @@ -20560,6 +22026,7 @@ pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { transmute(a) } @@ -20570,6 +22037,7 @@ pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { #[cfg_attr(target_arch = 
"arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { transmute(a) } @@ -20580,6 +22048,7 @@ pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { transmute(a) } @@ -20590,6 +22059,7 @@ pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { transmute(a) } @@ -20600,6 +22070,7 @@ pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { transmute(a) } @@ -20610,6 +22081,7 @@ pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { transmute(a) } @@ -20620,6 +22092,7 @@ pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { transmute(a) } @@ -20630,6 +22103,7 @@ pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { transmute(a) } @@ -20640,6 +22114,7 @@ pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { transmute(a) } @@ -20650,6 
+22125,7 @@ pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { transmute(a) } @@ -20660,6 +22136,7 @@ pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { transmute(a) } @@ -20670,6 +22147,7 @@ pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { transmute(a) } @@ -20680,6 +22158,7 @@ pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { transmute(a) } @@ -20690,6 +22169,7 @@ pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { transmute(a) } @@ -20700,6 +22180,7 @@ pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { transmute(a) } @@ -20710,6 +22191,7 @@ pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { transmute(a) } @@ -20720,6 +22202,7 @@ pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { transmute(a) } @@ -20730,6 +22213,7 @@ pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { transmute(a) } @@ -20740,6 +22224,7 @@ pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { transmute(a) } @@ -20750,6 +22235,7 @@ pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { transmute(a) } @@ -20760,6 +22246,7 @@ pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { transmute(a) } @@ -20770,6 +22257,7 @@ pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { transmute(a) } @@ -20780,6 +22268,7 @@ pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { transmute(a) } @@ -20790,6 +22279,7 @@ pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { transmute(a) } @@ -20800,6 +22290,7 @@ pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { transmute(a) } @@ -20810,6 +22301,7 @@ pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { transmute(a) } @@ -20820,6 +22312,7 @@ pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { transmute(a) } @@ -20830,6 +22323,7 @@ pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { transmute(a) } @@ -20840,6 +22334,7 @@ pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { transmute(a) } @@ -20850,6 +22345,7 @@ pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { transmute(a) } @@ -20860,6 +22356,7 @@ pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { transmute(a) } @@ -20870,6 +22367,7 @@ pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { transmute(a) } @@ -20880,6 +22378,7 @@ pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { transmute(a) } @@ -20890,6 +22389,7 @@ pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { transmute(a) } @@ -20900,6 +22400,7 @@ pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { transmute(a) } @@ -20910,6 +22411,7 @@ pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { transmute(a) } @@ -20920,6 +22422,7 @@ pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { transmute(a) } @@ -20930,6 +22433,7 @@ pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { transmute(a) } @@ -20940,6 +22444,7 @@ pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { transmute(a) } @@ -20950,6 +22455,7 @@ pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { transmute(a) } @@ -20960,6 
+22466,7 @@ pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { transmute(a) } @@ -20970,6 +22477,7 @@ pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { transmute(a) } @@ -20980,6 +22488,7 @@ pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { transmute(a) } @@ -20990,6 +22499,7 @@ pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { transmute(a) } @@ -21000,6 +22510,7 @@ pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { transmute(a) } @@ -21010,6 +22521,7 @@ pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { transmute(a) } @@ -21020,6 +22532,7 @@ pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { transmute(a) } @@ -21030,6 +22543,7 @@ pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { transmute(a) } @@ -21040,6 +22554,7 @@ pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { transmute(a) } @@ -21050,6 +22565,7 @@ pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { transmute(a) } @@ -21060,6 +22576,7 @@ pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { transmute(a) } @@ -21070,6 +22587,7 @@ pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { transmute(a) } @@ -21080,6 +22598,7 @@ pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { transmute(a) } @@ -21090,6 +22609,7 @@ pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { transmute(a) } @@ -21100,6 +22620,7 @@ pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { transmute(a) } @@ -21110,6 +22631,7 @@ pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21126,6 +22648,7 @@ vrshl_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21142,6 +22665,7 @@ vrshlq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21158,6 +22682,7 @@ vrshl_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21174,6 +22699,7 @@ vrshlq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21190,6 +22716,7 @@ vrshl_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21206,6 +22733,7 @@ vrshlq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21222,6 +22750,7 @@ vrshl_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21238,6 +22767,7 @@ vrshlq_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21254,6 +22784,7 @@ vrshl_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21270,6 +22801,7 @@ vrshlq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21286,6 +22818,7 @@ vrshl_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21302,6 +22835,7 @@ vrshlq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21318,6 +22852,7 @@ vrshl_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21334,6 +22869,7 @@ vrshlq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21350,6 +22886,7 @@ vrshl_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21367,6 +22904,7 @@ vrshlq_u64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); vrshl_s8(a, vdup_n_s8((-N) as _)) @@ -21379,6 +22917,7 @@ pub unsafe fn vrshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); vrshlq_s8(a, vdupq_n_s8((-N) as _)) @@ -21391,6 +22930,7 @@ pub unsafe fn vrshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); vrshl_s16(a, vdup_n_s16((-N) as _)) @@ -21403,6 +22943,7 @@ pub unsafe fn vrshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); vrshlq_s16(a, vdupq_n_s16((-N) as _)) @@ -21415,6 +22956,7 @@ pub unsafe fn vrshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); vrshl_s32(a, vdup_n_s32((-N) as _)) @@ -21427,6 +22969,7 @@ pub unsafe fn vrshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); vrshlq_s32(a, vdupq_n_s32((-N) as _)) @@ -21439,6 +22982,7 @@ pub unsafe fn vrshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); vrshl_s64(a, vdup_n_s64((-N) as _))
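// Illustrative sketch (not part of the generated diff): vrshr_n_* performs
// a rounding shift right by the const generic N, implemented above as
// vrshl_* with a negated, duplicated shift count. Rounding adds
// 1 << (N - 1) before shifting, so with N = 2 an i8 lane of -5 yields -1
// (nearest to -1.25) where a plain arithmetic shift would give -2:
#[cfg(target_arch = "aarch64")]
unsafe fn rounding_shift_demo() {
    use core::arch::aarch64::*;
    let r = vrshr_n_s8::<2>(vdup_n_s8(-5));
    assert_eq!(vget_lane_s8::<0>(r), -1);
}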
@@ -21451,6 +22995,7 @@ pub unsafe fn vrshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); vrshlq_s64(a, vdupq_n_s64((-N) as _)) @@ -21463,6 +23008,7 @@ pub unsafe fn vrshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); vrshl_u8(a, vdup_n_s8((-N) as _)) @@ -21475,6 +23021,7 @@ pub unsafe fn vrshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); vrshlq_u8(a, vdupq_n_s8((-N) as _)) @@ -21487,6 +23034,7 @@ pub unsafe fn vrshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); vrshl_u16(a, vdup_n_s16((-N) as _)) @@ -21499,6 +23047,7 @@ pub unsafe fn vrshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); vrshlq_u16(a, vdupq_n_s16((-N) as _)) @@ -21511,6 +23060,7 @@ pub unsafe fn vrshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); vrshl_u32(a, vdup_n_s32((-N) as _)) @@ -21523,6 +23073,7 @@ pub unsafe fn vrshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); vrshlq_u32(a, vdupq_n_s32((-N) as _))
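// Illustrative sketch (not part of the generated diff): the unsigned forms
// follow the same pattern, differing only in the checked instruction (urshr
// instead of srshr), and static_assert! rejects any N outside 1..=lane
// width at compile time. With N = 2, a u8 lane of 7 becomes
// (7 + 2) >> 2 = 2 instead of the truncated 7 >> 2 = 1:
#[cfg(target_arch = "aarch64")]
unsafe fn unsigned_rounding_demo() {
    use core::arch::aarch64::*;
    let r = vrshr_n_u8::<2>(vdup_n_u8(7));
    assert_eq!(vget_lane_u8::<0>(r), 2);
}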
"aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); vrshl_u64(a, vdup_n_s64((-N) as _)) @@ -21547,6 +23099,7 @@ pub unsafe fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); vrshlq_u64(a, vdupq_n_s64((-N) as _)) @@ -21556,7 +23109,7 @@ pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr(test, assert_instr(vrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); @@ -21572,8 +23125,9 @@ vrshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16 #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rshrn, N = 2))] +#[cfg_attr(test, assert_instr(rshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); #[allow(improper_ctypes)] @@ -21588,7 +23142,7 @@ vrshrn_n_s16_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr(test, assert_instr(vrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); @@ -21604,8 +23158,9 @@ vrshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rshrn, N = 2))] +#[cfg_attr(test, assert_instr(rshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); #[allow(improper_ctypes)] @@ -21620,7 +23175,7 @@ vrshrn_n_s32_(a, N) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr(test, assert_instr(vrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); @@ -21636,8 +23191,9 @@ vrshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64)) #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rshrn, N = 2))] +#[cfg_attr(test, assert_instr(rshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -21655,6 +23211,7 @@ vrshrn_n_s64_(a, N) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); transmute(vrshrn_n_s16::(transmute(a))) @@ -21667,6 +23224,7 @@ pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); transmute(vrshrn_n_s32::(transmute(a))) @@ -21679,6 +23237,7 @@ pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); transmute(vrshrn_n_s64::(transmute(a))) @@ -21691,6 +23250,7 @@ pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_add(a, vrshr_n_s8::(b)) @@ -21703,6 +23263,7 @@ pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_add(a, vrshrq_n_s8::(b)) @@ -21715,6 +23276,7 @@ pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_add(a, vrshr_n_s16::(b)) @@ -21727,6 +23289,7 @@ pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_add(a, vrshrq_n_s16::(b)) @@ -21739,6 +23302,7 @@ pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 
2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_add(a, vrshr_n_s32::(b)) @@ -21751,6 +23315,7 @@ pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_add(a, vrshrq_n_s32::(b)) @@ -21763,6 +23328,7 @@ pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); simd_add(a, vrshr_n_s64::(b)) @@ -21775,6 +23341,7 @@ pub unsafe fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); simd_add(a, vrshrq_n_s64::(b)) @@ -21787,6 +23354,7 @@ pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_add(a, vrshr_n_u8::(b)) @@ -21799,6 +23367,7 @@ pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_add(a, vrshrq_n_u8::(b)) @@ -21811,6 +23380,7 @@ pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_add(a, vrshr_n_u16::(b)) @@ -21823,6 +23393,7 @@ pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x 
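(Editor's note, not part of the diff: `vrsra_n_*` is the accumulate form of the rounding shift above, per lane `a + vrshr_n::<N>(b)`, exactly as the `simd_add` bodies in these hunks spell out. A sketch under the same AArch64 assumption; the demo name is hypothetical:)

    #[cfg(target_arch = "aarch64")]
    #[target_feature(enable = "neon")]
    unsafe fn demo_vrsra() {
        use core::arch::aarch64::*;
        let acc = vdup_n_u8(10);
        let b = vdup_n_u8(7);
        // ursra: 10 + ((7 + 2) >> 2) == 12 in every lane.
        let r = vrsra_n_u8::<2>(acc, b);
        assert_eq!(vget_lane_u8::<0>(r), 12);
    }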
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_add(a, vrshrq_n_u16::(b)) @@ -21835,6 +23406,7 @@ pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_add(a, vrshr_n_u32::(b)) @@ -21847,6 +23419,7 @@ pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_add(a, vrshrq_n_u32::(b)) @@ -21859,6 +23432,7 @@ pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); simd_add(a, vrshr_n_u64::(b)) @@ -21871,6 +23445,7 @@ pub unsafe fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); simd_add(a, vrshrq_n_u64::(b)) @@ -21882,6 +23457,7 @@ pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21898,6 +23474,7 @@ vrsubhn_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21914,6 +23491,7 @@ vrsubhn_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21930,6 +23508,7 @@ vrsubhn_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { transmute(vrsubhn_s16(transmute(a), transmute(b))) } @@ -21940,6 +23519,7 @@ pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { transmute(vrsubhn_s32(transmute(a), transmute(b))) } @@ -21950,6 +23530,7 @@ pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { transmute(vrsubhn_s64(transmute(a), transmute(b))) } @@ -21961,6 +23542,7 @@ pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a) @@ -21973,6 +23555,7 @@ pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a) @@ -21985,6 +23568,7 @@ pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -21997,6 +23581,7 @@ pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] 
#[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_s64<const LANE: i32>(a: i64, b: int64x1_t) -> int64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_insert(b, LANE as u32, a)
@@ -22009,6 +23594,7 @@ pub unsafe fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_u8<const LANE: i32>(a: u8, b: uint8x8_t) -> uint8x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a)
@@ -22021,6 +23607,7 @@ pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_u16<const LANE: i32>(a: u16, b: uint16x4_t) -> uint16x4_t { static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a)
@@ -22033,6 +23620,7 @@ pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_u32<const LANE: i32>(a: u32, b: uint32x2_t) -> uint32x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a)
@@ -22045,6 +23633,7 @@ pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_u64<const LANE: i32>(a: u64, b: uint64x1_t) -> uint64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_insert(b, LANE as u32, a)
@@ -22057,6 +23646,7 @@ pub unsafe fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_p8<const LANE: i32>(a: p8, b: poly8x8_t) -> poly8x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a)
@@ -22069,6 +23659,7 @@ pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_p16<const LANE: i32>(a: p16, b: poly16x4_t) -> poly16x4_t { static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a)
@@ -22081,6 +23672,7 @@ pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_p64<const LANE: i32>(a: p64, b: poly64x1_t) -> poly64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_insert(b, LANE as u32, a)
@@ -22093,6 +23685,7 @@ pub unsafe fn vset_lane_p64(a: p64, b: poly64x1_t) -> poly64x1_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_s8<const LANE: i32>(a: i8, b: int8x16_t) -> int8x16_t { static_assert_imm4!(LANE); simd_insert(b, LANE as u32, a)
@@ -22105,6 +23698,7 @@ pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_s16<const LANE: i32>(a: i16, b: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a)
@@ -22117,6 +23711,7 @@ pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_s32<const LANE: i32>(a: i32, b: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a)
@@ -22129,6 +23724,7 @@ pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_s64<const LANE: i32>(a: i64, b: int64x2_t) -> int64x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a)
@@ -22141,6 +23737,7 @@ pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_u8<const LANE: i32>(a: u8, b: uint8x16_t) -> uint8x16_t { static_assert_imm4!(LANE); simd_insert(b, LANE as u32, a)
@@ -22153,6 +23750,7 @@ pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_u16<const LANE: i32>(a: u16, b: uint16x8_t) -> uint16x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a)
@@ -22165,6 +23763,7 @@ pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_u32<const LANE: i32>(a: u32, b: uint32x4_t) -> uint32x4_t {
static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a) @@ -22177,6 +23776,7 @@ pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -22189,6 +23789,7 @@ pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t { static_assert_imm4!(LANE); simd_insert(b, LANE as u32, a) @@ -22201,6 +23802,7 @@ pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a) @@ -22213,6 +23815,7 @@ pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -22225,6 +23828,7 @@ pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -22237,6 +23841,7 @@ pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32x4_t { static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a) @@ -22248,6 +23853,7 @@ pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22264,6 +23870,7 @@ vshl_s8_(a, b) #[cfg_attr(target_arch 
= "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22280,6 +23887,7 @@ vshlq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22296,6 +23904,7 @@ vshl_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22312,6 +23921,7 @@ vshlq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22328,6 +23938,7 @@ vshl_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22344,6 +23955,7 @@ vshlq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22360,6 +23972,7 @@ vshl_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22376,6 +23989,7 @@ vshlq_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22392,6 +24006,7 @@ vshl_u8_(a, b) #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22408,6 +24023,7 @@ vshlq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22424,6 +24040,7 @@ vshl_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22440,6 +24057,7 @@ vshlq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22456,6 +24074,7 @@ vshl_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22472,6 +24091,7 @@ vshlq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22488,6 +24108,7 @@ vshl_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22505,6 +24126,7 @@ vshlq_u64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { static_assert_imm3!(N); simd_shl(a, vdup_n_s8(N as _)) @@ -22517,6 +24139,7 @@ pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { static_assert_imm3!(N); simd_shl(a, vdupq_n_s8(N as _)) @@ -22529,6 +24152,7 @@ pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { static_assert_imm4!(N); simd_shl(a, vdup_n_s16(N as _)) @@ -22541,6 +24165,7 @@ pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { static_assert_imm4!(N); simd_shl(a, vdupq_n_s16(N as _)) @@ -22553,6 +24178,7 @@ pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { static_assert_imm5!(N); simd_shl(a, vdup_n_s32(N as _)) @@ -22565,6 +24191,7 @@ pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { static_assert_imm5!(N); simd_shl(a, vdupq_n_s32(N as _)) @@ -22577,6 +24204,7 @@ pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { static_assert_imm3!(N); simd_shl(a, vdup_n_u8(N as _)) @@ -22589,6 +24217,7 @@ pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { static_assert_imm3!(N); simd_shl(a, vdupq_n_u8(N as _)) @@ -22601,6 +24230,7 @@ pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { 
static_assert_imm4!(N); simd_shl(a, vdup_n_u16(N as _)) @@ -22613,6 +24243,7 @@ pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { static_assert_imm4!(N); simd_shl(a, vdupq_n_u16(N as _)) @@ -22625,6 +24256,7 @@ pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { static_assert_imm5!(N); simd_shl(a, vdup_n_u32(N as _)) @@ -22637,6 +24269,7 @@ pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { static_assert_imm5!(N); simd_shl(a, vdupq_n_u32(N as _)) @@ -22649,6 +24282,7 @@ pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_s64(a: int64x1_t) -> int64x1_t { static_assert_imm6!(N); simd_shl(a, vdup_n_s64(N as _)) @@ -22661,6 +24295,7 @@ pub unsafe fn vshl_n_s64(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { static_assert_imm6!(N); simd_shl(a, vdupq_n_s64(N as _)) @@ -22673,6 +24308,7 @@ pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_u64(a: uint64x1_t) -> uint64x1_t { static_assert_imm6!(N); simd_shl(a, vdup_n_u64(N as _)) @@ -22685,6 +24321,7 @@ pub unsafe fn vshl_n_u64(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { static_assert_imm6!(N); simd_shl(a, vdupq_n_u64(N as _)) @@ -22697,6 +24334,7 @@ pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshll, N = 2))] 
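(Editor's note, not part of the diff: `vshll_n_*` widens each lane to twice its width via `simd_cast` before shifting, so results that would overflow the narrow type are preserved. A sketch, assuming AArch64; the demo name is hypothetical:)

    #[cfg(target_arch = "aarch64")]
    #[target_feature(enable = "neon")]
    unsafe fn demo_vshll_n() {
        use core::arch::aarch64::*;
        let a = vdup_n_u8(200);
        // Widen u8 -> u16 first, then shift: 200 << 1 == 400, no wrap-around.
        let r = vshll_n_u8::<1>(a);
        assert_eq!(vgetq_lane_u16::<0>(r), 400);
    }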
#[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { static_assert!(N : i32 where N >= 0 && N <= 8); simd_shl(simd_cast(a), vdupq_n_s16(N as _)) @@ -22709,6 +24347,7 @@ pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshll, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { static_assert!(N : i32 where N >= 0 && N <= 16); simd_shl(simd_cast(a), vdupq_n_s32(N as _)) @@ -22721,6 +24360,7 @@ pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshll, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { static_assert!(N : i32 where N >= 0 && N <= 32); simd_shl(simd_cast(a), vdupq_n_s64(N as _)) @@ -22733,6 +24373,7 @@ pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushll, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { static_assert!(N : i32 where N >= 0 && N <= 8); simd_shl(simd_cast(a), vdupq_n_u16(N as _)) @@ -22745,6 +24386,7 @@ pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushll, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { static_assert!(N : i32 where N >= 0 && N <= 16); simd_shl(simd_cast(a), vdupq_n_u32(N as _)) @@ -22757,6 +24399,7 @@ pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushll, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { static_assert!(N : i32 where N >= 0 && N <= 32); simd_shl(simd_cast(a), vdupq_n_u64(N as _)) @@ -22769,6 +24412,7 @@ pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); let n: i32 = if N == 8 { 7 } else { N }; @@ -22782,6 +24426,7 @@ pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] 
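(Editor's note, not part of the diff: for the signed `vshr_n_*` family, `N` equal to the lane width is accepted and clamped to width minus one, as the `let n: i32 = if N == 8 { 7 } else { N };` lines in these hunks show, so the result is the sign bit smeared across the lane. A sketch, assuming AArch64; the demo name is hypothetical:)

    #[cfg(target_arch = "aarch64")]
    #[target_feature(enable = "neon")]
    unsafe fn demo_vshr_signed() {
        use core::arch::aarch64::*;
        let a = vdup_n_s8(-128);
        // N == 8 is clamped to 7: -128 >> 7 == -1 (all sign bits).
        let r = vshr_n_s8::<8>(a);
        assert_eq!(vget_lane_s8::<0>(r), -1);
    }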
#[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); let n: i32 = if N == 8 { 7 } else { N }; @@ -22795,6 +24440,7 @@ pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); let n: i32 = if N == 16 { 15 } else { N }; @@ -22808,6 +24454,7 @@ pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); let n: i32 = if N == 16 { 15 } else { N }; @@ -22821,6 +24468,7 @@ pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); let n: i32 = if N == 32 { 31 } else { N }; @@ -22834,6 +24482,7 @@ pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); let n: i32 = if N == 32 { 31 } else { N }; @@ -22847,6 +24496,7 @@ pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_s64(a: int64x1_t) -> int64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); let n: i32 = if N == 64 { 63 } else { N }; @@ -22860,6 +24510,7 @@ pub unsafe fn vshr_n_s64(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); let n: i32 = if N == 64 { 63 } else { N }; @@ -22873,6 +24524,7 @@ pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] 
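(Editor's note, not part of the diff: the unsigned `vshr_n_*` variants handle the same edge differently: `N` equal to the lane width short-circuits to an all-zero vector via the `return vdup*_n_*(0)` paths visible below. A sketch, assuming AArch64; the demo name is hypothetical:)

    #[cfg(target_arch = "aarch64")]
    #[target_feature(enable = "neon")]
    unsafe fn demo_vshr_unsigned() {
        use core::arch::aarch64::*;
        let a = vdup_n_u8(0xFF);
        // N == 8 returns zero outright rather than clamping.
        let r = vshr_n_u8::<8>(a);
        assert_eq!(vget_lane_u8::<0>(r), 0);
    }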
#[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); let n: i32 = if N == 8 { return vdup_n_u8(0); } else { N }; @@ -22886,6 +24538,7 @@ pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); let n: i32 = if N == 8 { return vdupq_n_u8(0); } else { N }; @@ -22899,6 +24552,7 @@ pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); let n: i32 = if N == 16 { return vdup_n_u16(0); } else { N }; @@ -22912,6 +24566,7 @@ pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); let n: i32 = if N == 16 { return vdupq_n_u16(0); } else { N }; @@ -22925,6 +24580,7 @@ pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); let n: i32 = if N == 32 { return vdup_n_u32(0); } else { N }; @@ -22938,6 +24594,7 @@ pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); let n: i32 = if N == 32 { return vdupq_n_u32(0); } else { N }; @@ -22951,6 +24608,7 @@ pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); let n: i32 = if N == 64 { return vdup_n_u64(0); } else { N }; @@ -22964,6 +24622,7 @@ pub unsafe fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vshr.u64", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); let n: i32 = if N == 64 { return vdupq_n_u64(0); } else { N }; @@ -22977,6 +24636,7 @@ pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_cast(simd_shr(a, vdupq_n_s16(N as _))) @@ -22989,6 +24649,7 @@ pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_cast(simd_shr(a, vdupq_n_s32(N as _))) @@ -23001,6 +24662,7 @@ pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_cast(simd_shr(a, vdupq_n_s64(N as _))) @@ -23013,6 +24675,7 @@ pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_cast(simd_shr(a, vdupq_n_u16(N as _))) @@ -23025,6 +24688,7 @@ pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_cast(simd_shr(a, vdupq_n_u32(N as _))) @@ -23037,6 +24701,7 @@ pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_cast(simd_shr(a, vdupq_n_u64(N as _))) @@ -23049,6 +24714,7 @@ pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t 
{
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert!(N : i32 where N >= 1 && N <= 8);
    simd_add(a, vshr_n_s8::<N>(b))
@@ -23061,6 +24727,7 @@ pub unsafe fn vsra_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    static_assert!(N : i32 where N >= 1 && N <= 8);
    simd_add(a, vshrq_n_s8::<N>(b))
@@ -23073,6 +24740,7 @@ pub unsafe fn vsraq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert!(N : i32 where N >= 1 && N <= 16);
    simd_add(a, vshr_n_s16::<N>(b))
@@ -23085,6 +24753,7 @@ pub unsafe fn vsra_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    static_assert!(N : i32 where N >= 1 && N <= 16);
    simd_add(a, vshrq_n_s16::<N>(b))
@@ -23097,6 +24766,7 @@ pub unsafe fn vsraq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert!(N : i32 where N >= 1 && N <= 32);
    simd_add(a, vshr_n_s32::<N>(b))
@@ -23109,6 +24779,7 @@ pub unsafe fn vsra_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    static_assert!(N : i32 where N >= 1 && N <= 32);
    simd_add(a, vshrq_n_s32::<N>(b))
@@ -23121,6 +24792,7 @@ pub unsafe fn vsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    static_assert!(N : i32 where N >= 1 && N <= 64);
    simd_add(a, vshr_n_s64::<N>(b))
@@ -23133,6 +24805,7 @@ pub unsafe fn vsra_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    static_assert!(N : i32 where N >= 1 && N <= 64);
    simd_add(a, vshrq_n_s64::<N>(b))
@@ -23145,6 +24818,7 @@ pub unsafe fn vsraq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert!(N : i32 where N >= 1 && N <= 8);
    simd_add(a, vshr_n_u8::<N>(b))
@@ -23157,6 +24831,7 @@ pub unsafe fn vsra_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert!(N : i32 where N >= 1 && N <= 8);
    simd_add(a, vshrq_n_u8::<N>(b))
@@ -23169,6 +24844,7 @@ pub unsafe fn vsraq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert!(N : i32 where N >= 1 && N <= 16);
    simd_add(a, vshr_n_u16::<N>(b))
@@ -23181,6 +24857,7 @@ pub unsafe fn vsra_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert!(N : i32 where N >= 1 && N <= 16);
    simd_add(a, vshrq_n_u16::<N>(b))
@@ -23193,6 +24870,7 @@ pub unsafe fn vsraq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert!(N : i32 where N >= 1 && N <= 32);
    simd_add(a, vshr_n_u32::<N>(b))
@@ -23205,6 +24883,7 @@ pub unsafe fn vsra_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert!(N : i32 where N >= 1 && N <= 32);
    simd_add(a, vshrq_n_u32::<N>(b))
@@ -23217,6 +24896,7 @@ pub unsafe fn vsraq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    static_assert!(N : i32 where N >= 1 && N <= 64);
    simd_add(a, vshr_n_u64::<N>(b))
@@ -23229,6 +24909,7 @@ pub unsafe fn vsra_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert!(N : i32 where N >= 1 && N <= 64);
    simd_add(a, vshrq_n_u64::<N>(b))
@@ -23240,6 +24921,7 @@ pub unsafe fn vsraq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t {
    let a1: int8x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
    let b1: int8x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
@@ -23252,6 +24934,7 @@ pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t {
    let a1: int16x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]);
    let b1: int16x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]);
@@ -23264,6 +24947,7 @@ pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t {
    let a1: int8x16_t = simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]);
    let b1: int8x16_t = simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]);
@@ -23276,6 +24960,7 @@ pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t {
    let a1: int16x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
    let b1: int16x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
@@ -23288,6 +24973,7 @@ pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { let a1: int32x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]); let b1: int32x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]); @@ -23300,6 +24986,7 @@ pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { let a1: uint8x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: uint8x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -23312,6 +24999,7 @@ pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { let a1: uint16x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]); let b1: uint16x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]); @@ -23324,6 +25012,7 @@ pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { let a1: uint8x16_t = simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]); let b1: uint8x16_t = simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]); @@ -23336,6 +25025,7 @@ pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { let a1: uint16x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: uint16x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -23348,6 +25038,7 @@ pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { let a1: uint32x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]); let b1: uint32x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]); @@ -23360,6 +25051,7 @@ pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { let a1: poly8x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: poly8x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -23372,6 +25064,7 @@ pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { let a1: poly16x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]); let b1: poly16x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]); @@ -23384,6 +25077,7 @@ pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { let a1: poly8x16_t = simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]); let b1: poly8x16_t = simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]); @@ -23396,6 +25090,7 @@ pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { let a1: poly16x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: poly16x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -23408,6 +25103,7 @@ pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { let a1: int32x2_t = simd_shuffle2!(a, b, [0, 2]); let b1: int32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -23420,6 +25116,7 @@ pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { let a1: uint32x2_t = simd_shuffle2!(a, b, [0, 2]); let b1: uint32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -23432,6 +25129,7 @@ pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { let a1: float32x2_t = simd_shuffle2!(a, b, [0, 2]); let b1: float32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -23444,6 +25142,7 @@ pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { let a1: float32x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]); let b1: float32x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]); @@ -23456,6 +25155,7 @@ pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { let a0: int8x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: int8x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -23468,6 +25168,7 @@ pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { let a0: int16x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: int16x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -23480,6 +25181,7 @@ pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { let a0: uint8x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: uint8x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -23492,6 +25194,7 @@ pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { let a0: uint16x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: uint16x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -23504,6 +25207,7 @@ pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] +#[cfg_attr(target_arch = 
"aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { let a0: poly8x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: poly8x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -23516,6 +25220,7 @@ pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { let a0: poly16x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: poly16x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -23528,6 +25233,7 @@ pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { let a0: int32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: int32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -23540,6 +25246,7 @@ pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { let a0: uint32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: uint32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -23552,6 +25259,7 @@ pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { let a0: int8x16_t = simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]); let b0: int8x16_t = simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]); @@ -23564,6 +25272,7 @@ pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { let a0: int16x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: int16x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -23576,6 +25285,7 @@ pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", 
since = "1.59.0"))] pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { let a0: int32x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: int32x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -23588,6 +25298,7 @@ pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { let a0: uint8x16_t = simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]); let b0: uint8x16_t = simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]); @@ -23600,6 +25311,7 @@ pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { let a0: uint16x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: uint16x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -23612,6 +25324,7 @@ pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { let a0: uint32x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: uint32x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -23624,6 +25337,7 @@ pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { let a0: poly8x16_t = simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]); let b0: poly8x16_t = simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]); @@ -23636,6 +25350,7 @@ pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { let a0: poly16x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: poly16x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -23648,6 +25363,7 @@ pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = 
"aarch64"), assert_instr(zip))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { let a0: float32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: float32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -23660,6 +25376,7 @@ pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { let a0: float32x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: float32x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -23672,6 +25389,7 @@ pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { let a0: int8x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: int8x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -23684,6 +25402,7 @@ pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { let a0: int16x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: int16x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -23696,6 +25415,7 @@ pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { let a0: int8x16_t = simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]); let b0: int8x16_t = simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]); @@ -23708,6 +25428,7 @@ pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { let a0: int16x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: int16x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -23720,6 +25441,7 @@ pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(uzp))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { let a0: int32x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: int32x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -23732,6 +25454,7 @@ pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { let a0: uint8x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: uint8x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -23744,6 +25467,7 @@ pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { let a0: uint16x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: uint16x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -23756,6 +25480,7 @@ pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { let a0: uint8x16_t = simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]); let b0: uint8x16_t = simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]); @@ -23768,6 +25493,7 @@ pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { let a0: uint16x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: uint16x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -23780,6 +25506,7 @@ pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { let a0: uint32x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: uint32x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -23792,6 +25519,7 @@ pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(uzp))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { let a0: poly8x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: poly8x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -23804,6 +25532,7 @@ pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { let a0: poly16x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: poly16x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -23816,6 +25545,7 @@ pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { let a0: poly8x16_t = simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]); let b0: poly8x16_t = simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]); @@ -23828,6 +25558,7 @@ pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { let a0: poly16x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: poly16x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -23840,6 +25571,7 @@ pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { let a0: int32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: int32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -23852,6 +25584,7 @@ pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] +#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { let a0: uint32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: uint32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -23864,6 +25597,7 @@ pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] 
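// ---------------------------------------------------------------------------
// Editor's aside (not part of the diff): a minimal usage sketch for the
// permute intrinsics stabilized in the hunks above. It assumes an AArch64
// target with NEON; the lane values are illustrative only.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn permute_demo() {
    use core::arch::aarch64::*;
    let a: uint16x4_t = vld1_u16([0u16, 1, 2, 3].as_ptr());
    let b: uint16x4_t = vld1_u16([4u16, 5, 6, 7].as_ptr());
    // vzip interleaves lanes: z.0 = [0, 4, 1, 5], z.1 = [2, 6, 3, 7].
    let z: uint16x4x2_t = vzip_u16(a, b);
    // vuzp undoes the interleave: u.0 == a and u.1 == b again.
    let u: uint16x4x2_t = vuzp_u16(z.0, z.1);
    // vtrn transposes odd/even lane pairs: t.0 = [0, 4, 2, 6], t.1 = [1, 5, 3, 7].
    let t: uint16x4x2_t = vtrn_u16(a, b);
    let _ = (u, t);
}
// ---------------------------------------------------------------------------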
@@ -23864,6 +25597,7 @@ pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
    let a0: float32x2_t = simd_shuffle2!(a, b, [0, 2]);
    let b0: float32x2_t = simd_shuffle2!(a, b, [1, 3]);
@@ -23876,6 +25610,7 @@ pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t {
    let a0: float32x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]);
    let b0: float32x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]);
@@ -23888,6 +25623,7 @@ pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabal))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t {
    let d: uint8x8_t = vabd_u8(b, c);
    simd_add(a, simd_cast(d))
@@ -23899,6 +25635,7 @@ pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabal))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t {
    let d: uint16x4_t = vabd_u16(b, c);
    simd_add(a, simd_cast(d))
@@ -23910,6 +25647,7 @@ pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabal))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t {
    let d: uint32x2_t = vabd_u32(b, c);
    simd_add(a, simd_cast(d))
@@ -23921,6 +25659,7 @@ pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabal))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t {
    let d: int8x8_t = vabd_s8(b, c);
    let e: uint8x8_t = simd_cast(d);
@@ -23933,6 +25672,7 @@ pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabal))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
    let d: int16x4_t = vabd_s16(b, c);
    let e: uint16x4_t = simd_cast(d);
@@ -23945,6 +25685,7 @@ pub unsafe fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabal))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
    let d: int32x2_t = vabd_s32(b, c);
    let e: uint32x2_t = simd_cast(d);
@@ -23957,6 +25698,7 @@ pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
@@ -23973,6 +25715,7 @@ vqabs_s8_(a)
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
@@ -23989,6 +25732,7 @@ vqabsq_s8_(a)
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
@@ -24005,6 +25749,7 @@ vqabs_s16_(a)
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
@@ -24021,6 +25766,7 @@ vqabsq_s16_(a)
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
@@ -24037,6 +25783,7 @@ vqabs_s32_(a)
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))]
+#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
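// ---------------------------------------------------------------------------
// Editor's aside (not part of the diff): a sketch of the accumulate-style
// intrinsics from the generated.rs hunks above, assuming an AArch64 target
// with NEON. `N` is the const generic restored in the signatures above
// (1..=16 for `u16` lanes, enforced by static_assert!).
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn accumulate_demo(
    acc: core::arch::aarch64::uint16x8_t,
    b: core::arch::aarch64::uint8x8_t,
    c: core::arch::aarch64::uint8x8_t,
) -> core::arch::aarch64::uint16x8_t {
    use core::arch::aarch64::*;
    // vabal_u8 (UABAL): acc + |b - c|, widened from u8 to u16 lanes.
    let r = vabal_u8(acc, b, c);
    // vsraq_n_u16::<4> (USRA): shift every lane of `r` right by 4, add to acc.
    vsraq_n_u16::<4>(acc, r)
}
// ---------------------------------------------------------------------------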
diff --git a/crates/core_arch/src/arm_shared/neon/mod.rs b/crates/core_arch/src/arm_shared/neon/mod.rs
index 4b26e70b00..7d170a00d1 100644
--- a/crates/core_arch/src/arm_shared/neon/mod.rs
+++ b/crates/core_arch/src/arm_shared/neon/mod.rs
@@ -18,107 +18,179 @@ pub(crate) type p128 = u128;
types! {
    /// ARM-specific 64-bit wide vector of eight packed `i8`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct int8x8_t(pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8);
    /// ARM-specific 64-bit wide vector of eight packed `u8`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct uint8x8_t(pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8);
    /// ARM-specific 64-bit wide polynomial vector of eight packed `p8`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct poly8x8_t(pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8);
    /// ARM-specific 64-bit wide vector of four packed `i16`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct int16x4_t(pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16);
    /// ARM-specific 64-bit wide vector of four packed `u16`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct uint16x4_t(pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16);
    // FIXME: ARM-specific 64-bit wide vector of four packed `f16`.
    // pub struct float16x4_t(f16, f16, f16, f16);
    /// ARM-specific 64-bit wide vector of four packed `p16`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct poly16x4_t(pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16);
    /// ARM-specific 64-bit wide vector of two packed `i32`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct int32x2_t(pub(crate) i32, pub(crate) i32);
    /// ARM-specific 64-bit wide vector of two packed `u32`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct uint32x2_t(pub(crate) u32, pub(crate) u32);
    /// ARM-specific 64-bit wide vector of two packed `f32`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct float32x2_t(pub(crate) f32, pub(crate) f32);
    /// ARM-specific 64-bit wide vector of one packed `i64`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct int64x1_t(pub(crate) i64);
    /// ARM-specific 64-bit wide vector of one packed `u64`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct uint64x1_t(pub(crate) u64);
    /// ARM-specific 64-bit wide vector of one packed `p64`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct poly64x1_t(pub(crate) p64);
    /// ARM-specific 128-bit wide vector of sixteen packed `i8`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct int8x16_t(
        pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8,
        pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8,
        pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8,
        pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8,
    );
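// ---------------------------------------------------------------------------
// Editor's aside (not part of the diff): the `types!` entries above are
// opaque SIMD wrappers whose tuple fields are pub(crate), so user code builds
// values with intrinsics instead of the constructors. A sketch, assuming an
// AArch64 target with NEON:
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn construct_demo() {
    use core::arch::aarch64::*;
    let splat: uint8x8_t = vdup_n_u8(0x7f);               // broadcast one value
    let loaded: int32x2_t = vld1_s32([1i32, 2].as_ptr()); // load two lanes
    let _ = (splat, loaded);
}
// ---------------------------------------------------------------------------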
    /// ARM-specific 128-bit wide vector of sixteen packed `u8`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct uint8x16_t(
        pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8,
        pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8,
        pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8,
        pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8,
    );
    /// ARM-specific 128-bit wide vector of sixteen packed `p8`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct poly8x16_t(
        pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8,
        pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8,
        pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8,
        pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8,
    );
    /// ARM-specific 128-bit wide vector of eight packed `i16`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct int16x8_t(pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16);
    /// ARM-specific 128-bit wide vector of eight packed `u16`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct uint16x8_t(pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16);
    // FIXME: ARM-specific 128-bit wide vector of eight packed `f16`.
    // pub struct float16x8_t(f16, f16, f16, f16, f16, f16, f16, f16);
    /// ARM-specific 128-bit wide vector of eight packed `p16`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct poly16x8_t(pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16);
    /// ARM-specific 128-bit wide vector of four packed `i32`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct int32x4_t(pub(crate) i32, pub(crate) i32, pub(crate) i32, pub(crate) i32);
    /// ARM-specific 128-bit wide vector of four packed `u32`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct uint32x4_t(pub(crate) u32, pub(crate) u32, pub(crate) u32, pub(crate) u32);
    /// ARM-specific 128-bit wide vector of four packed `f32`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct float32x4_t(pub(crate) f32, pub(crate) f32, pub(crate) f32, pub(crate) f32);
    /// ARM-specific 128-bit wide vector of two packed `i64`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct int64x2_t(pub(crate) i64, pub(crate) i64);
    /// ARM-specific 128-bit wide vector of two packed `u64`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct uint64x2_t(pub(crate) u64, pub(crate) u64);
    /// ARM-specific 128-bit wide vector of two packed `p64`.
+    #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
    pub struct poly64x2_t(pub(crate) p64, pub(crate) p64);
}
/// ARM-specific type containing two `int8x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int8x8x2_t(pub int8x8_t, pub int8x8_t);
/// ARM-specific type containing three `int8x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int8x8x3_t(pub int8x8_t, pub int8x8_t, pub int8x8_t);
/// ARM-specific type containing four `int8x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int8x8x4_t(pub int8x8_t, pub int8x8_t, pub int8x8_t, pub int8x8_t);
/// ARM-specific type containing two `int8x16_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int8x16x2_t(pub int8x16_t, pub int8x16_t);
/// ARM-specific type containing three `int8x16_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int8x16x3_t(pub int8x16_t, pub int8x16_t, pub int8x16_t);
/// ARM-specific type containing four `int8x16_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int8x16x4_t(pub int8x16_t, pub int8x16_t, pub int8x16_t, pub int8x16_t);
/// ARM-specific type containing two `uint8x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint8x8x2_t(pub uint8x8_t, pub uint8x8_t);
/// ARM-specific type containing three `uint8x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint8x8x3_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t);
/// ARM-specific type containing four `uint8x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint8x8x4_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t, pub uint8x8_t);
/// ARM-specific type containing two `uint8x16_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint8x16x2_t(pub uint8x16_t, pub uint8x16_t);
/// ARM-specific type containing three `uint8x16_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint8x16x3_t(pub uint8x16_t, pub uint8x16_t, pub uint8x16_t);
/// ARM-specific type containing four `uint8x16_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint8x16x4_t(
    pub uint8x16_t,
    pub uint8x16_t,
@@ -128,22 +200,46 @@ pub struct uint8x16x4_t(
/// ARM-specific type containing two `poly8x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct poly8x8x2_t(pub poly8x8_t, pub poly8x8_t);
/// ARM-specific type containing three `poly8x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct poly8x8x3_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t);
/// ARM-specific type containing four `poly8x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct poly8x8x4_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t, pub poly8x8_t);
/// ARM-specific type containing two `poly8x16_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct poly8x16x2_t(pub poly8x16_t, pub poly8x16_t);
/// ARM-specific type containing three `poly8x16_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct poly8x16x3_t(pub poly8x16_t, pub poly8x16_t, pub poly8x16_t);
/// ARM-specific type containing four `poly8x16_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct poly8x16x4_t(
    pub poly8x16_t,
    pub poly8x16_t,
@@ -153,32 +249,68 @@ pub struct poly8x16x4_t(
/// ARM-specific type containing two `int16x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int16x4x2_t(pub int16x4_t, pub int16x4_t);
/// ARM-specific type containing three `int16x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int16x4x3_t(pub int16x4_t, pub int16x4_t, pub int16x4_t);
/// ARM-specific type containing four `int16x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int16x4x4_t(pub int16x4_t, pub int16x4_t, pub int16x4_t, pub int16x4_t);
/// ARM-specific type containing two `int16x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int16x8x2_t(pub int16x8_t, pub int16x8_t);
/// ARM-specific type containing three `int16x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int16x8x3_t(pub int16x8_t, pub int16x8_t, pub int16x8_t);
/// ARM-specific type containing four `int16x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int16x8x4_t(pub int16x8_t, pub int16x8_t, pub int16x8_t, pub int16x8_t);
/// ARM-specific type containing two `uint16x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint16x4x2_t(pub uint16x4_t, pub uint16x4_t);
/// ARM-specific type containing three `uint16x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint16x4x3_t(pub uint16x4_t, pub uint16x4_t, pub uint16x4_t);
/// ARM-specific type containing four `uint16x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint16x4x4_t(
    pub uint16x4_t,
    pub uint16x4_t,
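// ---------------------------------------------------------------------------
// Editor's aside (not part of the diff): the `*x2_t`/`*x3_t`/`*x4_t` structs
// above are plain tuples of vectors with public fields, so the results of
// multi-vector intrinsics are taken apart with `.0`, `.1`, ... and the
// derived Copy/Clone/Debug apply. A sketch, assuming AArch64 with NEON:
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn tuple_demo(a: core::arch::aarch64::int16x4_t, b: core::arch::aarch64::int16x4_t) {
    use core::arch::aarch64::*;
    let pair: int16x4x2_t = vzip_s16(a, b); // one call, two result vectors
    let copy = pair;                        // the tuple itself is Copy
    let (lo, hi) = (pair.0, pair.1);        // pull out the halves
    let _ = (copy, lo, hi);
}
// ---------------------------------------------------------------------------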
@@ -188,12 +320,24 @@ pub struct uint16x4x4_t(
/// ARM-specific type containing two `uint16x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint16x8x2_t(pub uint16x8_t, pub uint16x8_t);
/// ARM-specific type containing three `uint16x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint16x8x3_t(pub uint16x8_t, pub uint16x8_t, pub uint16x8_t);
/// ARM-specific type containing four `uint16x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint16x8x4_t(
    pub uint16x8_t,
    pub uint16x8_t,
@@ -203,12 +347,24 @@ pub struct uint16x8x4_t(
/// ARM-specific type containing two `poly16x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct poly16x4x2_t(pub poly16x4_t, pub poly16x4_t);
/// ARM-specific type containing three `poly16x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct poly16x4x3_t(pub poly16x4_t, pub poly16x4_t, pub poly16x4_t);
/// ARM-specific type containing four `poly16x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct poly16x4x4_t(
    pub poly16x4_t,
    pub poly16x4_t,
@@ -218,12 +374,24 @@ pub struct poly16x4x4_t(
/// ARM-specific type containing two `poly16x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct poly16x8x2_t(pub poly16x8_t, pub poly16x8_t);
/// ARM-specific type containing three `poly16x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct poly16x8x3_t(pub poly16x8_t, pub poly16x8_t, pub poly16x8_t);
/// ARM-specific type containing four `poly16x8_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct poly16x8x4_t(
    pub poly16x8_t,
    pub poly16x8_t,
@@ -233,32 +401,68 @@ pub struct poly16x8x4_t(
/// ARM-specific type containing two `int32x2_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int32x2x2_t(pub int32x2_t, pub int32x2_t);
/// ARM-specific type containing three `int32x2_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int32x2x3_t(pub int32x2_t, pub int32x2_t, pub int32x2_t);
/// ARM-specific type containing four `int32x2_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int32x2x4_t(pub int32x2_t, pub int32x2_t, pub int32x2_t, pub int32x2_t);
/// ARM-specific type containing two `int32x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int32x4x2_t(pub int32x4_t, pub int32x4_t);
/// ARM-specific type containing three `int32x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int32x4x3_t(pub int32x4_t, pub int32x4_t, pub int32x4_t);
/// ARM-specific type containing four `int32x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct int32x4x4_t(pub int32x4_t, pub int32x4_t, pub int32x4_t, pub int32x4_t);
/// ARM-specific type containing two `uint32x2_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint32x2x2_t(pub uint32x2_t, pub uint32x2_t);
/// ARM-specific type containing three `uint32x2_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint32x2x3_t(pub uint32x2_t, pub uint32x2_t, pub uint32x2_t);
/// ARM-specific type containing four `uint32x2_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint32x2x4_t(
    pub uint32x2_t,
    pub uint32x2_t,
@@ -268,12 +472,24 @@ pub struct uint32x2x4_t(
/// ARM-specific type containing two `uint32x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint32x4x2_t(pub uint32x4_t, pub uint32x4_t);
/// ARM-specific type containing three `uint32x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint32x4x3_t(pub uint32x4_t, pub uint32x4_t, pub uint32x4_t);
/// ARM-specific type containing four `uint32x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct uint32x4x4_t(
    pub uint32x4_t,
    pub uint32x4_t,
@@ -283,12 +499,24 @@ pub struct uint32x4x4_t(
/// ARM-specific type containing two `float32x2_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct float32x2x2_t(pub float32x2_t, pub float32x2_t);
/// ARM-specific type containing three `float32x2_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct float32x2x3_t(pub float32x2_t, pub float32x2_t, pub float32x2_t);
/// ARM-specific type containing four `float32x2_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct float32x2x4_t(
    pub float32x2_t,
    pub float32x2_t,
@@ -298,12 +526,24 @@ pub struct float32x2x4_t(
/// ARM-specific type containing two `float32x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct float32x4x2_t(pub float32x4_t, pub float32x4_t);
/// ARM-specific type containing three `float32x4_t` vectors.
#[derive(Copy, Clone, Debug)]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
pub struct float32x4x3_t(pub float32x4_t, pub float32x4_t, pub float32x4_t);
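// ---------------------------------------------------------------------------
// Editor's aside (not part of the diff): with the stability attributes added
// above, these float tuple types become usable from stable Rust (>= 1.59) on
// AArch64. A sketch; per the assert_instr annotations earlier, vzipq_f32
// lowers to EXT on AArch64:
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn float_pair_demo(
    a: core::arch::aarch64::float32x4_t,
    b: core::arch::aarch64::float32x4_t,
) -> core::arch::aarch64::float32x4x2_t {
    use core::arch::aarch64::*;
    vzipq_f32(a, b) // interleaved halves come back as a float32x4x2_t
}
// ---------------------------------------------------------------------------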
#[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct float32x4x4_t( pub float32x4_t, pub float32x4_t, @@ -313,32 +553,68 @@ pub struct float32x4x4_t( /// ARM-specific type containing two `int64x1_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int64x1x2_t(pub int64x1_t, pub int64x1_t); /// ARM-specific type containing three `int64x1_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int64x1x3_t(pub int64x1_t, pub int64x1_t, pub int64x1_t); /// ARM-specific type containing four `int64x1_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int64x1x4_t(pub int64x1_t, pub int64x1_t, pub int64x1_t, pub int64x1_t); /// ARM-specific type containing two `int64x2_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int64x2x2_t(pub int64x2_t, pub int64x2_t); /// ARM-specific type containing three `int64x2_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int64x2x3_t(pub int64x2_t, pub int64x2_t, pub int64x2_t); /// ARM-specific type containing four `int64x2_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int64x2x4_t(pub int64x2_t, pub int64x2_t, pub int64x2_t, pub int64x2_t); /// ARM-specific type containing two `uint64x1_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint64x1x2_t(pub uint64x1_t, pub uint64x1_t); /// ARM-specific type containing three `uint64x1_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint64x1x3_t(pub uint64x1_t, pub uint64x1_t, pub uint64x1_t); /// ARM-specific type containing four `uint64x1_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint64x1x4_t( pub uint64x1_t, pub uint64x1_t, @@ -348,12 +624,24 @@ pub struct uint64x1x4_t( /// ARM-specific type containing two `uint64x2_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint64x2x2_t(pub uint64x2_t, pub uint64x2_t); /// ARM-specific type containing three `uint64x2_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint64x2x3_t(pub uint64x2_t, pub uint64x2_t, pub uint64x2_t); /// ARM-specific type containing four `uint64x2_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint64x2x4_t( pub uint64x2_t, pub uint64x2_t, @@ -363,12 +651,24 @@ pub struct uint64x2x4_t(
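Ahead of the vld1_lane/vld1_dup hunks that follow: a hedged usage sketch, not part of the diff. LANE is a const generic, so an out-of-range lane index is rejected at compile time (the static_assert_imm* calls visible below); vdupq_n_f32 and vgetq_lane_f32 are assumed from the same module.

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn lane_load_sketch(p: *const f32) {
    use core::arch::aarch64::*;
    let base = vdupq_n_f32(0.0);
    // Load *p into lane 3 only; the other three lanes keep their values.
    let one_lane = vld1q_lane_f32::<3>(p, base);
    // Broadcast *p into every lane (ld1r on AArch64).
    let all_lanes = vld1q_dup_f32(p);
    assert_eq!(vgetq_lane_f32::<3>(one_lane), vgetq_lane_f32::<0>(all_lanes));
}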
/// ARM-specific type containing two `poly64x1_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly64x1x2_t(pub poly64x1_t, pub poly64x1_t); /// ARM-specific type containing three `poly64x1_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly64x1x3_t(pub poly64x1_t, pub poly64x1_t, pub poly64x1_t); /// ARM-specific type containing four `poly64x1_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly64x1x4_t( pub poly64x1_t, pub poly64x1_t, @@ -378,12 +678,24 @@ pub struct poly64x1x4_t( /// ARM-specific type containing two `poly64x2_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly64x2x2_t(pub poly64x2_t, pub poly64x2_t); /// ARM-specific type containing three `poly64x2_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly64x2x3_t(pub poly64x2_t, pub poly64x2_t, pub poly64x2_t); /// ARM-specific type containing four `poly64x2_t` vectors. #[derive(Copy, Clone, Debug)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly64x2x4_t( pub poly64x2_t, pub poly64x2_t, @@ -586,6 +898,10 @@ extern "unadjusted" { #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x8_t) -> int8x8_t { static_assert_imm3!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -598,6 +914,10 @@ pub unsafe fn vld1_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x8_t) -> in #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x16_t) -> int8x16_t { static_assert_imm4!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -610,6 +930,10 @@ pub unsafe fn vld1q_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x16_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -622,6 +946,10 @@ pub unsafe fn vld1_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x4_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -634,6 +962,10 @@ pub unsafe fn vld1q_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x8_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -646,6 +978,10 @@ pub unsafe fn vld1_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x2_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -658,6 +994,10 @@ pub unsafe fn vld1q_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x4_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, LANE = 0))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) -> int64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_insert(src, LANE as u32, *ptr) @@ -670,6 +1010,10 @@ pub unsafe fn vld1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) -> int64x2_t { static_assert_imm1!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -682,6 +1026,10 @@ pub unsafe fn vld1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x8_t) -> uint8x8_t { static_assert_imm3!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -694,6 +1042,10 @@ pub unsafe fn vld1_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x8_t) -> u #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x16_t) -> uint8x16_t { static_assert_imm4!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -706,6 +1058,10 @@ pub unsafe fn vld1q_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x16_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x4_t) -> uint16x4_t { static_assert_imm2!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -718,6 +1074,10 @@ pub unsafe fn vld1_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x4_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x8_t) -> uint16x8_t { static_assert_imm3!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -730,6 +1090,10 @@ pub unsafe fn vld1q_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x8_t) #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x2_t) -> uint32x2_t { static_assert_imm1!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -742,6 +1106,10 @@ pub unsafe fn vld1_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x2_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x4_t) -> uint32x4_t { static_assert_imm2!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -754,6 +1122,10 @@ pub unsafe fn vld1q_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x4_t) #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, LANE = 0))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) -> uint64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_insert(src, LANE as u32, *ptr) @@ -766,6 +1138,10 @@ pub unsafe fn vld1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t) -> uint64x2_t { static_assert_imm1!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -778,6 +1154,10 @@ pub unsafe fn vld1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t) #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x8_t) -> poly8x8_t { static_assert_imm3!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -790,6 +1170,10 @@ pub unsafe fn vld1_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x8_t) -> p #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x16_t) -> poly8x16_t { static_assert_imm4!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -802,6 +1186,10 @@ pub unsafe fn vld1q_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x16_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x4_t) -> poly16x4_t { static_assert_imm2!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -814,6 +1202,10 @@ pub unsafe fn vld1_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x4_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x8_t) -> poly16x8_t { static_assert_imm3!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -826,6 +1218,10 @@ pub unsafe fn vld1q_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x8_t) #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, LANE = 0))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) -> poly64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_insert(src, LANE as u32, *ptr) @@ -838,6 +1234,10 @@ pub unsafe fn vld1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x2_t) -> poly64x2_t { static_assert_imm1!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -850,6 +1250,10 @@ pub unsafe fn vld1q_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x2_t) #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x2_t) -> float32x2_t { static_assert_imm1!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -862,6 +1266,10 @@ pub unsafe fn vld1_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x2_t) #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x4_t) -> float32x4_t { static_assert_imm2!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -873,6 +1281,10 @@ pub unsafe fn vld1q_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x4_t) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + 
stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t { let x = vld1_lane_s8::<0>(ptr, transmute(i8x8::splat(0))); simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -884,6 +1296,10 @@ pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t { let x = vld1q_lane_s8::<0>(ptr, transmute(i8x16::splat(0))); simd_shuffle16!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) @@ -895,6 +1311,10 @@ pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t { let x = vld1_lane_s16::<0>(ptr, transmute(i16x4::splat(0))); simd_shuffle4!(x, x, [0, 0, 0, 0]) @@ -906,6 +1326,10 @@ pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t { let x = vld1q_lane_s16::<0>(ptr, transmute(i16x8::splat(0))); simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -917,6 +1341,10 @@ pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t { let x = vld1_lane_s32::<0>(ptr, transmute(i32x2::splat(0))); simd_shuffle2!(x, x, [0, 0]) @@ -928,6 +1356,10 @@ pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t { let x = vld1q_lane_s32::<0>(ptr, transmute(i32x4::splat(0))); simd_shuffle4!(x, x, [0, 0, 0, 0]) @@ -939,6 +1371,10 @@ pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t { #[cfg(target_arch = "aarch64")] { @@ -956,6 +1392,10 @@ pub unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable 
= "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t { let x = vld1q_lane_s64::<0>(ptr, transmute(i64x2::splat(0))); simd_shuffle2!(x, x, [0, 0]) @@ -967,6 +1407,10 @@ pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t { let x = vld1_lane_u8::<0>(ptr, transmute(u8x8::splat(0))); simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -978,6 +1422,10 @@ pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t { let x = vld1q_lane_u8::<0>(ptr, transmute(u8x16::splat(0))); simd_shuffle16!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) @@ -989,6 +1437,10 @@ pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t { let x = vld1_lane_u16::<0>(ptr, transmute(u16x4::splat(0))); simd_shuffle4!(x, x, [0, 0, 0, 0]) @@ -1000,6 +1452,10 @@ pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t { let x = vld1q_lane_u16::<0>(ptr, transmute(u16x8::splat(0))); simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -1011,6 +1467,10 @@ pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t { let x = vld1_lane_u32::<0>(ptr, transmute(u32x2::splat(0))); simd_shuffle2!(x, x, [0, 0]) @@ -1022,6 +1482,10 @@ pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_u32(ptr: 
*const u32) -> uint32x4_t { let x = vld1q_lane_u32::<0>(ptr, transmute(u32x4::splat(0))); simd_shuffle4!(x, x, [0, 0, 0, 0]) @@ -1033,6 +1497,10 @@ pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t { #[cfg(target_arch = "aarch64")] { @@ -1050,6 +1518,10 @@ pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t { let x = vld1q_lane_u64::<0>(ptr, transmute(u64x2::splat(0))); simd_shuffle2!(x, x, [0, 0]) @@ -1061,6 +1533,10 @@ pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t { let x = vld1_lane_p8::<0>(ptr, transmute(u8x8::splat(0))); simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -1072,6 +1548,10 @@ pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t { let x = vld1q_lane_p8::<0>(ptr, transmute(u8x16::splat(0))); simd_shuffle16!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) @@ -1083,6 +1563,10 @@ pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t { let x = vld1_lane_p16::<0>(ptr, transmute(u16x4::splat(0))); simd_shuffle4!(x, x, [0, 0, 0, 0]) @@ -1094,6 +1578,10 @@ pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t { let x = vld1q_lane_p16::<0>(ptr, transmute(u16x8::splat(0))); simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -1105,6 +1593,10 @@ pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vld1.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t { let x = vld1_lane_f32::<0>(ptr, transmute(f32x2::splat(0.))); simd_shuffle2!(x, x, [0, 0]) @@ -1116,6 +1608,10 @@ pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t { #[cfg(target_arch = "aarch64")] { @@ -1133,6 +1629,10 @@ pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t { let x = vld1q_lane_p64::<0>(ptr, transmute(u64x2::splat(0))); simd_shuffle2!(x, x, [0, 0]) @@ -1144,6 +1644,10 @@ pub unsafe fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t { let x = vld1q_lane_f32::<0>(ptr, transmute(f32x4::splat(0.))); simd_shuffle4!(x, x, [0, 0, 0, 0]) @@ -1155,6 +1659,10 @@ pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaba_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { simd_add(a, vabd_s8(b, c)) } @@ -1163,6 +1671,10 @@ pub unsafe fn vaba_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaba_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { simd_add(a, vabd_s16(b, c)) } @@ -1171,6 +1683,10 @@ pub unsafe fn vaba_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaba_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { simd_add(a, vabd_s32(b, c)) } @@ -1179,6 +1695,10 @@ pub unsafe fn vaba_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaba_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { simd_add(a, vabd_u8(b, c)) } @@ -1187,6 +1707,10 @@ pub unsafe fn vaba_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaba_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { simd_add(a, vabd_u16(b, c)) } @@ -1195,6 +1719,10 @@ pub unsafe fn vaba_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaba_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { simd_add(a, vabd_u32(b, c)) } @@ -1204,6 +1732,10 @@ pub unsafe fn vaba_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vabaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { simd_add(a, vabdq_s8(b, c)) } @@ -1212,6 +1744,10 @@ pub unsafe fn vabaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vabaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { simd_add(a, vabdq_s16(b, c)) } @@ -1220,6 +1756,10 @@ pub unsafe fn vabaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vabaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { simd_add(a, vabdq_s32(b, c)) } @@ -1228,6 +1768,10 @@ pub unsafe fn vabaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vabaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { simd_add(a, vabdq_u8(b, c)) } @@ -1236,6 +1780,10 @@ pub unsafe fn vabaq_u8(a: 
uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vabaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { simd_add(a, vabdq_u16(b, c)) } @@ -1244,6 +1792,10 @@ pub unsafe fn vabaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vabaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { simd_add(a, vabdq_u32(b, c)) } @@ -1254,6 +1806,10 @@ pub unsafe fn vabaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { vabs_s8_(a) } @@ -1263,6 +1819,10 @@ pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { vabs_s16_(a) } @@ -1272,6 +1832,10 @@ pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { vabs_s32_(a) } @@ -1281,6 +1845,10 @@ pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { vabsq_s8_(a) } @@ -1290,6 +1858,10 @@ pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { vabsq_s16_(a) } @@ -1299,6 +1871,10 @@ pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = 
"neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { vabsq_s32_(a) } @@ -1309,6 +1885,10 @@ pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { vpadd_s16_(a, b) } @@ -1318,6 +1898,10 @@ pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { vpadd_s32_(a, b) } @@ -1327,6 +1911,10 @@ pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { vpadd_s8_(a, b) } @@ -1336,6 +1924,10 @@ pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { transmute(vpadd_s16_(transmute(a), transmute(b))) } @@ -1345,6 +1937,10 @@ pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { transmute(vpadd_s32_(transmute(a), transmute(b))) } @@ -1354,6 +1950,10 @@ pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { transmute(vpadd_s8_(transmute(a), transmute(b))) } @@ -1364,6 +1964,10 @@ pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_add(a, b) } @@ -1374,6 +1978,10 @@ pub unsafe fn vadd_s8(a: int8x8_t, 
b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_add(a, b) } @@ -1384,6 +1992,10 @@ pub unsafe fn vaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_add(a, b) } @@ -1394,6 +2006,10 @@ pub unsafe fn vadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_add(a, b) } @@ -1404,6 +2020,10 @@ pub unsafe fn vaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_add(a, b) } @@ -1414,6 +2034,10 @@ pub unsafe fn vadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_add(a, b) } @@ -1424,6 +2048,10 @@ pub unsafe fn vaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_add(a, b) } @@ -1434,6 +2062,10 @@ pub unsafe fn vaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_add(a, b) } @@ -1444,6 +2076,10 @@ pub unsafe fn vadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = 
"neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_add(a, b) } @@ -1454,6 +2090,10 @@ pub unsafe fn vaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_add(a, b) } @@ -1464,6 +2104,10 @@ pub unsafe fn vadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_add(a, b) } @@ -1474,6 +2118,10 @@ pub unsafe fn vaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_add(a, b) } @@ -1484,6 +2132,10 @@ pub unsafe fn vadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_add(a, b) } @@ -1494,6 +2146,10 @@ pub unsafe fn vaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_add(a, b) } @@ -1504,6 +2160,10 @@ pub unsafe fn vaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fadd))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_add(a, b) } @@ -1514,6 +2174,10 @@ pub unsafe fn vadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fadd))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_add(a, b) } @@ -1524,6 +2188,10 @@ pub unsafe fn vaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { let a: int16x8_t = simd_cast(a); let b: int16x8_t = simd_cast(b); @@ -1536,6 +2204,10 @@ pub unsafe fn vaddl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { let a: int32x4_t = simd_cast(a); let b: int32x4_t = simd_cast(b); @@ -1548,6 +2220,10 @@ pub unsafe fn vaddl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { let a: int64x2_t = simd_cast(a); let b: int64x2_t = simd_cast(b); @@ -1560,6 +2236,10 @@ pub unsafe fn vaddl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { let a: uint16x8_t = simd_cast(a); let b: uint16x8_t = simd_cast(b); @@ -1572,6 +2252,10 @@ pub unsafe fn vaddl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { let a: uint32x4_t = simd_cast(a); let b: uint32x4_t = simd_cast(b); @@ -1584,6 +2268,10 @@ pub unsafe fn vaddl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { let a: uint64x2_t = simd_cast(a); let b: uint64x2_t = simd_cast(b); @@ -1596,6 +2284,10 @@ pub unsafe fn vaddl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { let a: int8x8_t = simd_shuffle8!(a, a, [8, 9, 
10, 11, 12, 13, 14, 15]); let b: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -1610,6 +2302,10 @@ pub unsafe fn vaddl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { let a: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]); let b: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]); @@ -1624,6 +2320,10 @@ pub unsafe fn vaddl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { let a: int32x2_t = simd_shuffle2!(a, a, [2, 3]); let b: int32x2_t = simd_shuffle2!(b, b, [2, 3]); @@ -1638,6 +2338,10 @@ pub unsafe fn vaddl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { let a: uint8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); let b: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -1652,6 +2356,10 @@ pub unsafe fn vaddl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { let a: uint16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]); let b: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]); @@ -1666,6 +2374,10 @@ pub unsafe fn vaddl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { let a: uint32x2_t = simd_shuffle2!(a, a, [2, 3]); let b: uint32x2_t = simd_shuffle2!(b, b, [2, 3]); @@ -1680,6 +2392,10 @@ pub unsafe fn vaddl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { let b: int16x8_t = simd_cast(b); simd_add(a, b) @@ -1691,6 +2407,10 @@ pub unsafe fn 
vaddw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { let b: int32x4_t = simd_cast(b); simd_add(a, b) @@ -1702,6 +2422,10 @@ pub unsafe fn vaddw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { let b: int64x2_t = simd_cast(b); simd_add(a, b) @@ -1713,6 +2437,10 @@ pub unsafe fn vaddw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { let b: uint16x8_t = simd_cast(b); simd_add(a, b) @@ -1724,6 +2452,10 @@ pub unsafe fn vaddw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { let b: uint32x4_t = simd_cast(b); simd_add(a, b) @@ -1735,6 +2467,10 @@ pub unsafe fn vaddw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { let b: uint64x2_t = simd_cast(b); simd_add(a, b) @@ -1746,6 +2482,10 @@ pub unsafe fn vaddw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { let b: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); let b: int16x8_t = simd_cast(b); @@ -1758,6 +2498,10 @@ pub unsafe fn vaddw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { let b: int16x4_t = 
simd_shuffle4!(b, b, [4, 5, 6, 7]); let b: int32x4_t = simd_cast(b); @@ -1770,6 +2514,10 @@ pub unsafe fn vaddw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { let b: int32x2_t = simd_shuffle2!(b, b, [2, 3]); let b: int64x2_t = simd_cast(b); @@ -1782,6 +2530,10 @@ pub unsafe fn vaddw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { let b: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); let b: uint16x8_t = simd_cast(b); @@ -1794,6 +2546,10 @@ pub unsafe fn vaddw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { let b: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]); let b: uint32x4_t = simd_cast(b); @@ -1806,6 +2562,10 @@ pub unsafe fn vaddw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { let b: uint32x2_t = simd_shuffle2!(b, b, [2, 3]); let b: uint64x2_t = simd_cast(b); @@ -1818,6 +2578,10 @@ pub unsafe fn vaddw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { simd_cast(simd_shr(simd_add(a, b), int16x8_t(8, 8, 8, 8, 8, 8, 8, 8))) } @@ -1828,6 +2592,10 @@ pub unsafe fn vaddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { simd_cast(simd_shr(simd_add(a, b), int32x4_t(16, 16, 16, 16))) } @@ -1838,6 +2606,10 @@ pub unsafe fn vaddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { simd_cast(simd_shr(simd_add(a, b), int64x2_t(32, 32))) } @@ -1848,6 +2620,10 @@ pub unsafe fn vaddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { simd_cast(simd_shr(simd_add(a, b), uint16x8_t(8, 8, 8, 8, 8, 8, 8, 8))) } @@ -1858,6 +2634,10 @@ pub unsafe fn vaddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { simd_cast(simd_shr(simd_add(a, b), uint32x4_t(16, 16, 16, 16))) } @@ -1868,6 +2648,10 @@ pub unsafe fn vaddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { simd_cast(simd_shr(simd_add(a, b), uint64x2_t(32, 32))) } @@ -1878,6 +2662,10 @@ pub unsafe fn vaddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t { let x = simd_cast(simd_shr(simd_add(a, b), int16x8_t(8, 8, 8, 8, 8, 8, 8, 8))); simd_shuffle16!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -1889,6 +2677,10 @@ pub unsafe fn vaddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x1 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t { let x = simd_cast(simd_shr(simd_add(a, b), int32x4_t(16, 16, 16, 16))); simd_shuffle8!(r, x, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -1900,6 +2692,10 @@ pub unsafe fn vaddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", 
since = "1.59.0") +)] pub unsafe fn vaddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t { let x = simd_cast(simd_shr(simd_add(a, b), int64x2_t(32, 32))); simd_shuffle4!(r, x, [0, 1, 2, 3]) @@ -1911,6 +2707,10 @@ pub unsafe fn vaddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t { let x = simd_cast(simd_shr(simd_add(a, b), uint16x8_t(8, 8, 8, 8, 8, 8, 8, 8))); simd_shuffle16!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -1922,6 +2722,10 @@ pub unsafe fn vaddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uin #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t { let x = simd_cast(simd_shr(simd_add(a, b), uint32x4_t(16, 16, 16, 16))); simd_shuffle8!(r, x, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -1933,6 +2737,10 @@ pub unsafe fn vaddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> ui #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t { let x = simd_cast(simd_shr(simd_add(a, b), uint64x2_t(32, 32))); simd_shuffle4!(r, x, [0, 1, 2, 3]) @@ -1944,6 +2752,10 @@ pub unsafe fn vaddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> ui #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { vraddhn_s16_(a, b) } @@ -1954,6 +2766,10 @@ pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { vraddhn_s32_(a, b) } @@ -1964,6 +2780,10 @@ pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { vraddhn_s64_(a, b) } @@ -1974,6 
+2794,10 @@ pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { transmute(vraddhn_s16_(transmute(a), transmute(b))) } @@ -1984,6 +2808,10 @@ pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { transmute(vraddhn_s32_(transmute(a), transmute(b))) } @@ -1994,6 +2822,10 @@ pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { transmute(vraddhn_s64_(transmute(a), transmute(b))) } @@ -2004,6 +2836,10 @@ pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vraddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t { let x = vraddhn_s16_(a, b); simd_shuffle16!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -2015,6 +2851,10 @@ pub unsafe fn vraddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vraddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t { let x = vraddhn_s32_(a, b); simd_shuffle8!(r, x, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -2026,6 +2866,10 @@ pub unsafe fn vraddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int1 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vraddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t { let x = vraddhn_s64_(a, b); simd_shuffle4!(r, x, [0, 1, 2, 3]) @@ -2037,6 +2881,10 @@ pub unsafe fn vraddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int3 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))] #[cfg_attr(all(test, 
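[Editorial sketch, not part of the diff: `vraddhn_*` is the rounding counterpart of `vaddhn_*`; it adds half of the discarded low part (0x80 for 16-bit lanes) before shifting, so results round to nearest instead of truncating. Assumes aarch64/NEON; `demo_raddhn` is an invented name.]

    #[cfg(target_arch = "aarch64")]
    fn demo_raddhn() {
        use core::arch::aarch64::*;
        // SAFETY: NEON is enabled by default on aarch64 Rust targets.
        unsafe {
            let a = vdupq_n_s16(0x0080);
            let z = vdupq_n_s16(0);
            // The truncating narrow drops the low byte entirely...
            assert_eq!(vget_lane_s8::<0>(vaddhn_s16(a, z)), 0);
            // ...the rounding narrow adds 0x80 first, so 0x0080 rounds up to 1.
            assert_eq!(vget_lane_s8::<0>(vraddhn_s16(a, z)), 1);
        }
    }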
target_arch = "aarch64"), assert_instr(raddhn2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vraddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t { let x: uint8x8_t = transmute(vraddhn_s16_(transmute(a), transmute(b))); simd_shuffle16!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -2048,6 +2896,10 @@ pub unsafe fn vraddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> ui #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vraddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t { let x: uint16x4_t = transmute(vraddhn_s32_(transmute(a), transmute(b))); simd_shuffle8!(r, x, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -2059,6 +2911,10 @@ pub unsafe fn vraddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> u #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vraddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t { let x: uint32x2_t = transmute(vraddhn_s64_(transmute(a), transmute(b))); simd_shuffle4!(r, x, [0, 1, 2, 3]) @@ -2070,6 +2926,10 @@ pub unsafe fn vraddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> u #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { vpaddl_s8_(a) } @@ -2080,6 +2940,10 @@ pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s16))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { vpaddl_s16_(a) } @@ -2090,6 +2954,10 @@ pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s32))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { vpaddl_s32_(a) } @@ -2100,6 +2968,10 @@ pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { vpaddlq_s8_(a) } @@ -2110,6 +2982,10 @@ pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s16))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { vpaddlq_s16_(a) } @@ -2120,6 +2996,10 @@ pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s32))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { vpaddlq_s32_(a) } @@ -2130,6 +3010,10 @@ pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { vpaddl_u8_(a) } @@ -2140,6 +3024,10 @@ pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u16))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { vpaddl_u16_(a) } @@ -2150,6 +3038,10 @@ pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u32))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { vpaddl_u32_(a) } @@ -2160,6 +3052,10 @@ pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { vpaddlq_u8_(a) } @@ -2170,6 +3066,10 @@ pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u16))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { vpaddlq_u16_(a) } @@ -2180,6 +3080,10 @@ pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u32))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { vpaddlq_u32_(a) } @@ -2190,6 
+3094,10 @@ pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovn_s16(a: int16x8_t) -> int8x8_t { simd_cast(a) } @@ -2200,6 +3108,10 @@ pub unsafe fn vmovn_s16(a: int16x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovn_s32(a: int32x4_t) -> int16x4_t { simd_cast(a) } @@ -2210,6 +3122,10 @@ pub unsafe fn vmovn_s32(a: int32x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovn_s64(a: int64x2_t) -> int32x2_t { simd_cast(a) } @@ -2220,6 +3136,10 @@ pub unsafe fn vmovn_s64(a: int64x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovn_u16(a: uint16x8_t) -> uint8x8_t { simd_cast(a) } @@ -2230,6 +3150,10 @@ pub unsafe fn vmovn_u16(a: uint16x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovn_u32(a: uint32x4_t) -> uint16x4_t { simd_cast(a) } @@ -2240,6 +3164,10 @@ pub unsafe fn vmovn_u32(a: uint32x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovn_u64(a: uint64x2_t) -> uint32x2_t { simd_cast(a) } @@ -2250,6 +3178,10 @@ pub unsafe fn vmovn_u64(a: uint64x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sxtl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovl_s8(a: int8x8_t) -> int16x8_t { simd_cast(a) } @@ -2260,6 +3192,10 @@ pub unsafe fn vmovl_s8(a: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sxtl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovl_s16(a: int16x4_t) -> int32x4_t { simd_cast(a) } @@ -2270,6 +3206,10 @@ pub unsafe 
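[Editorial sketch, not part of the diff: `vmovn_*` narrows each lane by plain truncation (no saturation, unlike the `vqmovn` family), and `vmovl_*` widens by sign or zero extension. Assumes aarch64/NEON; `demo_movn_movl` is an invented name.]

    #[cfg(target_arch = "aarch64")]
    fn demo_movn_movl() {
        use core::arch::aarch64::*;
        // SAFETY: NEON is enabled by default on aarch64 Rust targets.
        unsafe {
            // Truncation keeps only the low byte: 0x0180 -> 0x80 == -128 as i8.
            let n = vmovn_s16(vdupq_n_s16(0x0180));
            assert_eq!(vget_lane_s8::<0>(n), -128);
            // vmovl_s8 sign-extends each lane back to i16.
            assert_eq!(vgetq_lane_s16::<0>(vmovl_s8(n)), -128);
        }
    }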
fn vmovl_s16(a: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sxtl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovl_s32(a: int32x2_t) -> int64x2_t { simd_cast(a) } @@ -2280,6 +3220,10 @@ pub unsafe fn vmovl_s32(a: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uxtl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovl_u8(a: uint8x8_t) -> uint16x8_t { simd_cast(a) } @@ -2290,6 +3234,10 @@ pub unsafe fn vmovl_u8(a: uint8x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uxtl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovl_u16(a: uint16x4_t) -> uint32x4_t { simd_cast(a) } @@ -2300,6 +3248,10 @@ pub unsafe fn vmovl_u16(a: uint16x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uxtl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovl_u32(a: uint32x2_t) -> uint64x2_t { simd_cast(a) } @@ -2310,6 +3262,10 @@ pub unsafe fn vmovl_u32(a: uint32x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvn_s8(a: int8x8_t) -> int8x8_t { let b = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_xor(a, b) @@ -2321,6 +3277,10 @@ pub unsafe fn vmvn_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvnq_s8(a: int8x16_t) -> int8x16_t { let b = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -2334,6 +3294,10 @@ pub unsafe fn vmvnq_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvn_s16(a: int16x4_t) -> int16x4_t { let b = int16x4_t(-1, -1, -1, -1); simd_xor(a, b) @@ -2345,6 +3309,10 @@ pub unsafe fn vmvn_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = 
"1.59.0") +)] pub unsafe fn vmvnq_s16(a: int16x8_t) -> int16x8_t { let b = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_xor(a, b) @@ -2356,6 +3324,10 @@ pub unsafe fn vmvnq_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvn_s32(a: int32x2_t) -> int32x2_t { let b = int32x2_t(-1, -1); simd_xor(a, b) @@ -2367,6 +3339,10 @@ pub unsafe fn vmvn_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvnq_s32(a: int32x4_t) -> int32x4_t { let b = int32x4_t(-1, -1, -1, -1); simd_xor(a, b) @@ -2378,6 +3354,10 @@ pub unsafe fn vmvnq_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvn_u8(a: uint8x8_t) -> uint8x8_t { let b = uint8x8_t(255, 255, 255, 255, 255, 255, 255, 255); simd_xor(a, b) @@ -2389,6 +3369,10 @@ pub unsafe fn vmvn_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t { let b = uint8x16_t( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, @@ -2402,6 +3386,10 @@ pub unsafe fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvn_u16(a: uint16x4_t) -> uint16x4_t { let b = uint16x4_t(65_535, 65_535, 65_535, 65_535); simd_xor(a, b) @@ -2413,6 +3401,10 @@ pub unsafe fn vmvn_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t { let b = uint16x8_t( 65_535, 65_535, 65_535, 65_535, 65_535, 65_535, 65_535, 65_535, @@ -2426,6 +3418,10 @@ pub unsafe fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvn_u32(a: uint32x2_t) -> uint32x2_t { let b = uint32x2_t(4_294_967_295, 
4_294_967_295); simd_xor(a, b) @@ -2437,6 +3433,10 @@ pub unsafe fn vmvn_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t { let b = uint32x4_t(4_294_967_295, 4_294_967_295, 4_294_967_295, 4_294_967_295); simd_xor(a, b) @@ -2448,6 +3448,10 @@ pub unsafe fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvn_p8(a: poly8x8_t) -> poly8x8_t { let b = poly8x8_t(255, 255, 255, 255, 255, 255, 255, 255); simd_xor(a, b) @@ -2459,6 +3463,10 @@ pub unsafe fn vmvn_p8(a: poly8x8_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t { let b = poly8x16_t( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, @@ -2472,6 +3480,10 @@ pub unsafe fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbic_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let c = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_and(simd_xor(b, c), a) @@ -2483,6 +3495,10 @@ pub unsafe fn vbic_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbicq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { let c = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -2496,6 +3512,10 @@ pub unsafe fn vbicq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbic_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let c = int16x4_t(-1, -1, -1, -1); simd_and(simd_xor(b, c), a) @@ -2507,6 +3527,10 @@ pub unsafe fn vbic_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbicq_s16(a: int16x8_t, b: 
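[Editorial sketch, not part of the diff: as the bodies above show, `vmvn*` is a bitwise NOT implemented as XOR against an all-ones vector. Assumes aarch64/NEON; `demo_mvn` is an invented name.]

    #[cfg(target_arch = "aarch64")]
    fn demo_mvn() {
        use core::arch::aarch64::*;
        // SAFETY: NEON is enabled by default on aarch64 Rust targets.
        unsafe {
            // XOR with 0xFF flips every bit of every lane.
            let r = vmvn_u8(vdup_n_u8(0b1111_0000));
            assert_eq!(vget_lane_u8::<0>(r), 0b0000_1111);
        }
    }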
int16x8_t) -> int16x8_t { let c = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_and(simd_xor(b, c), a) @@ -2518,6 +3542,10 @@ pub unsafe fn vbicq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbic_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let c = int32x2_t(-1, -1); simd_and(simd_xor(b, c), a) @@ -2529,6 +3557,10 @@ pub unsafe fn vbic_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbicq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let c = int32x4_t(-1, -1, -1, -1); simd_and(simd_xor(b, c), a) @@ -2540,6 +3572,10 @@ pub unsafe fn vbicq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbic_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { let c = int64x1_t(-1); simd_and(simd_xor(b, c), a) @@ -2551,6 +3587,10 @@ pub unsafe fn vbic_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbicq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let c = int64x2_t(-1, -1); simd_and(simd_xor(b, c), a) @@ -2562,6 +3602,10 @@ pub unsafe fn vbicq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbic_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let c = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -2573,6 +3617,10 @@ pub unsafe fn vbic_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { let c = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -2586,6 +3634,10 @@ pub unsafe fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = 
"neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbic_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let c = int16x4_t(-1, -1, -1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -2597,6 +3649,10 @@ pub unsafe fn vbic_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbicq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let c = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -2608,6 +3664,10 @@ pub unsafe fn vbicq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbic_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let c = int32x2_t(-1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -2619,6 +3679,10 @@ pub unsafe fn vbic_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbicq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let c = int32x4_t(-1, -1, -1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -2630,6 +3694,10 @@ pub unsafe fn vbicq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbic_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { let c = int64x1_t(-1); simd_and(simd_xor(b, transmute(c)), a) @@ -2641,6 +3709,10 @@ pub unsafe fn vbic_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let c = int64x2_t(-1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -2656,6 +3728,10 @@ pub unsafe fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { simd_select(transmute::<_, int8x8_t>(a), b, c) } @@ -2666,6 +3742,10 @@ pub unsafe fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { simd_select(transmute::<_, int16x4_t>(a), b, c) } @@ -2676,6 +3756,10 @@ pub unsafe fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { simd_select(transmute::<_, int32x2_t>(a), b, c) } @@ -2686,6 +3770,10 @@ pub unsafe fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { simd_select(transmute::<_, int64x1_t>(a), b, c) } @@ -2696,6 +3784,10 @@ pub unsafe fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { simd_select(transmute::<_, int8x8_t>(a), b, c) } @@ -2706,6 +3798,10 @@ pub unsafe fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { simd_select(transmute::<_, int16x4_t>(a), b, c) } @@ -2716,6 +3812,10 @@ pub unsafe fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { simd_select(transmute::<_, int32x2_t>(a), b, c) } @@ -2726,6 +3826,10 @@ pub unsafe fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_t { simd_select(transmute::<_, int64x1_t>(a), b, c) } @@ -2736,6 +3840,10 @@ pub unsafe fn vbsl_u64(a: 
uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { simd_select(transmute::<_, int32x2_t>(a), b, c) } @@ -2746,6 +3854,10 @@ pub unsafe fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t { simd_select(transmute::<_, int8x8_t>(a), b, c) } @@ -2756,6 +3868,10 @@ pub unsafe fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_t { simd_select(transmute::<_, int16x4_t>(a), b, c) } @@ -2766,6 +3882,10 @@ pub unsafe fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { simd_select(transmute::<_, int8x16_t>(a), b, c) } @@ -2776,6 +3896,10 @@ pub unsafe fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { simd_select(transmute::<_, int16x8_t>(a), b, c) } @@ -2786,6 +3910,10 @@ pub unsafe fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { simd_select(transmute::<_, int32x4_t>(a), b, c) } @@ -2796,6 +3924,10 @@ pub unsafe fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn 
vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { simd_select(transmute::<_, int64x2_t>(a), b, c) } @@ -2806,6 +3938,10 @@ pub unsafe fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { simd_select(transmute::<_, int8x16_t>(a), b, c) } @@ -2816,6 +3952,10 @@ pub unsafe fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { simd_select(transmute::<_, int16x8_t>(a), b, c) } @@ -2826,6 +3966,10 @@ pub unsafe fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { simd_select(transmute::<_, int32x4_t>(a), b, c) } @@ -2836,6 +3980,10 @@ pub unsafe fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { simd_select(transmute::<_, int64x2_t>(a), b, c) } @@ -2846,6 +3994,10 @@ pub unsafe fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_t { simd_select(transmute::<_, int8x16_t>(a), b, c) } @@ -2856,6 +4008,10 @@ pub unsafe fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8_t { simd_select(transmute::<_, int16x8_t>(a), b, c) } @@ -2866,6 +4022,10 @@ pub unsafe fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] 
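[Editorial sketch, not part of the diff: Arm defines BSL as a bitwise select, but the `simd_select`-based bodies above expect each mask lane to be all-ones or all-zeros, which is exactly what NEON comparisons produce and the typical way the mask is built. Assumes aarch64/NEON; `demo_bsl` is an invented name.]

    #[cfg(target_arch = "aarch64")]
    fn demo_bsl() {
        use core::arch::aarch64::*;
        // SAFETY: NEON is enabled by default on aarch64 Rust targets.
        unsafe {
            let x = vdup_n_u8(7);
            let y = vdup_n_u8(3);
            // vcgt yields 0xFF where x > y, 0x00 elsewhere.
            let mask = vcgt_u8(x, y);
            // Lanes with the mask set come from `x`, the rest from `y`.
            assert_eq!(vget_lane_u8::<0>(vbsl_u8(mask, x, y)), 7);
        }
    }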
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_f32(a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { simd_select(transmute::<_, int32x4_t>(a), b, c) } @@ -2876,6 +4036,10 @@ pub unsafe fn vbslq_f32(a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float3 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let c = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_or(simd_xor(b, c), a) @@ -2887,6 +4051,10 @@ pub unsafe fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { let c = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -2900,6 +4068,10 @@ pub unsafe fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let c = int16x4_t(-1, -1, -1, -1); simd_or(simd_xor(b, c), a) @@ -2911,6 +4083,10 @@ pub unsafe fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let c = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_or(simd_xor(b, c), a) @@ -2922,6 +4098,10 @@ pub unsafe fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let c = int32x2_t(-1, -1); simd_or(simd_xor(b, c), a) @@ -2933,6 +4113,10 @@ pub unsafe fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let c = int32x4_t(-1, -1, -1, -1); simd_or(simd_xor(b, c), a) @@ -2944,6 +4128,10 @@ pub unsafe fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { let c = int64x1_t(-1); simd_or(simd_xor(b, c), a) @@ -2955,6 +4143,10 @@ pub unsafe fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let c = int64x2_t(-1, -1); simd_or(simd_xor(b, c), a) @@ -2966,6 +4158,10 @@ pub unsafe fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let c = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -2977,6 +4173,10 @@ pub unsafe fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { let c = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -2990,6 +4190,10 @@ pub unsafe fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let c = int16x4_t(-1, -1, -1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -3001,6 +4205,10 @@ pub unsafe fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let c = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -3012,6 +4220,10 @@ pub unsafe fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let c = int32x2_t(-1, -1); simd_or(simd_xor(b, transmute(c)), a) 
@@ -3023,6 +4235,10 @@ pub unsafe fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let c = int32x4_t(-1, -1, -1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -3034,6 +4250,10 @@ pub unsafe fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { let c = int64x1_t(-1); simd_or(simd_xor(b, transmute(c)), a) @@ -3045,6 +4265,10 @@ pub unsafe fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let c = int64x2_t(-1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -3056,6 +4280,10 @@ pub unsafe fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sminp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { vpmins_v8i8(a, b) } @@ -3066,6 +4294,10 @@ pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sminp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { vpmins_v4i16(a, b) } @@ -3076,6 +4308,10 @@ pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sminp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { vpmins_v2i32(a, b) } @@ -3086,6 +4322,10 @@ pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uminp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { vpminu_v8i8(a, b) } @@ -3096,6 +4336,10 @@ pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uminp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { vpminu_v4i16(a, b) } @@ -3106,6 +4350,10 @@ pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uminp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { vpminu_v2i32(a, b) } @@ -3116,6 +4364,10 @@ pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fminp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { vpminf_v2f32(a, b) } @@ -3126,6 +4378,10 @@ pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smaxp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { vpmaxs_v8i8(a, b) } @@ -3136,6 +4392,10 @@ pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smaxp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { vpmaxs_v4i16(a, b) } @@ -3146,6 +4406,10 @@ pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smaxp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { vpmaxs_v2i32(a, b) } @@ -3156,6 +4420,10 @@ pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umaxp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { vpmaxu_v8i8(a, b) } @@ -3166,6 +4434,10 @@ pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umaxp))] +#[cfg_attr( 
+ target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { vpmaxu_v4i16(a, b) } @@ -3176,6 +4448,10 @@ pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umaxp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { vpmaxu_v2i32(a, b) } @@ -3186,6 +4462,10 @@ pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmaxp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { vpmaxf_v2f32(a, b) } @@ -3196,6 +4476,10 @@ pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vgetq_lane_u64(v: uint64x2_t) -> u64 { static_assert_imm1!(IMM5); simd_extract(v, IMM5 as u32) @@ -3207,6 +4491,10 @@ pub unsafe fn vgetq_lane_u64(v: uint64x2_t) -> u64 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 0))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_lane_u64(v: uint64x1_t) -> u64 { static_assert!(IMM5 : i32 where IMM5 == 0); simd_extract(v, 0) @@ -3218,6 +4506,10 @@ pub unsafe fn vget_lane_u64(v: uint64x1_t) -> u64 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_lane_u16(v: uint16x4_t) -> u16 { static_assert_imm2!(IMM5); simd_extract(v, IMM5 as u32) @@ -3229,6 +4521,10 @@ pub unsafe fn vget_lane_u16(v: uint16x4_t) -> u16 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_lane_s16(v: int16x4_t) -> i16 { static_assert_imm2!(IMM5); simd_extract(v, IMM5 as u32) @@ -3240,6 +4536,10 @@ pub unsafe fn vget_lane_s16(v: int16x4_t) -> i16 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_lane_p16(v: poly16x4_t) -> p16 { static_assert_imm2!(IMM5); simd_extract(v, IMM5 as u32) @@ -3251,6 +4551,10 @@ pub unsafe fn vget_lane_p16(v: poly16x4_t) -> p16 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr( + 
target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_lane_u32(v: uint32x2_t) -> u32 { static_assert_imm1!(IMM5); simd_extract(v, IMM5 as u32) @@ -3262,6 +4566,10 @@ pub unsafe fn vget_lane_u32(v: uint32x2_t) -> u32 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_lane_s32(v: int32x2_t) -> i32 { static_assert_imm1!(IMM5); simd_extract(v, IMM5 as u32) @@ -3273,6 +4581,10 @@ pub unsafe fn vget_lane_s32(v: int32x2_t) -> i32 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_lane_f32(v: float32x2_t) -> f32 { static_assert_imm1!(IMM5); simd_extract(v, IMM5 as u32) @@ -3284,6 +4596,10 @@ pub unsafe fn vget_lane_f32(v: float32x2_t) -> f32 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vgetq_lane_f32(v: float32x4_t) -> f32 { static_assert_imm2!(IMM5); simd_extract(v, IMM5 as u32) @@ -3295,6 +4611,10 @@ pub unsafe fn vgetq_lane_f32(v: float32x4_t) -> f32 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 0))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_lane_p64(v: poly64x1_t) -> p64 { static_assert!(IMM5 : i32 where IMM5 == 0); simd_extract(v, IMM5 as u32) @@ -3306,6 +4626,10 @@ pub unsafe fn vget_lane_p64(v: poly64x1_t) -> p64 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 0))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vgetq_lane_p64(v: poly64x2_t) -> p64 { static_assert_imm1!(IMM5); simd_extract(v, IMM5 as u32) @@ -3317,6 +4641,10 @@ pub unsafe fn vgetq_lane_p64(v: poly64x2_t) -> p64 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 0))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_lane_s64(v: int64x1_t) -> i64 { static_assert!(IMM5 : i32 where IMM5 == 0); simd_extract(v, IMM5 as u32) @@ -3328,6 +4656,10 @@ pub unsafe fn vget_lane_s64(v: int64x1_t) -> i64 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 0))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vgetq_lane_s64(v: int64x2_t) -> i64 { static_assert_imm1!(IMM5); simd_extract(v, IMM5 as u32) @@ -3339,6 +4671,10 @@ pub unsafe fn vgetq_lane_s64(v: int64x2_t) -> i64 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub 
unsafe fn vgetq_lane_u16(v: uint16x8_t) -> u16 { static_assert_imm3!(IMM5); simd_extract(v, IMM5 as u32) @@ -3350,6 +4686,10 @@ pub unsafe fn vgetq_lane_u16(v: uint16x8_t) -> u16 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vgetq_lane_u32(v: uint32x4_t) -> u32 { static_assert_imm2!(IMM5); simd_extract(v, IMM5 as u32) @@ -3361,6 +4701,10 @@ pub unsafe fn vgetq_lane_u32(v: uint32x4_t) -> u32 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vgetq_lane_s16(v: int16x8_t) -> i16 { static_assert_imm3!(IMM5); simd_extract(v, IMM5 as u32) @@ -3372,6 +4716,10 @@ pub unsafe fn vgetq_lane_s16(v: int16x8_t) -> i16 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vgetq_lane_p16(v: poly16x8_t) -> p16 { static_assert_imm3!(IMM5); simd_extract(v, IMM5 as u32) @@ -3383,6 +4731,10 @@ pub unsafe fn vgetq_lane_p16(v: poly16x8_t) -> p16 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vgetq_lane_s32(v: int32x4_t) -> i32 { static_assert_imm2!(IMM5); simd_extract(v, IMM5 as u32) @@ -3394,6 +4746,10 @@ pub unsafe fn vgetq_lane_s32(v: int32x4_t) -> i32 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_lane_u8(v: uint8x8_t) -> u8 { static_assert_imm3!(IMM5); simd_extract(v, IMM5 as u32) @@ -3405,6 +4761,10 @@ pub unsafe fn vget_lane_u8(v: uint8x8_t) -> u8 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_lane_s8(v: int8x8_t) -> i8 { static_assert_imm3!(IMM5); simd_extract(v, IMM5 as u32) @@ -3416,6 +4776,10 @@ pub unsafe fn vget_lane_s8(v: int8x8_t) -> i8 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_lane_p8(v: poly8x8_t) -> p8 { static_assert_imm3!(IMM5); simd_extract(v, IMM5 as u32) @@ -3427,6 +4791,10 @@ pub unsafe fn vget_lane_p8(v: poly8x8_t) -> p8 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vgetq_lane_u8(v: uint8x16_t) -> u8 { static_assert_imm4!(IMM5); simd_extract(v, IMM5 as u32) @@ -3438,6 +4806,10 @@ pub unsafe fn 
vgetq_lane_u8(v: uint8x16_t) -> u8 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vgetq_lane_s8(v: int8x16_t) -> i8 { static_assert_imm4!(IMM5); simd_extract(v, IMM5 as u32) @@ -3449,6 +4821,10 @@ pub unsafe fn vgetq_lane_s8(v: int8x16_t) -> i8 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vgetq_lane_p8(v: poly8x16_t) -> p8 { static_assert_imm4!(IMM5); simd_extract(v, IMM5 as u32) @@ -3460,6 +4836,10 @@ pub unsafe fn vgetq_lane_p8(v: poly8x16_t) -> p8 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_high_s8(a: int8x16_t) -> int8x8_t { simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } @@ -3470,6 +4850,10 @@ pub unsafe fn vget_high_s8(a: int8x16_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_high_s16(a: int16x8_t) -> int16x4_t { simd_shuffle4!(a, a, [4, 5, 6, 7]) } @@ -3480,6 +4864,10 @@ pub unsafe fn vget_high_s16(a: int16x8_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_high_s32(a: int32x4_t) -> int32x2_t { simd_shuffle2!(a, a, [2, 3]) } @@ -3490,6 +4878,10 @@ pub unsafe fn vget_high_s32(a: int32x4_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_high_s64(a: int64x2_t) -> int64x1_t { int64x1_t(simd_extract(a, 1)) } @@ -3500,6 +4892,10 @@ pub unsafe fn vget_high_s64(a: int64x2_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_high_u8(a: uint8x16_t) -> uint8x8_t { simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } @@ -3510,6 +4906,10 @@ pub unsafe fn vget_high_u8(a: uint8x16_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = 
"1.59.0") +)] pub unsafe fn vget_high_u16(a: uint16x8_t) -> uint16x4_t { simd_shuffle4!(a, a, [4, 5, 6, 7]) } @@ -3520,6 +4920,10 @@ pub unsafe fn vget_high_u16(a: uint16x8_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_high_u32(a: uint32x4_t) -> uint32x2_t { simd_shuffle2!(a, a, [2, 3]) } @@ -3530,6 +4934,10 @@ pub unsafe fn vget_high_u32(a: uint32x4_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_high_u64(a: uint64x2_t) -> uint64x1_t { uint64x1_t(simd_extract(a, 1)) } @@ -3540,6 +4948,10 @@ pub unsafe fn vget_high_u64(a: uint64x2_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_high_p8(a: poly8x16_t) -> poly8x8_t { simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } @@ -3550,6 +4962,10 @@ pub unsafe fn vget_high_p8(a: poly8x16_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_high_p16(a: poly16x8_t) -> poly16x4_t { simd_shuffle4!(a, a, [4, 5, 6, 7]) } @@ -3560,6 +4976,10 @@ pub unsafe fn vget_high_p16(a: poly16x8_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_high_f32(a: float32x4_t) -> float32x2_t { simd_shuffle2!(a, a, [2, 3]) } @@ -3578,6 +4998,10 @@ pub unsafe fn vget_low_s8(a: int8x16_t) -> int8x8_t { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_low_s16(a: int16x8_t) -> int16x4_t { simd_shuffle4!(a, a, [0, 1, 2, 3]) } @@ -3587,6 +5011,10 @@ pub unsafe fn vget_low_s16(a: int16x8_t) -> int16x4_t { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_low_s32(a: int32x4_t) -> int32x2_t { simd_shuffle2!(a, a, [0, 1]) } @@ -3596,6 +5024,10 @@ pub unsafe fn vget_low_s32(a: int32x4_t) -> int32x2_t { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + 
stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_low_s64(a: int64x2_t) -> int64x1_t { int64x1_t(simd_extract(a, 0)) } @@ -3605,6 +5037,10 @@ pub unsafe fn vget_low_s64(a: int64x2_t) -> int64x1_t { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_low_u8(a: uint8x16_t) -> uint8x8_t { simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -3614,6 +5050,10 @@ pub unsafe fn vget_low_u8(a: uint8x16_t) -> uint8x8_t { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_low_u16(a: uint16x8_t) -> uint16x4_t { simd_shuffle4!(a, a, [0, 1, 2, 3]) } @@ -3623,6 +5063,10 @@ pub unsafe fn vget_low_u16(a: uint16x8_t) -> uint16x4_t { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_low_u32(a: uint32x4_t) -> uint32x2_t { simd_shuffle2!(a, a, [0, 1]) } @@ -3632,6 +5076,10 @@ pub unsafe fn vget_low_u32(a: uint32x4_t) -> uint32x2_t { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { uint64x1_t(simd_extract(a, 0)) } @@ -3641,6 +5089,10 @@ pub unsafe fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_low_p8(a: poly8x16_t) -> poly8x8_t { simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -3650,6 +5102,10 @@ pub unsafe fn vget_low_p8(a: poly8x16_t) -> poly8x8_t { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_low_p16(a: poly16x8_t) -> poly16x4_t { simd_shuffle4!(a, a, [0, 1, 2, 3]) } @@ -3659,6 +5115,10 @@ pub unsafe fn vget_low_p16(a: poly16x8_t) -> poly16x4_t { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_low_f32(a: float32x4_t) -> float32x2_t { simd_shuffle2!(a, a, [0, 1]) } @@ -3669,6 +5129,10 @@ pub unsafe fn vget_low_f32(a: float32x4_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdupq_n_s8(value: i8) -> int8x16_t { int8x16_t( value, value, value, value, value, value, value, value, value, value, value, 
value, value, @@ -3682,6 +5146,10 @@ pub unsafe fn vdupq_n_s8(value: i8) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdupq_n_s16(value: i16) -> int16x8_t { int16x8_t(value, value, value, value, value, value, value, value) } @@ -3692,6 +5160,10 @@ pub unsafe fn vdupq_n_s16(value: i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdupq_n_s32(value: i32) -> int32x4_t { int32x4_t(value, value, value, value) } @@ -3702,6 +5174,10 @@ pub unsafe fn vdupq_n_s32(value: i32) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdupq_n_s64(value: i64) -> int64x2_t { int64x2_t(value, value) } @@ -3712,6 +5188,10 @@ pub unsafe fn vdupq_n_s64(value: i64) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdupq_n_u8(value: u8) -> uint8x16_t { uint8x16_t( value, value, value, value, value, value, value, value, value, value, value, value, value, @@ -3725,6 +5205,10 @@ pub unsafe fn vdupq_n_u8(value: u8) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdupq_n_u16(value: u16) -> uint16x8_t { uint16x8_t(value, value, value, value, value, value, value, value) } @@ -3735,6 +5219,10 @@ pub unsafe fn vdupq_n_u16(value: u16) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdupq_n_u32(value: u32) -> uint32x4_t { uint32x4_t(value, value, value, value) } @@ -3745,6 +5233,10 @@ pub unsafe fn vdupq_n_u32(value: u32) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdupq_n_u64(value: u64) -> uint64x2_t { uint64x2_t(value, value) } @@ -3755,6 +5247,10 @@ pub unsafe fn vdupq_n_u64(value: u64) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdupq_n_p8(value: p8) -> poly8x16_t { poly8x16_t( value, value, value, value, value, value, value, value, value, value, value, value, value, @@ -3768,6 +5264,10 @@ pub unsafe fn vdupq_n_p8(value: p8) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdupq_n_p16(value: p16) -> poly16x8_t { poly16x8_t(value, value, value, value, value, value, value, value) } @@ -3778,6 +5278,10 @@ pub unsafe fn vdupq_n_p16(value: p16) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdupq_n_f32(value: f32) -> float32x4_t { float32x4_t(value, value, value, value) } @@ -3791,6 +5295,10 @@ pub unsafe fn vdupq_n_f32(value: f32) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] unsafe fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t { float32x4_t(value, value, value, value) } @@ -3801,6 +5309,10 @@ unsafe fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdup_n_s8(value: i8) -> int8x8_t { int8x8_t(value, value, value, value, value, value, value, value) } @@ -3811,6 +5323,10 @@ pub unsafe fn vdup_n_s8(value: i8) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdup_n_s16(value: i16) -> int16x4_t { int16x4_t(value, value, value, value) } @@ -3821,6 +5337,10 @@ pub unsafe fn vdup_n_s16(value: i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdup_n_s32(value: i32) -> int32x2_t { int32x2_t(value, value) } @@ -3831,6 +5351,10 @@ pub unsafe fn vdup_n_s32(value: i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))] +#[cfg_attr( + target_arch = "aarch64", + 
stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdup_n_s64(value: i64) -> int64x1_t { int64x1_t(value) } @@ -3841,6 +5365,10 @@ pub unsafe fn vdup_n_s64(value: i64) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdup_n_u8(value: u8) -> uint8x8_t { uint8x8_t(value, value, value, value, value, value, value, value) } @@ -3851,6 +5379,10 @@ pub unsafe fn vdup_n_u8(value: u8) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdup_n_u16(value: u16) -> uint16x4_t { uint16x4_t(value, value, value, value) } @@ -3861,6 +5393,10 @@ pub unsafe fn vdup_n_u16(value: u16) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdup_n_u32(value: u32) -> uint32x2_t { uint32x2_t(value, value) } @@ -3871,6 +5407,10 @@ pub unsafe fn vdup_n_u32(value: u32) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdup_n_u64(value: u64) -> uint64x1_t { uint64x1_t(value) } @@ -3881,6 +5421,10 @@ pub unsafe fn vdup_n_u64(value: u64) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdup_n_p8(value: p8) -> poly8x8_t { poly8x8_t(value, value, value, value, value, value, value, value) } @@ -3891,6 +5435,10 @@ pub unsafe fn vdup_n_p8(value: p8) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdup_n_p16(value: p16) -> poly16x4_t { poly16x4_t(value, value, value, value) } @@ -3901,6 +5449,10 @@ pub unsafe fn vdup_n_p16(value: p16) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vdup_n_f32(value: f32) -> float32x2_t { float32x2_t(value, value) } @@ -3914,6 +5466,10 @@ pub unsafe fn vdup_n_f32(value: f32) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] unsafe fn vdup_n_f32_vfp4(value: f32) -> float32x2_t { float32x2_t(value, value) } @@ -3924,6 +5480,10 @@ unsafe fn vdup_n_f32_vfp4(value: f32) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vldrq_p128(a: *const p128) -> p128 { *a } @@ -3934,6 +5494,10 @@ pub unsafe fn vldrq_p128(a: *const p128) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vstrq_p128(a: *mut p128, b: p128) { *a = b; } @@ -3944,6 +5508,10 @@ pub unsafe fn vstrq_p128(a: *mut p128, b: p128) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmov_n_s8(value: i8) -> int8x8_t { vdup_n_s8(value) } @@ -3954,6 +5522,10 @@ pub unsafe fn vmov_n_s8(value: i8) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmov_n_s16(value: i16) -> int16x4_t { vdup_n_s16(value) } @@ -3964,6 +5536,10 @@ pub unsafe fn vmov_n_s16(value: i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmov_n_s32(value: i32) -> int32x2_t { vdup_n_s32(value) } @@ -3974,6 +5550,10 @@ pub unsafe fn vmov_n_s32(value: i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmov_n_s64(value: i64) -> int64x1_t { vdup_n_s64(value) } @@ -3984,6 +5564,10 @@ pub unsafe fn vmov_n_s64(value: i64) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmov_n_u8(value: u8) -> uint8x8_t { vdup_n_u8(value) } @@ -3994,6 +5578,10 @@ pub unsafe fn vmov_n_u8(value: u8) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmov_n_u16(value: u16) -> uint16x4_t { vdup_n_u16(value) } @@ -4004,6 +5592,10 @@ pub unsafe fn vmov_n_u16(value: u16) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmov_n_u32(value: u32) -> uint32x2_t { vdup_n_u32(value) } @@ -4014,6 +5606,10 @@ pub unsafe fn vmov_n_u32(value: u32) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmov_n_u64(value: u64) -> uint64x1_t { vdup_n_u64(value) } @@ -4024,6 +5620,10 @@ pub unsafe fn vmov_n_u64(value: u64) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmov_n_p8(value: p8) -> poly8x8_t { vdup_n_p8(value) } @@ -4034,6 +5634,10 @@ pub unsafe fn vmov_n_p8(value: p8) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmov_n_p16(value: p16) -> poly16x4_t { vdup_n_p16(value) } @@ -4044,6 +5648,10 @@ pub unsafe fn vmov_n_p16(value: p16) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmov_n_f32(value: f32) -> float32x2_t { vdup_n_f32(value) } @@ -4054,6 +5662,10 @@ pub unsafe fn vmov_n_f32(value: f32) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovq_n_s8(value: i8) -> int8x16_t { vdupq_n_s8(value) } @@ -4064,6 +5676,10 @@ pub unsafe fn vmovq_n_s8(value: i8) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovq_n_s16(value: i16) -> int16x8_t { vdupq_n_s16(value) } @@ -4074,6 +5690,10 @@ pub unsafe fn vmovq_n_s16(value: i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovq_n_s32(value: i32) -> int32x4_t { vdupq_n_s32(value) } @@ -4084,6 +5704,10 @@ pub unsafe fn vmovq_n_s32(value: i32) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovq_n_s64(value: i64) -> int64x2_t { vdupq_n_s64(value) } @@ -4094,6 +5718,10 @@ pub unsafe fn vmovq_n_s64(value: i64) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovq_n_u8(value: u8) -> uint8x16_t { vdupq_n_u8(value) } @@ -4104,6 +5732,10 @@ pub unsafe fn vmovq_n_u8(value: u8) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovq_n_u16(value: u16) -> uint16x8_t { vdupq_n_u16(value) } @@ -4114,6 +5746,10 @@ pub unsafe fn vmovq_n_u16(value: u16) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovq_n_u32(value: u32) -> uint32x4_t { vdupq_n_u32(value) } @@ -4124,6 +5760,10 @@ pub unsafe fn vmovq_n_u32(value: u32) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovq_n_u64(value: u64) -> uint64x2_t { vdupq_n_u64(value) } @@ -4134,6 +5774,10 @@ pub unsafe fn vmovq_n_u64(value: u64) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovq_n_p8(value: p8) -> poly8x16_t { vdupq_n_p8(value) } @@ -4144,6 +5788,10 @@ pub unsafe fn vmovq_n_p8(value: p8) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovq_n_p16(value: p16) -> poly16x8_t { vdupq_n_p16(value) } @@ -4154,6 +5802,10 @@ pub unsafe fn vmovq_n_p16(value: p16) -> poly16x8_t { 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovq_n_f32(value: f32) -> float32x4_t { vdupq_n_f32(value) } @@ -4165,10 +5817,12 @@ pub unsafe fn vmovq_n_f32(value: f32) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("nop", N = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("nop", N = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vext_s64<const N: i32>(a: int64x1_t, _b: int64x1_t) -> int64x1_t { - if N != 0 { - unreachable_unchecked() - } + static_assert!(N : i32 where N == 0); a } @@ -4179,10 +5833,12 @@ pub unsafe fn vext_s64<const N: i32>(a: int64x1_t, _b: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("nop", N = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("nop", N = 0))] #[rustc_legacy_const_generics(2)] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vext_u64<const N: i32>(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_t { - if N != 0 { - unreachable_unchecked() - } + static_assert!(N : i32 where N == 0); a } @@ -4192,6 +5848,10 @@ pub unsafe fn vext_u64<const N: i32>(a: uint64x1_t, _b: uint64x1_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { vcnt_s8_(a) } @@ -4201,6 +5861,10 @@ pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { vcntq_s8_(a) } @@ -4210,6 +5874,10 @@ pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { transmute(vcnt_s8_(transmute(a))) } @@ -4219,6 +5887,10 @@ pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { transmute(vcntq_s8_(transmute(a))) } @@ -4228,6 +5900,10 @@ pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature =
"neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { transmute(vcnt_s8_(transmute(a))) } @@ -4237,6 +5913,10 @@ pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { transmute(vcntq_s8_(transmute(a))) } @@ -4247,6 +5927,10 @@ pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev16_s8(a: int8x8_t) -> int8x8_t { simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } @@ -4257,6 +5941,10 @@ pub unsafe fn vrev16_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev16q_s8(a: int8x16_t) -> int8x16_t { simd_shuffle16!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } @@ -4267,6 +5955,10 @@ pub unsafe fn vrev16q_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev16_u8(a: uint8x8_t) -> uint8x8_t { simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } @@ -4277,6 +5969,10 @@ pub unsafe fn vrev16_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t { simd_shuffle16!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } @@ -4287,6 +5983,10 @@ pub unsafe fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev16_p8(a: poly8x8_t) -> poly8x8_t { simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } @@ -4297,6 +5997,10 @@ pub unsafe fn vrev16_p8(a: poly8x8_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t { simd_shuffle16!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 
10, 13, 12, 15, 14]) } @@ -4307,6 +6011,10 @@ pub unsafe fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev32_s8(a: int8x8_t) -> int8x8_t { simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } @@ -4317,6 +6025,10 @@ pub unsafe fn vrev32_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev32q_s8(a: int8x16_t) -> int8x16_t { simd_shuffle16!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } @@ -4327,6 +6039,10 @@ pub unsafe fn vrev32q_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev32_u8(a: uint8x8_t) -> uint8x8_t { simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } @@ -4337,6 +6053,10 @@ pub unsafe fn vrev32_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t { simd_shuffle16!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } @@ -4347,6 +6067,10 @@ pub unsafe fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev32_s16(a: int16x4_t) -> int16x4_t { simd_shuffle4!(a, a, [1, 0, 3, 2]) } @@ -4357,6 +6081,10 @@ pub unsafe fn vrev32_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev32q_s16(a: int16x8_t) -> int16x8_t { simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } @@ -4367,6 +6095,10 @@ pub unsafe fn vrev32q_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev32_p16(a: poly16x4_t) -> poly16x4_t { simd_shuffle4!(a, a, [1, 0, 3, 2]) } @@ -4377,6 +6109,10 @@ pub unsafe fn vrev32_p16(a: poly16x4_t) -> poly16x4_t { #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t { simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } @@ -4387,6 +6123,10 @@ pub unsafe fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev32_u16(a: uint16x4_t) -> uint16x4_t { simd_shuffle4!(a, a, [1, 0, 3, 2]) } @@ -4397,6 +6137,10 @@ pub unsafe fn vrev32_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t { simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } @@ -4407,6 +6151,10 @@ pub unsafe fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev32_p8(a: poly8x8_t) -> poly8x8_t { simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } @@ -4417,6 +6165,10 @@ pub unsafe fn vrev32_p8(a: poly8x8_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t { simd_shuffle16!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } @@ -4427,6 +6179,10 @@ pub unsafe fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64_s8(a: int8x8_t) -> int8x8_t { simd_shuffle8!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -4437,6 +6193,10 @@ pub unsafe fn vrev64_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64q_s8(a: int8x16_t) -> int8x16_t { simd_shuffle16!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } @@ -4447,6 +6207,10 @@ pub unsafe fn vrev64q_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vrev64.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64_s16(a: int16x4_t) -> int16x4_t { simd_shuffle4!(a, a, [3, 2, 1, 0]) } @@ -4457,6 +6221,10 @@ pub unsafe fn vrev64_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64q_s16(a: int16x8_t) -> int16x8_t { simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } @@ -4467,6 +6235,10 @@ pub unsafe fn vrev64q_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64_s32(a: int32x2_t) -> int32x2_t { simd_shuffle2!(a, a, [1, 0]) } @@ -4477,6 +6249,10 @@ pub unsafe fn vrev64_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64q_s32(a: int32x4_t) -> int32x4_t { simd_shuffle4!(a, a, [1, 0, 3, 2]) } @@ -4487,6 +6263,10 @@ pub unsafe fn vrev64q_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64_u8(a: uint8x8_t) -> uint8x8_t { simd_shuffle8!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -4497,6 +6277,10 @@ pub unsafe fn vrev64_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t { simd_shuffle16!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } @@ -4507,6 +6291,10 @@ pub unsafe fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64_u16(a: uint16x4_t) -> uint16x4_t { simd_shuffle4!(a, a, [3, 2, 1, 0]) } @@ -4517,6 +6305,10 @@ pub unsafe fn vrev64_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since 
= "1.59.0") +)] pub unsafe fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t { simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } @@ -4527,6 +6319,10 @@ pub unsafe fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64_u32(a: uint32x2_t) -> uint32x2_t { simd_shuffle2!(a, a, [1, 0]) } @@ -4537,6 +6333,10 @@ pub unsafe fn vrev64_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t { simd_shuffle4!(a, a, [1, 0, 3, 2]) } @@ -4547,6 +6347,10 @@ pub unsafe fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64_f32(a: float32x2_t) -> float32x2_t { simd_shuffle2!(a, a, [1, 0]) } @@ -4557,6 +6361,10 @@ pub unsafe fn vrev64_f32(a: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64q_f32(a: float32x4_t) -> float32x4_t { simd_shuffle4!(a, a, [1, 0, 3, 2]) } @@ -4567,6 +6375,10 @@ pub unsafe fn vrev64q_f32(a: float32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64_p8(a: poly8x8_t) -> poly8x8_t { simd_shuffle8!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -4577,6 +6389,10 @@ pub unsafe fn vrev64_p8(a: poly8x8_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t { simd_shuffle16!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } @@ -4587,6 +6403,10 @@ pub unsafe fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64_p16(a: poly16x4_t) -> poly16x4_t { simd_shuffle4!(a, a, [3, 2, 1, 0]) } @@ -4597,6 +6417,10 @@ pub unsafe fn vrev64_p16(a: 
poly16x4_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t { simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } @@ -4607,6 +6431,10 @@ pub unsafe fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { #[cfg(target_arch = "arm")] { @@ -4624,6 +6452,10 @@ pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s16))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { #[cfg(target_arch = "arm")] { @@ -4641,6 +6473,10 @@ pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s32))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { #[cfg(target_arch = "arm")] { @@ -4658,6 +6494,10 @@ pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { #[cfg(target_arch = "arm")] { @@ -4675,6 +6515,10 @@ pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s16))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { #[cfg(target_arch = "arm")] { @@ -4692,6 +6536,10 @@ pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s32))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { #[cfg(target_arch = "arm")] { @@ -4709,6 +6557,10 @@ pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u8))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t {
     #[cfg(target_arch = "arm")]
     {
@@ -4726,6 +6578,10 @@ pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u16))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t {
     #[cfg(target_arch = "arm")]
     {
@@ -4743,6 +6599,10 @@ pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u32))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t {
     #[cfg(target_arch = "arm")]
     {
@@ -4760,6 +6620,10 @@ pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u8))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
     #[cfg(target_arch = "arm")]
     {
@@ -4777,6 +6641,10 @@ pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u16))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
     #[cfg(target_arch = "arm")]
     {
@@ -4794,6 +6662,10 @@ pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u32))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
     #[cfg(target_arch = "arm")]
     {
diff --git a/crates/core_arch/src/mod.rs b/crates/core_arch/src/mod.rs
index acf78ca82f..ec28bd62c4 100644
--- a/crates/core_arch/src/mod.rs
+++ b/crates/core_arch/src/mod.rs
@@ -50,8 +50,9 @@ pub mod arch {
     /// See the [module documentation](../index.html) for more details.
     #[cfg(any(target_arch = "aarch64", doc))]
     #[doc(cfg(target_arch = "aarch64"))]
-    #[unstable(feature = "stdsimd", issue = "27731")]
+    #[stable(feature = "neon_intrinsics", since = "1.59.0")]
     pub mod aarch64 {
+        #[stable(feature = "neon_intrinsics", since = "1.59.0")]
         pub use crate::core_arch::aarch64::*;
     }

diff --git a/crates/stdarch-gen/src/main.rs b/crates/stdarch-gen/src/main.rs
index 6a45ec1e41..0295a2296c 100644
--- a/crates/stdarch-gen/src/main.rs
+++ b/crates/stdarch-gen/src/main.rs
@@ -1064,9 +1064,9 @@ fn gen_aarch64(
     };
     let current_target = match target {
         Default => "neon",
-        ArmV7 => "v7",
-        Vfp4 => "vfp4",
-        FPArmV8 => "fp-armv8,v8",
+        ArmV7 => "neon",
+        Vfp4 => "neon",
+        FPArmV8 => "neon",
         AES => "neon,aes",
         FCMA => "neon,fcma",
         Dotprod => "neon,dotprod",
@@ -1367,12 +1367,18 @@ fn gen_aarch64(
             }
         }
     };
+    let stable = match target {
+        Default | ArmV7 | Vfp4 | FPArmV8 | AES => {
+            String::from("\n#[stable(feature = \"neon_intrinsics\", since = \"1.59.0\")]")
+        }
+        _ => String::new(),
+    };
     let function = format!(
         r#"
 {}
 #[inline]
 #[target_feature(enable = "{}")]
-#[cfg_attr(test, assert_instr({}{}))]{}
+#[cfg_attr(test, assert_instr({}{}))]{}{}
 {}{{
     {}
 }}
@@ -1382,6 +1388,7 @@ fn gen_aarch64(
         current_aarch64,
         const_assert,
         const_legacy,
+        stable,
         fn_decl,
         call_params
     );
@@ -2327,20 +2334,26 @@ fn gen_arm(
             fn_decl, multi_calls, ext_c_aarch64, aarch64_params
         )
         };
+        let stable_aarch64 = match target {
+            Default | ArmV7 | Vfp4 | FPArmV8 | AES => {
+                String::from("\n#[stable(feature = \"neon_intrinsics\", since = \"1.59.0\")]")
+            }
+            _ => String::new(),
+        };
         format!(
             r#"
 {}
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,{}")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr({}{}))]{}
+#[cfg_attr(test, assert_instr({}{}))]{}
 {}

 {}
 #[inline]
 #[cfg(target_arch = "aarch64")]
 #[target_feature(enable = "{}")]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr({}{}))]{}
+#[cfg_attr(test, assert_instr({}{}))]{}{}
 {}
 "#,
             current_comment,
@@ -2354,6 +2367,7 @@ fn gen_arm(
             expand_intrinsic(&current_aarch64, in_t[1]),
             const_assert,
             const_legacy,
+            stable_aarch64,
             call_aarch64,
         )
     } else {
@@ -2389,6 +2403,10 @@ fn gen_arm(
                 String::new()
             }
         };
+        let stable_aarch64 = match target {
+            Default | ArmV7 | Vfp4 | FPArmV8 | AES => String::from("\n#[cfg_attr(target_arch = \"aarch64\", stable(feature = \"neon_intrinsics\", since = \"1.59.0\"))]"),
+            _ => String::new(),
+        };
         format!(
             r#"
 {}
@@ -2396,7 +2414,7 @@ fn gen_arm(
 #[target_feature(enable = "{}")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "{}"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr({}{}))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr({}{}))]{}
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr({}{}))]{}{}
 {}
 "#,
             current_comment,
@@ -2407,6 +2425,7 @@ fn gen_arm(
             expand_intrinsic(&current_aarch64, in_t[1]),
             const_assert,
             const_legacy,
+            stable_aarch64,
             call,
         )
     };