#[rustfmt::skip]
mod generated;
#[rustfmt::skip]
pub use self::generated::*;
use crate::{
core_arch::simd::*, core_arch::simd_llvm::*, hint::unreachable_unchecked, mem::transmute,
};
#[cfg(test)]
use stdarch_test::assert_instr;
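// ARM "polynomial" element types (carry-less/GF(2) arithmetic); they have no native Rust
// representation, so they are carried in unsigned integers of the same width.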
pub(crate) type p8 = u8;
pub(crate) type p16 = u16;
pub(crate) type p64 = u64;
pub(crate) type p128 = u128;
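// NEON vector types: 64-bit wide vectors (e.g. `int8x8_t`) and 128-bit wide vectors
// (e.g. `int8x16_t`), for each supported element type.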
types! {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct int8x8_t(pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct uint8x8_t(pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct poly8x8_t(pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct int16x4_t(pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct uint16x4_t(pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct poly16x4_t(pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct int32x2_t(pub(crate) i32, pub(crate) i32);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct uint32x2_t(pub(crate) u32, pub(crate) u32);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct float32x2_t(pub(crate) f32, pub(crate) f32);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct int64x1_t(pub(crate) i64);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct uint64x1_t(pub(crate) u64);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct poly64x1_t(pub(crate) p64);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct int8x16_t(
pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8,
pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8,
);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct uint8x16_t(
pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8,
pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8,
);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct poly8x16_t(
pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8,
pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8,
);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct int16x8_t(pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct uint16x8_t(pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct poly16x8_t(pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct int32x4_t(pub(crate) i32, pub(crate) i32, pub(crate) i32, pub(crate) i32);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct uint32x4_t(pub(crate) u32, pub(crate) u32, pub(crate) u32, pub(crate) u32);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct float32x4_t(pub(crate) f32, pub(crate) f32, pub(crate) f32, pub(crate) f32);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct int64x2_t(pub(crate) i64, pub(crate) i64);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct uint64x2_t(pub(crate) u64, pub(crate) u64);
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub struct poly64x2_t(pub(crate) p64, pub(crate) p64);
}
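// Aggregates of 2, 3 and 4 vectors of the same type, as returned and consumed by the
// structure load/store intrinsics (e.g. the `vld2`/`vst2` families).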
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int8x8x2_t(pub int8x8_t, pub int8x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int8x8x3_t(pub int8x8_t, pub int8x8_t, pub int8x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int8x8x4_t(pub int8x8_t, pub int8x8_t, pub int8x8_t, pub int8x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int8x16x2_t(pub int8x16_t, pub int8x16_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int8x16x3_t(pub int8x16_t, pub int8x16_t, pub int8x16_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int8x16x4_t(pub int8x16_t, pub int8x16_t, pub int8x16_t, pub int8x16_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint8x8x2_t(pub uint8x8_t, pub uint8x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint8x8x3_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint8x8x4_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t, pub uint8x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint8x16x2_t(pub uint8x16_t, pub uint8x16_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint8x16x3_t(pub uint8x16_t, pub uint8x16_t, pub uint8x16_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint8x16x4_t(
pub uint8x16_t,
pub uint8x16_t,
pub uint8x16_t,
pub uint8x16_t,
);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly8x8x2_t(pub poly8x8_t, pub poly8x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly8x8x3_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly8x8x4_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t, pub poly8x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly8x16x2_t(pub poly8x16_t, pub poly8x16_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly8x16x3_t(pub poly8x16_t, pub poly8x16_t, pub poly8x16_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly8x16x4_t(
pub poly8x16_t,
pub poly8x16_t,
pub poly8x16_t,
pub poly8x16_t,
);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int16x4x2_t(pub int16x4_t, pub int16x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int16x4x3_t(pub int16x4_t, pub int16x4_t, pub int16x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int16x4x4_t(pub int16x4_t, pub int16x4_t, pub int16x4_t, pub int16x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int16x8x2_t(pub int16x8_t, pub int16x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int16x8x3_t(pub int16x8_t, pub int16x8_t, pub int16x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int16x8x4_t(pub int16x8_t, pub int16x8_t, pub int16x8_t, pub int16x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint16x4x2_t(pub uint16x4_t, pub uint16x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint16x4x3_t(pub uint16x4_t, pub uint16x4_t, pub uint16x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint16x4x4_t(
pub uint16x4_t,
pub uint16x4_t,
pub uint16x4_t,
pub uint16x4_t,
);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint16x8x2_t(pub uint16x8_t, pub uint16x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint16x8x3_t(pub uint16x8_t, pub uint16x8_t, pub uint16x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint16x8x4_t(
pub uint16x8_t,
pub uint16x8_t,
pub uint16x8_t,
pub uint16x8_t,
);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly16x4x2_t(pub poly16x4_t, pub poly16x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly16x4x3_t(pub poly16x4_t, pub poly16x4_t, pub poly16x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly16x4x4_t(
pub poly16x4_t,
pub poly16x4_t,
pub poly16x4_t,
pub poly16x4_t,
);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly16x8x2_t(pub poly16x8_t, pub poly16x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly16x8x3_t(pub poly16x8_t, pub poly16x8_t, pub poly16x8_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly16x8x4_t(
pub poly16x8_t,
pub poly16x8_t,
pub poly16x8_t,
pub poly16x8_t,
);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int32x2x2_t(pub int32x2_t, pub int32x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int32x2x3_t(pub int32x2_t, pub int32x2_t, pub int32x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int32x2x4_t(pub int32x2_t, pub int32x2_t, pub int32x2_t, pub int32x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int32x4x2_t(pub int32x4_t, pub int32x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int32x4x3_t(pub int32x4_t, pub int32x4_t, pub int32x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int32x4x4_t(pub int32x4_t, pub int32x4_t, pub int32x4_t, pub int32x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint32x2x2_t(pub uint32x2_t, pub uint32x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint32x2x3_t(pub uint32x2_t, pub uint32x2_t, pub uint32x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint32x2x4_t(
pub uint32x2_t,
pub uint32x2_t,
pub uint32x2_t,
pub uint32x2_t,
);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint32x4x2_t(pub uint32x4_t, pub uint32x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint32x4x3_t(pub uint32x4_t, pub uint32x4_t, pub uint32x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint32x4x4_t(
pub uint32x4_t,
pub uint32x4_t,
pub uint32x4_t,
pub uint32x4_t,
);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct float32x2x2_t(pub float32x2_t, pub float32x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct float32x2x3_t(pub float32x2_t, pub float32x2_t, pub float32x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct float32x2x4_t(
pub float32x2_t,
pub float32x2_t,
pub float32x2_t,
pub float32x2_t,
);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct float32x4x2_t(pub float32x4_t, pub float32x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct float32x4x3_t(pub float32x4_t, pub float32x4_t, pub float32x4_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct float32x4x4_t(
pub float32x4_t,
pub float32x4_t,
pub float32x4_t,
pub float32x4_t,
);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int64x1x2_t(pub int64x1_t, pub int64x1_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int64x1x3_t(pub int64x1_t, pub int64x1_t, pub int64x1_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int64x1x4_t(pub int64x1_t, pub int64x1_t, pub int64x1_t, pub int64x1_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int64x2x2_t(pub int64x2_t, pub int64x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int64x2x3_t(pub int64x2_t, pub int64x2_t, pub int64x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct int64x2x4_t(pub int64x2_t, pub int64x2_t, pub int64x2_t, pub int64x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint64x1x2_t(pub uint64x1_t, pub uint64x1_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint64x1x3_t(pub uint64x1_t, pub uint64x1_t, pub uint64x1_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint64x1x4_t(
pub uint64x1_t,
pub uint64x1_t,
pub uint64x1_t,
pub uint64x1_t,
);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint64x2x2_t(pub uint64x2_t, pub uint64x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint64x2x3_t(pub uint64x2_t, pub uint64x2_t, pub uint64x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct uint64x2x4_t(
pub uint64x2_t,
pub uint64x2_t,
pub uint64x2_t,
pub uint64x2_t,
);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly64x1x2_t(pub poly64x1_t, pub poly64x1_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly64x1x3_t(pub poly64x1_t, pub poly64x1_t, pub poly64x1_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly64x1x4_t(
pub poly64x1_t,
pub poly64x1_t,
pub poly64x1_t,
pub poly64x1_t,
);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly64x2x2_t(pub poly64x2_t, pub poly64x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly64x2x3_t(pub poly64x2_t, pub poly64x2_t, pub poly64x2_t);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub struct poly64x2x4_t(
pub poly64x2_t,
pub poly64x2_t,
pub poly64x2_t,
pub poly64x2_t,
);
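// Bindings to the underlying LLVM intrinsics; the `link_name` on each declaration selects
// the ARM or AArch64 variant of the operation at compile time.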
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.abs.v8i8")]
fn vabs_s8_(a: int8x8_t) -> int8x8_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.abs.v4i16")]
fn vabs_s16_(a: int16x4_t) -> int16x4_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.abs.v2i32")]
fn vabs_s32_(a: int32x2_t) -> int32x2_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.abs.v16i8")]
fn vabsq_s8_(a: int8x16_t) -> int8x16_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.abs.v8i16")]
fn vabsq_s16_(a: int16x8_t) -> int16x8_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.abs.v4i32")]
fn vabsq_s32_(a: int32x4_t) -> int32x4_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sminp.v8i8")]
fn vpmins_v8i8(a: int8x8_t, b: int8x8_t) -> int8x8_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sminp.v4i16")]
fn vpmins_v4i16(a: int16x4_t, b: int16x4_t) -> int16x4_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sminp.v2i32")]
fn vpmins_v2i32(a: int32x2_t, b: int32x2_t) -> int32x2_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uminp.v8i8")]
fn vpminu_v8i8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uminp.v4i16")]
fn vpminu_v4i16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uminp.v2i32")]
fn vpminu_v2i32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminp.v2f32")]
fn vpminf_v2f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smaxp.v8i8")]
fn vpmaxs_v8i8(a: int8x8_t, b: int8x8_t) -> int8x8_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smaxp.v4i16")]
fn vpmaxs_v4i16(a: int16x4_t, b: int16x4_t) -> int16x4_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smaxp.v2i32")]
fn vpmaxs_v2i32(a: int32x2_t, b: int32x2_t) -> int32x2_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umaxp.v8i8")]
fn vpmaxu_v8i8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umaxp.v4i16")]
fn vpmaxu_v4i16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umaxp.v2i32")]
fn vpmaxu_v2i32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxp.v2f32")]
fn vpmaxf_v2f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.raddhn.v8i8")]
fn vraddhn_s16_(a: int16x8_t, b: int16x8_t) -> int8x8_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.raddhn.v4i16")]
fn vraddhn_s32_(a: int32x4_t, b: int32x4_t) -> int16x4_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.raddhn.v2i32")]
fn vraddhn_s64_(a: int64x2_t, b: int64x2_t) -> int32x2_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.addp.v4i16")]
fn vpadd_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.addp.v2i32")]
fn vpadd_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.addp.v8i8")]
fn vpadd_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i16.v8i8")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.saddlp.v4i16.v8i8"
)]
pub(crate) fn vpaddl_s8_(a: int8x8_t) -> int16x4_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i32.v4i16")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.saddlp.v2i32.v4i16"
)]
pub(crate) fn vpaddl_s16_(a: int16x4_t) -> int32x2_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v1i64.v2i32")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.saddlp.v1i64.v2i32"
)]
pub(crate) fn vpaddl_s32_(a: int32x2_t) -> int64x1_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v8i16.v16i8")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.saddlp.v8i16.v16i8"
)]
pub(crate) fn vpaddlq_s8_(a: int8x16_t) -> int16x8_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i32.v8i16")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.saddlp.v4i32.v8i16"
)]
pub(crate) fn vpaddlq_s16_(a: int16x8_t) -> int32x4_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i64.v4i32")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.saddlp.v2i64.v4i32"
)]
pub(crate) fn vpaddlq_s32_(a: int32x4_t) -> int64x2_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i16.v8i8")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.uaddlp.v4i16.v8i8"
)]
pub(crate) fn vpaddl_u8_(a: uint8x8_t) -> uint16x4_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i32.v4i16")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.uaddlp.v2i32.v4i16"
)]
pub(crate) fn vpaddl_u16_(a: uint16x4_t) -> uint32x2_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v1i64.v2i32")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.uaddlp.v1i64.v2i32"
)]
pub(crate) fn vpaddl_u32_(a: uint32x2_t) -> uint64x1_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v8i16.v16i8")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.uaddlp.v8i16.v16i8"
)]
pub(crate) fn vpaddlq_u8_(a: uint8x16_t) -> uint16x8_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i32.v8i16")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.uaddlp.v4i32.v8i16"
)]
pub(crate) fn vpaddlq_u16_(a: uint16x8_t) -> uint32x4_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i64.v4i32")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.uaddlp.v2i64.v4i32"
)]
pub(crate) fn vpaddlq_u32_(a: uint32x4_t) -> uint64x2_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.ctpop.v8i8")]
fn vcnt_s8_(a: int8x8_t) -> int8x8_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.ctpop.v16i8")]
fn vcntq_s8_(a: int8x16_t) -> int8x16_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.ctlz.v8i8")]
fn vclz_s8_(a: int8x8_t) -> int8x8_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.ctlz.v16i8")]
fn vclzq_s8_(a: int8x16_t) -> int8x16_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.ctlz.v4i16")]
fn vclz_s16_(a: int16x4_t) -> int16x4_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.ctlz.v8i16")]
fn vclzq_s16_(a: int16x8_t) -> int16x8_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.ctlz.v2i32")]
fn vclz_s32_(a: int32x2_t) -> int32x2_t;
#[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.ctlz.v4i32")]
fn vclzq_s32_(a: int32x4_t) -> int32x4_t;
}
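// `vld1{,q}_lane_*`: load one element from `ptr` into lane `LANE` of `src`, leaving the other
// lanes unchanged. `LANE` is validated at compile time against the vector's lane count.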
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x8_t) -> int8x8_t {
static_assert_uimm_bits!(LANE, 3);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x16_t) -> int8x16_t {
static_assert_uimm_bits!(LANE, 4);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x4_t) -> int16x4_t {
static_assert_uimm_bits!(LANE, 2);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x8_t) -> int16x8_t {
static_assert_uimm_bits!(LANE, 3);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x2_t) -> int32x2_t {
static_assert_uimm_bits!(LANE, 1);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x4_t) -> int32x4_t {
static_assert_uimm_bits!(LANE, 2);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, LANE = 0))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) -> int64x1_t {
static_assert!(LANE == 0);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) -> int64x2_t {
static_assert_uimm_bits!(LANE, 1);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x8_t) -> uint8x8_t {
static_assert_uimm_bits!(LANE, 3);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x16_t) -> uint8x16_t {
static_assert_uimm_bits!(LANE, 4);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x4_t) -> uint16x4_t {
static_assert_uimm_bits!(LANE, 2);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x8_t) -> uint16x8_t {
static_assert_uimm_bits!(LANE, 3);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x2_t) -> uint32x2_t {
static_assert_uimm_bits!(LANE, 1);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x4_t) -> uint32x4_t {
static_assert_uimm_bits!(LANE, 2);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, LANE = 0))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) -> uint64x1_t {
static_assert!(LANE == 0);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t) -> uint64x2_t {
static_assert_uimm_bits!(LANE, 1);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x8_t) -> poly8x8_t {
static_assert_uimm_bits!(LANE, 3);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x16_t) -> poly8x16_t {
static_assert_uimm_bits!(LANE, 4);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x4_t) -> poly16x4_t {
static_assert_uimm_bits!(LANE, 2);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x8_t) -> poly16x8_t {
static_assert_uimm_bits!(LANE, 3);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, LANE = 0))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) -> poly64x1_t {
static_assert!(LANE == 0);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x2_t) -> poly64x2_t {
static_assert_uimm_bits!(LANE, 1);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x2_t) -> float32x2_t {
static_assert_uimm_bits!(LANE, 1);
simd_insert(src, LANE as u32, *ptr)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x4_t) -> float32x4_t {
static_assert_uimm_bits!(LANE, 2);
simd_insert(src, LANE as u32, *ptr)
}
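// `vld1{,q}_dup_*`: load one element and replicate it to every lane. Most are implemented by
// loading into lane 0 of a zeroed vector and broadcasting that lane with `simd_shuffle!`;
// the single-lane 64-bit variants are plain loads.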
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t {
let x = vld1_lane_s8::<0>(ptr, transmute(i8x8::splat(0)));
simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t {
let x = vld1q_lane_s8::<0>(ptr, transmute(i8x16::splat(0)));
simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t {
let x = vld1_lane_s16::<0>(ptr, transmute(i16x4::splat(0)));
simd_shuffle!(x, x, [0, 0, 0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t {
let x = vld1q_lane_s16::<0>(ptr, transmute(i16x8::splat(0)));
simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t {
let x = vld1_lane_s32::<0>(ptr, transmute(i32x2::splat(0)));
simd_shuffle!(x, x, [0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t {
let x = vld1q_lane_s32::<0>(ptr, transmute(i32x4::splat(0)));
simd_shuffle!(x, x, [0, 0, 0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t {
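// With a single 64-bit lane there is nothing to replicate, so this is a plain load.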
#[cfg(target_arch = "aarch64")]
{
crate::core_arch::aarch64::vld1_s64(ptr)
}
#[cfg(target_arch = "arm")]
{
crate::core_arch::arm::vld1_s64(ptr)
}
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t {
let x = vld1q_lane_s64::<0>(ptr, transmute(i64x2::splat(0)));
simd_shuffle!(x, x, [0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t {
let x = vld1_lane_u8::<0>(ptr, transmute(u8x8::splat(0)));
simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t {
let x = vld1q_lane_u8::<0>(ptr, transmute(u8x16::splat(0)));
simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t {
let x = vld1_lane_u16::<0>(ptr, transmute(u16x4::splat(0)));
simd_shuffle!(x, x, [0, 0, 0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t {
let x = vld1q_lane_u16::<0>(ptr, transmute(u16x8::splat(0)));
simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t {
let x = vld1_lane_u32::<0>(ptr, transmute(u32x2::splat(0)));
simd_shuffle!(x, x, [0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t {
let x = vld1q_lane_u32::<0>(ptr, transmute(u32x4::splat(0)));
simd_shuffle!(x, x, [0, 0, 0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t {
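// With a single 64-bit lane there is nothing to replicate, so this is a plain load.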
#[cfg(target_arch = "aarch64")]
{
crate::core_arch::aarch64::vld1_u64(ptr)
}
#[cfg(target_arch = "arm")]
{
crate::core_arch::arm::vld1_u64(ptr)
}
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t {
let x = vld1q_lane_u64::<0>(ptr, transmute(u64x2::splat(0)));
simd_shuffle!(x, x, [0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t {
let x = vld1_lane_p8::<0>(ptr, transmute(u8x8::splat(0)));
simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t {
let x = vld1q_lane_p8::<0>(ptr, transmute(u8x16::splat(0)));
simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t {
let x = vld1_lane_p16::<0>(ptr, transmute(u16x4::splat(0)));
simd_shuffle!(x, x, [0, 0, 0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t {
let x = vld1q_lane_p16::<0>(ptr, transmute(u16x8::splat(0)));
simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t {
let x = vld1_lane_f32::<0>(ptr, transmute(f32x2::splat(0.)));
simd_shuffle!(x, x, [0, 0])
}
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t {
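// With a single 64-bit lane there is nothing to replicate, so this is a plain load.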
#[cfg(target_arch = "aarch64")]
{
crate::core_arch::aarch64::vld1_p64(ptr)
}
#[cfg(target_arch = "arm")]
{
crate::core_arch::arm::vld1_p64(ptr)
}
}
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t {
let x = vld1q_lane_p64::<0>(ptr, transmute(u64x2::splat(0)));
simd_shuffle!(x, x, [0, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t {
let x = vld1q_lane_f32::<0>(ptr, transmute(f32x4::splat(0.)));
simd_shuffle!(x, x, [0, 0, 0, 0])
}
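// `vaba{,q}_*`: absolute difference and accumulate, i.e. `a + |b - c|` element-wise, built
// from the corresponding `vabd`/`vabdq` intrinsic followed by a vector add.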
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaba_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
simd_add(a, vabd_s8(b, c))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaba_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
simd_add(a, vabd_s16(b, c))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaba_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
simd_add(a, vabd_s32(b, c))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaba_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
simd_add(a, vabd_u8(b, c))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaba_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t {
simd_add(a, vabd_u16(b, c))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaba_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t {
simd_add(a, vabd_u32(b, c))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
simd_add(a, vabdq_s8(b, c))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
simd_add(a, vabdq_s16(b, c))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
simd_add(a, vabdq_s32(b, c))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
simd_add(a, vabdq_u8(b, c))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
simd_add(a, vabdq_u16(b, c))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
simd_add(a, vabdq_u32(b, c))
}
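/// Signed absolute value of each 8-bit lane.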
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t {
vabs_s8_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t {
vabs_s16_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t {
vabs_s32_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t {
vabsq_s8_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t {
vabsq_s16_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t {
vabsq_s32_(a)
}
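/// Pairwise add: adjacent lanes of `a` are summed into the low half of the
/// result and adjacent lanes of `b` into the high half.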
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
vpadd_s16_(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
vpadd_s32_(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
vpadd_s8_(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
transmute(vpadd_s16_(transmute(a), transmute(b)))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
transmute(vpadd_s32_(transmute(a), transmute(b)))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
transmute(vpadd_s8_(transmute(a), transmute(b)))
}
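/// Lane-by-lane wrapping addition of `a` and `b`.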
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fadd))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fadd))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
simd_add(a, b)
}
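/// Signed add long: both operands are widened to 16-bit lanes before the add,
/// so the full sums are kept without wrapping.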
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t {
let a: int16x8_t = simd_cast(a);
let b: int16x8_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
let a: int32x4_t = simd_cast(a);
let b: int32x4_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t {
let a: int64x2_t = simd_cast(a);
let b: int64x2_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t {
let a: uint16x8_t = simd_cast(a);
let b: uint16x8_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t {
let a: uint32x4_t = simd_cast(a);
let b: uint32x4_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
let a: uint64x2_t = simd_cast(a);
let b: uint64x2_t = simd_cast(b);
simd_add(a, b)
}
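/// Signed add long using the high halves of the 128-bit inputs (lanes 8..=15).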
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
let a: int16x8_t = simd_cast(a);
let b: int16x8_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
let a: int32x4_t = simd_cast(a);
let b: int32x4_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
let a: int64x2_t = simd_cast(a);
let b: int64x2_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
let a: uint16x8_t = simd_cast(a);
let b: uint16x8_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
let a: uint32x4_t = simd_cast(a);
let b: uint32x4_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
let a: uint64x2_t = simd_cast(a);
let b: uint64x2_t = simd_cast(b);
simd_add(a, b)
}
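/// Signed add wide: the narrow operand `b` is widened to match `a` before the add.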
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t {
let b: int16x8_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t {
let b: int32x4_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t {
let b: int64x2_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
let b: uint16x8_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t {
let b: uint32x4_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t {
let b: uint64x2_t = simd_cast(b);
simd_add(a, b)
}
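/// Signed add wide using the high half of the narrow operand `b`.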
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
let b: int16x8_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
let b: int32x4_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
let b: int64x2_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
let b: uint16x8_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
let b: uint32x4_t = simd_cast(b);
simd_add(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
let b: uint64x2_t = simd_cast(b);
simd_add(a, b)
}
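/// Add and narrow, returning the high half of each sum: for 16-bit lanes this is
/// `(a + b) >> 8` truncated to `i8`, e.g. `0x1234 + 0x0101 = 0x1335 -> 0x13`.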
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t {
simd_cast(simd_shr(simd_add(a, b), int16x8_t(8, 8, 8, 8, 8, 8, 8, 8)))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t {
simd_cast(simd_shr(simd_add(a, b), int32x4_t(16, 16, 16, 16)))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t {
simd_cast(simd_shr(simd_add(a, b), int64x2_t(32, 32)))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t {
simd_cast(simd_shr(simd_add(a, b), uint16x8_t(8, 8, 8, 8, 8, 8, 8, 8)))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t {
simd_cast(simd_shr(simd_add(a, b), uint32x4_t(16, 16, 16, 16)))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
simd_cast(simd_shr(simd_add(a, b), uint64x2_t(32, 32)))
}
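/// Same as `vaddhn_s16`, but the narrowed sums are placed in the high half of
/// the result while `r` supplies the low half.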
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t {
let x = simd_cast(simd_shr(simd_add(a, b), int16x8_t(8, 8, 8, 8, 8, 8, 8, 8)));
simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t {
let x = simd_cast(simd_shr(simd_add(a, b), int32x4_t(16, 16, 16, 16)));
simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t {
let x = simd_cast(simd_shr(simd_add(a, b), int64x2_t(32, 32)));
simd_shuffle!(r, x, [0, 1, 2, 3])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t {
let x = simd_cast(simd_shr(simd_add(a, b), uint16x8_t(8, 8, 8, 8, 8, 8, 8, 8)));
simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t {
let x = simd_cast(simd_shr(simd_add(a, b), uint32x4_t(16, 16, 16, 16)));
simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t {
let x = simd_cast(simd_shr(simd_add(a, b), uint64x2_t(32, 32)));
simd_shuffle!(r, x, [0, 1, 2, 3])
}
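/// Rounding add and narrow, returning the high half of each sum; the rounding
/// constant (`1 << 7` for 16-bit lanes) is added before the high halves are taken.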
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t {
vraddhn_s16_(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t {
vraddhn_s32_(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t {
vraddhn_s64_(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t {
transmute(vraddhn_s16_(transmute(a), transmute(b)))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t {
transmute(vraddhn_s32_(transmute(a), transmute(b)))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
transmute(vraddhn_s64_(transmute(a), transmute(b)))
}
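/// Rounding add and narrow (high half): `r` supplies the low half of the result
/// and the narrowed, rounded sums supply the high half.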
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t {
let x = vraddhn_s16_(a, b);
simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t {
let x = vraddhn_s32_(a, b);
simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t {
let x = vraddhn_s64_(a, b);
simd_shuffle!(r, x, [0, 1, 2, 3])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t {
let x: uint8x8_t = transmute(vraddhn_s16_(transmute(a), transmute(b)));
simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t {
let x: uint16x4_t = transmute(vraddhn_s32_(transmute(a), transmute(b)));
simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vraddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t {
let x: uint32x2_t = transmute(vraddhn_s64_(transmute(a), transmute(b)));
simd_shuffle!(r, x, [0, 1, 2, 3])
}
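/// Signed pairwise add long: adjacent 8-bit lanes of `a` are summed into
/// 16-bit lanes, halving the lane count.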
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t {
vpaddl_s8_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t {
vpaddl_s16_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t {
vpaddl_s32_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t {
vpaddlq_s8_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t {
vpaddlq_s16_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t {
vpaddlq_s32_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t {
vpaddl_u8_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t {
vpaddl_u16_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t {
vpaddl_u32_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t {
vpaddlq_u8_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u16))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t {
vpaddlq_u16_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u32))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t {
vpaddlq_u32_(a)
}
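/// Narrows each 16-bit lane to 8 bits by keeping the least significant byte.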
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovn_s16(a: int16x8_t) -> int8x8_t {
simd_cast(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovn_s32(a: int32x4_t) -> int16x4_t {
simd_cast(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovn_s64(a: int64x2_t) -> int32x2_t {
simd_cast(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovn_u16(a: uint16x8_t) -> uint8x8_t {
simd_cast(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovn_u32(a: uint32x4_t) -> uint16x4_t {
simd_cast(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovn_u64(a: uint64x2_t) -> uint32x2_t {
simd_cast(a)
}
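/// Sign-extends each 8-bit lane to 16 bits.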
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sxtl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovl_s8(a: int8x8_t) -> int16x8_t {
simd_cast(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sxtl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovl_s16(a: int16x4_t) -> int32x4_t {
simd_cast(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sxtl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovl_s32(a: int32x2_t) -> int64x2_t {
simd_cast(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uxtl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovl_u8(a: uint8x8_t) -> uint16x8_t {
simd_cast(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uxtl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovl_u16(a: uint16x4_t) -> uint32x4_t {
simd_cast(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uxtl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovl_u32(a: uint32x2_t) -> uint64x2_t {
simd_cast(a)
}
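/// Bitwise NOT of each lane, implemented as XOR with an all-ones vector.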
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvn_s8(a: int8x8_t) -> int8x8_t {
let b = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
simd_xor(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvnq_s8(a: int8x16_t) -> int8x16_t {
let b = int8x16_t(
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
);
simd_xor(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvn_s16(a: int16x4_t) -> int16x4_t {
let b = int16x4_t(-1, -1, -1, -1);
simd_xor(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvnq_s16(a: int16x8_t) -> int16x8_t {
let b = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
simd_xor(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvn_s32(a: int32x2_t) -> int32x2_t {
let b = int32x2_t(-1, -1);
simd_xor(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvnq_s32(a: int32x4_t) -> int32x4_t {
let b = int32x4_t(-1, -1, -1, -1);
simd_xor(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvn_u8(a: uint8x8_t) -> uint8x8_t {
let b = uint8x8_t(255, 255, 255, 255, 255, 255, 255, 255);
simd_xor(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t {
let b = uint8x16_t(
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
);
simd_xor(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvn_u16(a: uint16x4_t) -> uint16x4_t {
let b = uint16x4_t(65_535, 65_535, 65_535, 65_535);
simd_xor(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t {
let b = uint16x8_t(
65_535, 65_535, 65_535, 65_535, 65_535, 65_535, 65_535, 65_535,
);
simd_xor(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvn_u32(a: uint32x2_t) -> uint32x2_t {
let b = uint32x2_t(4_294_967_295, 4_294_967_295);
simd_xor(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t {
let b = uint32x4_t(4_294_967_295, 4_294_967_295, 4_294_967_295, 4_294_967_295);
simd_xor(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvn_p8(a: poly8x8_t) -> poly8x8_t {
let b = poly8x8_t(255, 255, 255, 255, 255, 255, 255, 255);
simd_xor(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t {
let b = poly8x16_t(
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
);
simd_xor(a, b)
}
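/// Bitwise bit clear: computes `a & !b` lane by lane.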
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbic_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
let c = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
simd_and(simd_xor(b, c), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbicq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
let c = int8x16_t(
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
);
simd_and(simd_xor(b, c), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbic_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
let c = int16x4_t(-1, -1, -1, -1);
simd_and(simd_xor(b, c), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbicq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
let c = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
simd_and(simd_xor(b, c), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbic_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
let c = int32x2_t(-1, -1);
simd_and(simd_xor(b, c), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbicq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
let c = int32x4_t(-1, -1, -1, -1);
simd_and(simd_xor(b, c), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbic_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
let c = int64x1_t(-1);
simd_and(simd_xor(b, c), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbicq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
let c = int64x2_t(-1, -1);
simd_and(simd_xor(b, c), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbic_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
let c = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
simd_and(simd_xor(b, transmute(c)), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
let c = int8x16_t(
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
);
simd_and(simd_xor(b, transmute(c)), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbic_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
let c = int16x4_t(-1, -1, -1, -1);
simd_and(simd_xor(b, transmute(c)), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbicq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
let c = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
simd_and(simd_xor(b, transmute(c)), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbic_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
let c = int32x2_t(-1, -1);
simd_and(simd_xor(b, transmute(c)), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbicq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
let c = int32x4_t(-1, -1, -1, -1);
simd_and(simd_xor(b, transmute(c)), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbic_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
let c = int64x1_t(-1);
simd_and(simd_xor(b, transmute(c)), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
let c = int64x2_t(-1, -1);
simd_and(simd_xor(b, transmute(c)), a)
}
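// Bitwise select (`vbsl*`): for every bit, take the bit from `b` where the
// corresponding bit of the mask `a` is set, otherwise the bit from `c`,
// i.e. `(a & b) | (!a & c)`. For example, a = 0b1100, b = 0b1010, c = 0b0110
// yields 0b1010. The float and polynomial variants reuse the same integer
// bit operations through `transmute`.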
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
let not = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
let not = int16x4_t(-1, -1, -1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
let not = int32x2_t(-1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t {
let not = int64x1_t(-1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
let not = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t {
let not = int16x4_t(-1, -1, -1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t {
let not = int32x2_t(-1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_t {
let not = int64x1_t(-1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
let not = int32x2_t(-1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t {
let not = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_t {
let not = int16x4_t(-1, -1, -1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
let not = int8x16_t(
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
let not = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
let not = int32x4_t(-1, -1, -1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
let not = int64x2_t(-1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
let not = int8x16_t(
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
let not = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
let not = int32x4_t(-1, -1, -1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
let not = int64x2_t(-1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_t {
let not = int8x16_t(
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8_t {
let not = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_f32(a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
let not = int32x4_t(-1, -1, -1, -1);
transmute(simd_or(
simd_and(a, transmute(b)),
simd_and(simd_xor(a, transmute(not)), transmute(c)),
))
}
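// Inclusive OR NOT (`vorn*`): each lane of the result is `a | !b`, again
// formed by XOR-ing `b` with all ones and then OR-ing with `a`.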
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
let c = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
simd_or(simd_xor(b, c), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
let c = int8x16_t(
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
);
simd_or(simd_xor(b, c), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
let c = int16x4_t(-1, -1, -1, -1);
simd_or(simd_xor(b, c), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
let c = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
simd_or(simd_xor(b, c), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
let c = int32x2_t(-1, -1);
simd_or(simd_xor(b, c), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
let c = int32x4_t(-1, -1, -1, -1);
simd_or(simd_xor(b, c), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
let c = int64x1_t(-1);
simd_or(simd_xor(b, c), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
let c = int64x2_t(-1, -1);
simd_or(simd_xor(b, c), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
let c = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
simd_or(simd_xor(b, transmute(c)), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
let c = int8x16_t(
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
);
simd_or(simd_xor(b, transmute(c)), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
let c = int16x4_t(-1, -1, -1, -1);
simd_or(simd_xor(b, transmute(c)), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
let c = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
simd_or(simd_xor(b, transmute(c)), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
let c = int32x2_t(-1, -1);
simd_or(simd_xor(b, transmute(c)), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
let c = int32x4_t(-1, -1, -1, -1);
simd_or(simd_xor(b, transmute(c)), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
let c = int64x1_t(-1);
simd_or(simd_xor(b, transmute(c)), a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
let c = int64x2_t(-1, -1);
simd_or(simd_xor(b, transmute(c)), a)
}
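// Pairwise (folding) minimum (`vpmin*`): adjacent lane pairs of `a`, then of
// `b`, are reduced with `min`, so e.g. `vpmin_s8` returns
// [min(a0,a1), .., min(a6,a7), min(b0,b1), .., min(b6,b7)]. The reduction is
// performed by the generated `vpmin*_v*` bindings, lowered to
// `vpmin`/`sminp`/`uminp`/`fminp` as asserted below.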
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sminp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
vpmins_v8i8(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sminp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
vpmins_v4i16(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sminp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
vpmins_v2i32(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uminp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
vpminu_v8i8(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uminp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
vpminu_v4i16(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uminp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
vpminu_v2i32(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fminp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
vpminf_v2f32(a, b)
}
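// Pairwise (folding) maximum (`vpmax*`): the `max` counterpart of `vpmin*`,
// going through the generated `vpmax*_v*` bindings and lowering to
// `vpmax`/`smaxp`/`umaxp`/`fmaxp`.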
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smaxp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
vpmaxs_v8i8(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smaxp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
vpmaxs_v4i16(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smaxp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
vpmaxs_v2i32(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umaxp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
vpmaxu_v8i8(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umaxp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
vpmaxu_v4i16(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umaxp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
vpmaxu_v2i32(a, b)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmaxp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
vpmaxf_v2f32(a, b)
}
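// Lane extraction (`vget_lane_*` / `vgetq_lane_*`): the lane index is a const
// generic (and `rustc_legacy_const_generics` lets it also be passed as an
// ordinary second argument), checked at compile time with
// `static_assert_uimm_bits!` (or `static_assert!` for single-lane vectors)
// before being handed to `simd_extract`.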
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 1))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_u64<const IMM5: i32>(v: uint64x2_t) -> u64 {
static_assert_uimm_bits!(IMM5, 1);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 0))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_u64<const IMM5: i32>(v: uint64x1_t) -> u64 {
static_assert!(IMM5 == 0);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_u16<const IMM5: i32>(v: uint16x4_t) -> u16 {
static_assert_uimm_bits!(IMM5, 2);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_s16<const IMM5: i32>(v: int16x4_t) -> i16 {
static_assert_uimm_bits!(IMM5, 2);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_p16<const IMM5: i32>(v: poly16x4_t) -> p16 {
static_assert_uimm_bits!(IMM5, 2);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 1))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_u32<const IMM5: i32>(v: uint32x2_t) -> u32 {
static_assert_uimm_bits!(IMM5, 1);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 1))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_s32<const IMM5: i32>(v: int32x2_t) -> i32 {
static_assert_uimm_bits!(IMM5, 1);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 1))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_f32<const IMM5: i32>(v: float32x2_t) -> f32 {
static_assert_uimm_bits!(IMM5, 1);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 1))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_f32<const IMM5: i32>(v: float32x4_t) -> f32 {
static_assert_uimm_bits!(IMM5, 2);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 0))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_p64<const IMM5: i32>(v: poly64x1_t) -> p64 {
static_assert!(IMM5 == 0);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 0))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_p64<const IMM5: i32>(v: poly64x2_t) -> p64 {
static_assert_uimm_bits!(IMM5, 1);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 0))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_s64<const IMM5: i32>(v: int64x1_t) -> i64 {
static_assert!(IMM5 == 0);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 0))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_s64<const IMM5: i32>(v: int64x2_t) -> i64 {
static_assert_uimm_bits!(IMM5, 1);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_u16<const IMM5: i32>(v: uint16x8_t) -> u16 {
static_assert_uimm_bits!(IMM5, 3);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_u32<const IMM5: i32>(v: uint32x4_t) -> u32 {
static_assert_uimm_bits!(IMM5, 2);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_s16<const IMM5: i32>(v: int16x8_t) -> i16 {
static_assert_uimm_bits!(IMM5, 3);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_p16<const IMM5: i32>(v: poly16x8_t) -> p16 {
static_assert_uimm_bits!(IMM5, 3);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_s32<const IMM5: i32>(v: int32x4_t) -> i32 {
static_assert_uimm_bits!(IMM5, 2);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_u8<const IMM5: i32>(v: uint8x8_t) -> u8 {
static_assert_uimm_bits!(IMM5, 3);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_s8<const IMM5: i32>(v: int8x8_t) -> i8 {
static_assert_uimm_bits!(IMM5, 3);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_p8<const IMM5: i32>(v: poly8x8_t) -> p8 {
static_assert_uimm_bits!(IMM5, 3);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_u8<const IMM5: i32>(v: uint8x16_t) -> u8 {
static_assert_uimm_bits!(IMM5, 4);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_s8<const IMM5: i32>(v: int8x16_t) -> i8 {
static_assert_uimm_bits!(IMM5, 4);
simd_extract(v, IMM5 as u32)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_p8<const IMM5: i32>(v: poly8x16_t) -> p8 {
static_assert_uimm_bits!(IMM5, 4);
simd_extract(v, IMM5 as u32)
}
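// `vget_high_*`: return the upper half of a 128-bit vector by shuffling out
// its high lanes.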
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_s8(a: int8x16_t) -> int8x8_t {
simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_s16(a: int16x8_t) -> int16x4_t {
simd_shuffle!(a, a, [4, 5, 6, 7])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_s32(a: int32x4_t) -> int32x2_t {
simd_shuffle!(a, a, [2, 3])
}
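// The 64-bit variants produce a one-element vector, so a plain `simd_extract`
// of lane 1 is used instead of a shuffle (lane 0 in the `vget_low_*` versions).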
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_s64(a: int64x2_t) -> int64x1_t {
int64x1_t(simd_extract(a, 1))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_u8(a: uint8x16_t) -> uint8x8_t {
simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_u16(a: uint16x8_t) -> uint16x4_t {
simd_shuffle!(a, a, [4, 5, 6, 7])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_u32(a: uint32x4_t) -> uint32x2_t {
simd_shuffle!(a, a, [2, 3])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_u64(a: uint64x2_t) -> uint64x1_t {
uint64x1_t(simd_extract(a, 1))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_p8(a: poly8x16_t) -> poly8x8_t {
simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_p16(a: poly16x8_t) -> poly16x4_t {
simd_shuffle!(a, a, [4, 5, 6, 7])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_f32(a: float32x4_t) -> float32x2_t {
simd_shuffle!(a, a, [2, 3])
}
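// `vget_low_*`: the lower-half counterparts of `vget_high_*`, selecting lanes
// `0..n/2`; the low half already sits in the low bits of the register, which
// is why these are only asserted to compile to a `nop`.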
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "vget_low_s8", since = "1.60.0")
)]
pub unsafe fn vget_low_s8(a: int8x16_t) -> int8x8_t {
simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_s16(a: int16x8_t) -> int16x4_t {
simd_shuffle!(a, a, [0, 1, 2, 3])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_s32(a: int32x4_t) -> int32x2_t {
simd_shuffle!(a, a, [0, 1])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_s64(a: int64x2_t) -> int64x1_t {
int64x1_t(simd_extract(a, 0))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_u8(a: uint8x16_t) -> uint8x8_t {
simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_u16(a: uint16x8_t) -> uint16x4_t {
simd_shuffle!(a, a, [0, 1, 2, 3])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_u32(a: uint32x4_t) -> uint32x2_t {
simd_shuffle!(a, a, [0, 1])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_u64(a: uint64x2_t) -> uint64x1_t {
uint64x1_t(simd_extract(a, 0))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_p8(a: poly8x16_t) -> poly8x8_t {
simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_p16(a: poly16x8_t) -> poly16x4_t {
simd_shuffle!(a, a, [0, 1, 2, 3])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_f32(a: float32x4_t) -> float32x2_t {
simd_shuffle!(a, a, [0, 1])
}
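// `vdupq_n_*`: broadcast a scalar into every lane of a 128-bit vector by
// constructing the vector directly from repeated copies of `value`.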
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_s8(value: i8) -> int8x16_t {
int8x16_t(
value, value, value, value, value, value, value, value, value, value, value, value, value,
value, value, value,
)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_s16(value: i16) -> int16x8_t {
int16x8_t(value, value, value, value, value, value, value, value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_s32(value: i32) -> int32x4_t {
int32x4_t(value, value, value, value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_s64(value: i64) -> int64x2_t {
int64x2_t(value, value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_u8(value: u8) -> uint8x16_t {
uint8x16_t(
value, value, value, value, value, value, value, value, value, value, value, value, value,
value, value, value,
)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_u16(value: u16) -> uint16x8_t {
uint16x8_t(value, value, value, value, value, value, value, value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_u32(value: u32) -> uint32x4_t {
uint32x4_t(value, value, value, value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_u64(value: u64) -> uint64x2_t {
uint64x2_t(value, value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_p8(value: p8) -> poly8x16_t {
poly8x16_t(
value, value, value, value, value, value, value, value, value, value, value, value, value,
value, value, value,
)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_p16(value: p16) -> poly16x8_t {
poly16x8_t(value, value, value, value, value, value, value, value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdupq_n_f32(value: f32) -> float32x4_t {
float32x4_t(value, value, value, value)
}
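// Private copy of `vdupq_n_f32` gated on `vfp4` instead of `v7`, so that
// intrinsics which already enable `vfp4` (such as the fused multiply-add
// family) can reuse it without requiring an extra feature gate.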
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
unsafe fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t {
float32x4_t(value, value, value, value)
}
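/// Broadcast `value` into every lane of an `int8x8_t`.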
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_s8(value: i8) -> int8x8_t {
int8x8_t(value, value, value, value, value, value, value, value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_s16(value: i16) -> int16x4_t {
int16x4_t(value, value, value, value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_s32(value: i32) -> int32x2_t {
int32x2_t(value, value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_s64(value: i64) -> int64x1_t {
int64x1_t(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_u8(value: u8) -> uint8x8_t {
uint8x8_t(value, value, value, value, value, value, value, value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_u16(value: u16) -> uint16x4_t {
uint16x4_t(value, value, value, value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_u32(value: u32) -> uint32x2_t {
uint32x2_t(value, value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_u64(value: u64) -> uint64x1_t {
uint64x1_t(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_p8(value: p8) -> poly8x8_t {
poly8x8_t(value, value, value, value, value, value, value, value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_p16(value: p16) -> poly16x4_t {
poly16x4_t(value, value, value, value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vdup_n_f32(value: f32) -> float32x2_t {
float32x2_t(value, value)
}
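// Private copy of `vdup_n_f32` gated on `vfp4`; see `vdupq_n_f32_vfp4` above.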
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
unsafe fn vdup_n_f32_vfp4(value: f32) -> float32x2_t {
float32x2_t(value, value)
}
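/// Load a 128-bit polynomial (`p128`) value from the given pointer.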
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vldrq_p128(a: *const p128) -> p128 {
*a
}
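/// Store a 128-bit polynomial (`p128`) value to the given pointer.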
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vstrq_p128(a: *mut p128, b: p128) {
*a = b;
}
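/// Broadcast `value` into every lane of an `int8x8_t`.
///
/// The `vmov_n_*` intrinsics are thin wrappers around their `vdup_n_*`
/// counterparts.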
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_s8(value: i8) -> int8x8_t {
vdup_n_s8(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_s16(value: i16) -> int16x4_t {
vdup_n_s16(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_s32(value: i32) -> int32x2_t {
vdup_n_s32(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_s64(value: i64) -> int64x1_t {
vdup_n_s64(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_u8(value: u8) -> uint8x8_t {
vdup_n_u8(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_u16(value: u16) -> uint16x4_t {
vdup_n_u16(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_u32(value: u32) -> uint32x2_t {
vdup_n_u32(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_u64(value: u64) -> uint64x1_t {
vdup_n_u64(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_p8(value: p8) -> poly8x8_t {
vdup_n_p8(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_p16(value: p16) -> poly16x4_t {
vdup_n_p16(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmov_n_f32(value: f32) -> float32x2_t {
vdup_n_f32(value)
}
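/// Broadcast `value` into every lane of an `int8x16_t`.
///
/// The `vmovq_n_*` intrinsics are thin wrappers around their `vdupq_n_*`
/// counterparts.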
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_s8(value: i8) -> int8x16_t {
vdupq_n_s8(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_s16(value: i16) -> int16x8_t {
vdupq_n_s16(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_s32(value: i32) -> int32x4_t {
vdupq_n_s32(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_s64(value: i64) -> int64x2_t {
vdupq_n_s64(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_u8(value: u8) -> uint8x16_t {
vdupq_n_u8(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_u16(value: u16) -> uint16x8_t {
vdupq_n_u16(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_u32(value: u32) -> uint32x4_t {
vdupq_n_u32(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_u64(value: u64) -> uint64x2_t {
vdupq_n_u64(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_p8(value: p8) -> poly8x16_t {
vdupq_n_p8(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_p16(value: p16) -> poly16x8_t {
vdupq_n_p16(value)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vmovq_n_f32(value: f32) -> float32x4_t {
vdupq_n_f32(value)
}
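/// Extract vector from pair of vectors.
///
/// For one-element 64-bit vectors the only valid offset is `N == 0`, so the
/// result is simply `a`.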
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vext_s64<const N: i32>(a: int64x1_t, _b: int64x1_t) -> int64x1_t {
static_assert!(N == 0);
a
}
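/// Extract vector from pair of vectors (unsigned variant of `vext_s64`).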
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vext_u64<const N: i32>(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_t {
static_assert!(N == 0);
a
}
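/// Population count (number of set bits) per byte.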
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t {
vcnt_s8_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t {
vcntq_s8_(a)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t {
transmute(vcnt_s8_(transmute(a)))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t {
transmute(vcntq_s8_(transmute(a)))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t {
transmute(vcnt_s8_(transmute(a)))
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t {
transmute(vcntq_s8_(transmute(a)))
}
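/// Reverse the order of the 8-bit elements within each 16-bit halfword.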
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16_s8(a: int8x8_t) -> int8x8_t {
simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16q_s8(a: int8x16_t) -> int8x16_t {
simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16_u8(a: uint8x8_t) -> uint8x8_t {
simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t {
simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16_p8(a: poly8x8_t) -> poly8x8_t {
simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t {
simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14])
}
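/// Reverse the order of the 8-bit elements within each 32-bit word.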
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_s8(a: int8x8_t) -> int8x8_t {
simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_s8(a: int8x16_t) -> int8x16_t {
simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_u8(a: uint8x8_t) -> uint8x8_t {
simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t {
simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12])
}
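/// Reverse the order of the 16-bit elements within each 32-bit word.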
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_s16(a: int16x4_t) -> int16x4_t {
simd_shuffle!(a, a, [1, 0, 3, 2])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_s16(a: int16x8_t) -> int16x8_t {
simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_p16(a: poly16x4_t) -> poly16x4_t {
simd_shuffle!(a, a, [1, 0, 3, 2])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t {
simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_u16(a: uint16x4_t) -> uint16x4_t {
simd_shuffle!(a, a, [1, 0, 3, 2])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t {
simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_p8(a: poly8x8_t) -> poly8x8_t {
simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t {
simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12])
}
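/// Reverse the order of the 8-bit elements within each 64-bit doubleword.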
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_s8(a: int8x8_t) -> int8x8_t {
simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_s8(a: int8x16_t) -> int8x16_t {
simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_s16(a: int16x4_t) -> int16x4_t {
simd_shuffle!(a, a, [3, 2, 1, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_s16(a: int16x8_t) -> int16x8_t {
simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_s32(a: int32x2_t) -> int32x2_t {
simd_shuffle!(a, a, [1, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_s32(a: int32x4_t) -> int32x4_t {
simd_shuffle!(a, a, [1, 0, 3, 2])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_u8(a: uint8x8_t) -> uint8x8_t {
simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t {
simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_u16(a: uint16x4_t) -> uint16x4_t {
simd_shuffle!(a, a, [3, 2, 1, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t {
simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_u32(a: uint32x2_t) -> uint32x2_t {
simd_shuffle!(a, a, [1, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t {
simd_shuffle!(a, a, [1, 0, 3, 2])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_f32(a: float32x2_t) -> float32x2_t {
simd_shuffle!(a, a, [1, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_f32(a: float32x4_t) -> float32x4_t {
simd_shuffle!(a, a, [1, 0, 3, 2])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_p8(a: poly8x8_t) -> poly8x8_t {
simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t {
simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_p16(a: poly16x4_t) -> poly16x4_t {
simd_shuffle!(a, a, [3, 2, 1, 0])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t {
simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
}
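/// Signed Add and Accumulate Long Pairwise.
///
/// Adds adjacent pairs of elements of `b`, widening each sum, and accumulates
/// the results into `a`. On Arm this lowers to a dedicated `vpadal` intrinsic;
/// on AArch64 it is composed from a pairwise widening add followed by a
/// vector add.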
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t {
#[cfg(target_arch = "arm")]
{
crate::core_arch::arm::neon::vpadal_s8_(a, b)
}
#[cfg(target_arch = "aarch64")]
{
simd_add(vpaddl_s8_(b), a)
}
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t {
#[cfg(target_arch = "arm")]
{
crate::core_arch::arm::neon::vpadal_s16_(a, b)
}
#[cfg(target_arch = "aarch64")]
{
simd_add(vpaddl_s16_(b), a)
}
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t {
#[cfg(target_arch = "arm")]
{
crate::core_arch::arm::neon::vpadal_s32_(a, b)
}
#[cfg(target_arch = "aarch64")]
{
simd_add(vpaddl_s32_(b), a)
}
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
#[cfg(target_arch = "arm")]
{
crate::core_arch::arm::neon::vpadalq_s8_(a, b)
}
#[cfg(target_arch = "aarch64")]
{
simd_add(vpaddlq_s8_(b), a)
}
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
#[cfg(target_arch = "arm")]
{
crate::core_arch::arm::neon::vpadalq_s16_(a, b)
}
#[cfg(target_arch = "aarch64")]
{
simd_add(vpaddlq_s16_(b), a)
}
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
#[cfg(target_arch = "arm")]
{
crate::core_arch::arm::neon::vpadalq_s32_(a, b)
}
#[cfg(target_arch = "aarch64")]
{
simd_add(vpaddlq_s32_(b), a)
}
}
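/// Unsigned Add and Accumulate Long Pairwise.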
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t {
#[cfg(target_arch = "arm")]
{
crate::core_arch::arm::neon::vpadal_u8_(a, b)
}
#[cfg(target_arch = "aarch64")]
{
simd_add(vpaddl_u8_(b), a)
}
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t {
#[cfg(target_arch = "arm")]
{
crate::core_arch::arm::neon::vpadal_u16_(a, b)
}
#[cfg(target_arch = "aarch64")]
{
simd_add(vpaddl_u16_(b), a)
}
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t {
#[cfg(target_arch = "arm")]
{
crate::core_arch::arm::neon::vpadal_u32_(a, b)
}
#[cfg(target_arch = "aarch64")]
{
simd_add(vpaddl_u32_(b), a)
}
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
#[cfg(target_arch = "arm")]
{
crate::core_arch::arm::neon::vpadalq_u8_(a, b)
}
#[cfg(target_arch = "aarch64")]
{
simd_add(vpaddlq_u8_(b), a)
}
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
#[cfg(target_arch = "arm")]
{
crate::core_arch::arm::neon::vpadalq_u16_(a, b)
}
#[cfg(target_arch = "aarch64")]
{
simd_add(vpaddlq_u16_(b), a)
}
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
#[cfg(target_arch = "arm")]
{
crate::core_arch::arm::neon::vpadalq_u32_(a, b)
}
#[cfg(target_arch = "aarch64")]
{
simd_add(vpaddlq_u32_(b), a)
}
}
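/// Signed 8-bit integer matrix multiply-accumulate.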
#[inline]
#[cfg_attr(not(bootstrap), target_feature(enable = "i8mm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smmla))]
pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.smmla.v4i32.v16i8")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.smmla.v4i32.v16i8"
)]
fn vmmlaq_s32_(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t;
}
vmmlaq_s32_(a, b, c)
}
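/// Unsigned 8-bit integer matrix multiply-accumulate.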
#[inline]
#[cfg_attr(not(bootstrap), target_feature(enable = "i8mm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ummla))]
pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.ummla.v4i32.v16i8")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.ummla.v4i32.v16i8"
)]
fn vmmlaq_u32_(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t;
}
vmmlaq_u32_(a, b, c)
}
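/// Unsigned and signed 8-bit integer matrix multiply-accumulate.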
#[inline]
#[cfg_attr(not(bootstrap), target_feature(enable = "i8mm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usmmla))]
pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usmmla.v4i32.v16i8")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.usmmla.v4i32.v16i8"
)]
fn vusmmlaq_s32_(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t;
}
vusmmlaq_s32_(a, b, c)
}
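/// Join a low and a high 64-bit vector into a single 128-bit vector, with
/// `low` filling the lower lanes and `high` the upper lanes.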
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcombine_f32(low: float32x2_t, high: float32x2_t) -> float32x4_t {
simd_shuffle!(low, high, [0, 1, 2, 3])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcombine_p8(low: poly8x8_t, high: poly8x8_t) -> poly8x16_t {
simd_shuffle!(
low,
high,
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcombine_p16(low: poly16x4_t, high: poly16x4_t) -> poly16x8_t {
simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
target_arch = "aarch64",
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_s8(low: int8x8_t, high: int8x8_t) -> int8x16_t {
simd_shuffle!(
low,
high,
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
target_arch = "aarch64",
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_s16(low: int16x4_t, high: int16x4_t) -> int16x8_t {
simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
target_arch = "aarch64",
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_s32(low: int32x2_t, high: int32x2_t) -> int32x4_t {
simd_shuffle!(low, high, [0, 1, 2, 3])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
target_arch = "aarch64",
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_s64(low: int64x1_t, high: int64x1_t) -> int64x2_t {
simd_shuffle!(low, high, [0, 1])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
target_arch = "aarch64",
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_u8(low: uint8x8_t, high: uint8x8_t) -> uint8x16_t {
simd_shuffle!(
low,
high,
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
)
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
target_arch = "aarch64",
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_u16(low: uint16x4_t, high: uint16x4_t) -> uint16x8_t {
simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov))]
#[cfg_attr(
target_arch = "aarch64",
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_u32(low: uint32x2_t, high: uint32x2_t) -> uint32x4_t {
simd_shuffle!(low, high, [0, 1, 2, 3])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
target_arch = "aarch64",
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_u64(low: uint64x1_t, high: uint64x1_t) -> uint64x2_t {
simd_shuffle!(low, high, [0, 1])
}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
#[cfg_attr(
target_arch = "aarch64",
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_p64(low: poly64x1_t, high: poly64x1_t) -> poly64x2_t {
simd_shuffle!(low, high, [0, 1])
}
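// Unit tests for the intrinsics above, driven by `stdarch_test::simd_test`.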
#[cfg(test)]
mod tests {
use super::*;
#[cfg(target_arch = "aarch64")]
use crate::core_arch::aarch64::*;
#[cfg(target_arch = "arm")]
use crate::core_arch::arm::*;
use crate::core_arch::arm_shared::test_support::*;
use crate::core_arch::simd::*;
use std::{i16, i32, i8, mem::transmute, u16, u32, u8, vec::Vec};
use stdarch_test::simd_test;
#[simd_test(enable = "neon")]
unsafe fn test_vld1_lane_s8() {
let a = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let elem: i8 = 42;
let e = i8x8::new(0, 1, 2, 3, 4, 5, 6, 42);
let r: i8x8 = transmute(vld1_lane_s8::<7>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_lane_s8() {
let a = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let elem: i8 = 42;
let e = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 42);
let r: i8x16 = transmute(vld1q_lane_s8::<15>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_lane_s16() {
let a = i16x4::new(0, 1, 2, 3);
let elem: i16 = 42;
let e = i16x4::new(0, 1, 2, 42);
let r: i16x4 = transmute(vld1_lane_s16::<3>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_lane_s16() {
let a = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let elem: i16 = 42;
let e = i16x8::new(0, 1, 2, 3, 4, 5, 6, 42);
let r: i16x8 = transmute(vld1q_lane_s16::<7>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_lane_s32() {
let a = i32x2::new(0, 1);
let elem: i32 = 42;
let e = i32x2::new(0, 42);
let r: i32x2 = transmute(vld1_lane_s32::<1>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_lane_s32() {
let a = i32x4::new(0, 1, 2, 3);
let elem: i32 = 42;
let e = i32x4::new(0, 1, 2, 42);
let r: i32x4 = transmute(vld1q_lane_s32::<3>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_lane_s64() {
let a = i64x1::new(0);
let elem: i64 = 42;
let e = i64x1::new(42);
let r: i64x1 = transmute(vld1_lane_s64::<0>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_lane_s64() {
let a = i64x2::new(0, 1);
let elem: i64 = 42;
let e = i64x2::new(0, 42);
let r: i64x2 = transmute(vld1q_lane_s64::<1>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_lane_u8() {
let a = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let elem: u8 = 42;
let e = u8x8::new(0, 1, 2, 3, 4, 5, 6, 42);
let r: u8x8 = transmute(vld1_lane_u8::<7>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_lane_u8() {
let a = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let elem: u8 = 42;
let e = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 42);
let r: u8x16 = transmute(vld1q_lane_u8::<15>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_lane_u16() {
let a = u16x4::new(0, 1, 2, 3);
let elem: u16 = 42;
let e = u16x4::new(0, 1, 2, 42);
let r: u16x4 = transmute(vld1_lane_u16::<3>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_lane_u16() {
let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let elem: u16 = 42;
let e = u16x8::new(0, 1, 2, 3, 4, 5, 6, 42);
let r: u16x8 = transmute(vld1q_lane_u16::<7>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_lane_u32() {
let a = u32x2::new(0, 1);
let elem: u32 = 42;
let e = u32x2::new(0, 42);
let r: u32x2 = transmute(vld1_lane_u32::<1>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_lane_u32() {
let a = u32x4::new(0, 1, 2, 3);
let elem: u32 = 42;
let e = u32x4::new(0, 1, 2, 42);
let r: u32x4 = transmute(vld1q_lane_u32::<3>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_lane_u64() {
let a = u64x1::new(0);
let elem: u64 = 42;
let e = u64x1::new(42);
let r: u64x1 = transmute(vld1_lane_u64::<0>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_lane_u64() {
let a = u64x2::new(0, 1);
let elem: u64 = 42;
let e = u64x2::new(0, 42);
let r: u64x2 = transmute(vld1q_lane_u64::<1>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_lane_p8() {
let a = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let elem: p8 = 42;
let e = u8x8::new(0, 1, 2, 3, 4, 5, 6, 42);
let r: u8x8 = transmute(vld1_lane_p8::<7>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_lane_p8() {
let a = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let elem: p8 = 42;
let e = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 42);
let r: u8x16 = transmute(vld1q_lane_p8::<15>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_lane_p16() {
let a = u16x4::new(0, 1, 2, 3);
let elem: p16 = 42;
let e = u16x4::new(0, 1, 2, 42);
let r: u16x4 = transmute(vld1_lane_p16::<3>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_lane_p16() {
let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let elem: p16 = 42;
let e = u16x8::new(0, 1, 2, 3, 4, 5, 6, 42);
let r: u16x8 = transmute(vld1q_lane_p16::<7>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon,aes")]
unsafe fn test_vld1_lane_p64() {
let a = u64x1::new(0);
let elem: u64 = 42;
let e = u64x1::new(42);
let r: u64x1 = transmute(vld1_lane_p64::<0>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon,aes")]
unsafe fn test_vld1q_lane_p64() {
let a = u64x2::new(0, 1);
let elem: u64 = 42;
let e = u64x2::new(0, 42);
let r: u64x2 = transmute(vld1q_lane_p64::<1>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_lane_f32() {
let a = f32x2::new(0., 1.);
let elem: f32 = 42.;
let e = f32x2::new(0., 42.);
let r: f32x2 = transmute(vld1_lane_f32::<1>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_lane_f32() {
let a = f32x4::new(0., 1., 2., 3.);
let elem: f32 = 42.;
let e = f32x4::new(0., 1., 2., 42.);
let r: f32x4 = transmute(vld1q_lane_f32::<3>(&elem, transmute(a)));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_dup_s8() {
let elem: i8 = 42;
let e = i8x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let r: i8x8 = transmute(vld1_dup_s8(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_dup_s8() {
let elem: i8 = 42;
let e = i8x16::new(
42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
);
let r: i8x16 = transmute(vld1q_dup_s8(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_dup_s16() {
let elem: i16 = 42;
let e = i16x4::new(42, 42, 42, 42);
let r: i16x4 = transmute(vld1_dup_s16(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_dup_s16() {
let elem: i16 = 42;
let e = i16x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let r: i16x8 = transmute(vld1q_dup_s16(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_dup_s32() {
let elem: i32 = 42;
let e = i32x2::new(42, 42);
let r: i32x2 = transmute(vld1_dup_s32(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_dup_s32() {
let elem: i32 = 42;
let e = i32x4::new(42, 42, 42, 42);
let r: i32x4 = transmute(vld1q_dup_s32(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_dup_s64() {
let elem: i64 = 42;
let e = i64x1::new(42);
let r: i64x1 = transmute(vld1_dup_s64(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_dup_s64() {
let elem: i64 = 42;
let e = i64x2::new(42, 42);
let r: i64x2 = transmute(vld1q_dup_s64(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_dup_u8() {
let elem: u8 = 42;
let e = u8x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let r: u8x8 = transmute(vld1_dup_u8(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_dup_u8() {
let elem: u8 = 42;
let e = u8x16::new(
42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
);
let r: u8x16 = transmute(vld1q_dup_u8(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_dup_u16() {
let elem: u16 = 42;
let e = u16x4::new(42, 42, 42, 42);
let r: u16x4 = transmute(vld1_dup_u16(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_dup_u16() {
let elem: u16 = 42;
let e = u16x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let r: u16x8 = transmute(vld1q_dup_u16(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_dup_u32() {
let elem: u32 = 42;
let e = u32x2::new(42, 42);
let r: u32x2 = transmute(vld1_dup_u32(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_dup_u32() {
let elem: u32 = 42;
let e = u32x4::new(42, 42, 42, 42);
let r: u32x4 = transmute(vld1q_dup_u32(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_dup_u64() {
let elem: u64 = 42;
let e = u64x1::new(42);
let r: u64x1 = transmute(vld1_dup_u64(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_dup_u64() {
let elem: u64 = 42;
let e = u64x2::new(42, 42);
let r: u64x2 = transmute(vld1q_dup_u64(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_dup_p8() {
let elem: p8 = 42;
let e = u8x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let r: u8x8 = transmute(vld1_dup_p8(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_dup_p8() {
let elem: p8 = 42;
let e = u8x16::new(
42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
);
let r: u8x16 = transmute(vld1q_dup_p8(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_dup_p16() {
let elem: p16 = 42;
let e = u16x4::new(42, 42, 42, 42);
let r: u16x4 = transmute(vld1_dup_p16(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_dup_p16() {
let elem: p16 = 42;
let e = u16x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let r: u16x8 = transmute(vld1q_dup_p16(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon,aes")]
unsafe fn test_vld1_dup_p64() {
let elem: p64 = 42;
let e = u64x1::new(42);
let r: u64x1 = transmute(vld1_dup_p64(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon,aes")]
unsafe fn test_vld1q_dup_p64() {
let elem: p64 = 42;
let e = u64x2::new(42, 42);
let r: u64x2 = transmute(vld1q_dup_p64(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_dup_f32() {
let elem: f32 = 42.;
let e = f32x2::new(42., 42.);
let r: f32x2 = transmute(vld1_dup_f32(&elem));
assert_eq!(r, e)
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_dup_f32() {
let elem: f32 = 42.;
let e = f32x4::new(42., 42., 42., 42.);
let r: f32x4 = transmute(vld1q_dup_f32(&elem));
assert_eq!(r, e)
}
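// vget_lane / vgetq_lane: extract a single lane as a scalar. The lane index is
// a const generic and must be in range for the vector's lane count.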
#[simd_test(enable = "neon")]
unsafe fn test_vget_lane_u8() {
let v = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r = vget_lane_u8::<1>(transmute(v));
assert_eq!(r, 2);
}
#[simd_test(enable = "neon")]
unsafe fn test_vgetq_lane_u32() {
let v = u32x4::new(1, 2, 3, 4);
let r = vgetq_lane_u32::<1>(transmute(v));
assert_eq!(r, 2);
}
#[simd_test(enable = "neon")]
unsafe fn test_vgetq_lane_s32() {
let v = i32x4::new(1, 2, 3, 4);
let r = vgetq_lane_s32::<1>(transmute(v));
assert_eq!(r, 2);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_lane_u64() {
let v = u64x1::new(1);
let r = vget_lane_u64::<0>(transmute(v));
assert_eq!(r, 1);
}
#[simd_test(enable = "neon")]
unsafe fn test_vgetq_lane_u16() {
let v = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r = vgetq_lane_u16::<1>(transmute(v));
assert_eq!(r, 2);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_lane_s8() {
let v = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r = vget_lane_s8::<2>(transmute(v));
assert_eq!(r, 2);
let r = vget_lane_s8::<4>(transmute(v));
assert_eq!(r, 4);
let r = vget_lane_s8::<5>(transmute(v));
assert_eq!(r, 5);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_lane_p8() {
let v = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r = vget_lane_p8::<2>(transmute(v));
assert_eq!(r, 2);
let r = vget_lane_p8::<3>(transmute(v));
assert_eq!(r, 3);
let r = vget_lane_p8::<5>(transmute(v));
assert_eq!(r, 5);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_lane_p16() {
let v = u16x4::new(0, 1, 2, 3);
let r = vget_lane_p16::<2>(transmute(v));
assert_eq!(r, 2);
let r = vget_lane_p16::<3>(transmute(v));
assert_eq!(r, 3);
let r = vget_lane_p16::<0>(transmute(v));
assert_eq!(r, 0);
let r = vget_lane_p16::<1>(transmute(v));
assert_eq!(r, 1);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_lane_s16() {
let v = i16x4::new(0, 1, 2, 3);
let r = vget_lane_s16::<2>(transmute(v));
assert_eq!(r, 2);
let r = vget_lane_s16::<3>(transmute(v));
assert_eq!(r, 3);
let r = vget_lane_s16::<0>(transmute(v));
assert_eq!(r, 0);
let r = vget_lane_s16::<1>(transmute(v));
assert_eq!(r, 1);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_lane_u16() {
let v = u16x4::new(0, 1, 2, 3);
let r = vget_lane_u16::<2>(transmute(v));
assert_eq!(r, 2);
let r = vget_lane_u16::<3>(transmute(v));
assert_eq!(r, 3);
let r = vget_lane_u16::<0>(transmute(v));
assert_eq!(r, 0);
let r = vget_lane_u16::<1>(transmute(v));
assert_eq!(r, 1);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_lane_f32() {
let v = f32x2::new(0.0, 1.0);
let r = vget_lane_f32::<1>(transmute(v));
assert_eq!(r, 1.0);
let r = vget_lane_f32::<0>(transmute(v));
assert_eq!(r, 0.0);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_lane_s32() {
let v = i32x2::new(0, 1);
let r = vget_lane_s32::<1>(transmute(v));
assert_eq!(r, 1);
let r = vget_lane_s32::<0>(transmute(v));
assert_eq!(r, 0);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_lane_u32() {
let v = u32x2::new(0, 1);
let r = vget_lane_u32::<1>(transmute(v));
assert_eq!(r, 1);
let r = vget_lane_u32::<0>(transmute(v));
assert_eq!(r, 0);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_lane_s64() {
let v = i64x1::new(1);
let r = vget_lane_s64::<0>(transmute(v));
assert_eq!(r, 1);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_lane_p64() {
let v = u64x1::new(1);
let r = vget_lane_p64::<0>(transmute(v));
assert_eq!(r, 1);
}
#[simd_test(enable = "neon")]
unsafe fn test_vgetq_lane_s8() {
let v = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let r = vgetq_lane_s8::<7>(transmute(v));
assert_eq!(r, 7);
let r = vgetq_lane_s8::<13>(transmute(v));
assert_eq!(r, 13);
let r = vgetq_lane_s8::<3>(transmute(v));
assert_eq!(r, 3);
let r = vgetq_lane_s8::<0>(transmute(v));
assert_eq!(r, 0);
}
#[simd_test(enable = "neon")]
unsafe fn test_vgetq_lane_p8() {
let v = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let r = vgetq_lane_p8::<7>(transmute(v));
assert_eq!(r, 7);
let r = vgetq_lane_p8::<13>(transmute(v));
assert_eq!(r, 13);
let r = vgetq_lane_p8::<3>(transmute(v));
assert_eq!(r, 3);
let r = vgetq_lane_p8::<0>(transmute(v));
assert_eq!(r, 0);
}
#[simd_test(enable = "neon")]
unsafe fn test_vgetq_lane_u8() {
let v = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let r = vgetq_lane_u8::<7>(transmute(v));
assert_eq!(r, 7);
let r = vgetq_lane_u8::<13>(transmute(v));
assert_eq!(r, 13);
let r = vgetq_lane_u8::<3>(transmute(v));
assert_eq!(r, 3);
let r = vgetq_lane_u8::<0>(transmute(v));
assert_eq!(r, 0);
}
#[simd_test(enable = "neon")]
unsafe fn test_vgetq_lane_s16() {
let v = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r = vgetq_lane_s16::<3>(transmute(v));
assert_eq!(r, 3);
let r = vgetq_lane_s16::<6>(transmute(v));
assert_eq!(r, 6);
let r = vgetq_lane_s16::<0>(transmute(v));
assert_eq!(r, 0);
}
#[simd_test(enable = "neon")]
unsafe fn test_vgetq_lane_p16() {
let v = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r = vgetq_lane_p16::<3>(transmute(v));
assert_eq!(r, 3);
let r = vgetq_lane_p16::<7>(transmute(v));
assert_eq!(r, 7);
let r = vgetq_lane_p16::<1>(transmute(v));
assert_eq!(r, 1);
}
#[simd_test(enable = "neon")]
unsafe fn test_vgetq_lane_f32() {
let v = f32x4::new(0.0, 1.0, 2.0, 3.0);
let r = vgetq_lane_f32::<3>(transmute(v));
assert_eq!(r, 3.0);
let r = vgetq_lane_f32::<0>(transmute(v));
assert_eq!(r, 0.0);
let r = vgetq_lane_f32::<2>(transmute(v));
assert_eq!(r, 2.0);
let r = vgetq_lane_f32::<1>(transmute(v));
assert_eq!(r, 1.0);
}
#[simd_test(enable = "neon")]
unsafe fn test_vgetq_lane_s64() {
let v = i64x2::new(0, 1);
let r = vgetq_lane_s64::<1>(transmute(v));
assert_eq!(r, 1);
let r = vgetq_lane_s64::<0>(transmute(v));
assert_eq!(r, 0);
}
#[simd_test(enable = "neon")]
unsafe fn test_vgetq_lane_p64() {
let v = u64x2::new(0, 1);
let r = vgetq_lane_p64::<1>(transmute(v));
assert_eq!(r, 1);
let r = vgetq_lane_p64::<0>(transmute(v));
assert_eq!(r, 0);
}
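// vext_s64 / vext_u64 operate on single-lane vectors, so the only valid index
// is 0 and the result is just the first operand.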
#[simd_test(enable = "neon")]
unsafe fn test_vext_s64() {
let a: i64x1 = i64x1::new(0);
let b: i64x1 = i64x1::new(1);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vext_s64::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_u64() {
let a: u64x1 = u64x1::new(0);
let b: u64x1 = u64x1::new(1);
let e: u64x1 = u64x1::new(0);
let r: u64x1 = transmute(vext_u64::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
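// vget_high_*: return the upper half of a 128-bit vector as the corresponding
// 64-bit vector type.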
#[simd_test(enable = "neon")]
unsafe fn test_vget_high_s8() {
let a = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e = i8x8::new(9, 10, 11, 12, 13, 14, 15, 16);
let r: i8x8 = transmute(vget_high_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_high_s16() {
let a = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e = i16x4::new(5, 6, 7, 8);
let r: i16x4 = transmute(vget_high_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_high_s32() {
let a = i32x4::new(1, 2, 3, 4);
let e = i32x2::new(3, 4);
let r: i32x2 = transmute(vget_high_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_high_s64() {
let a = i64x2::new(1, 2);
let e = i64x1::new(2);
let r: i64x1 = transmute(vget_high_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_high_u8() {
let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e = u8x8::new(9, 10, 11, 12, 13, 14, 15, 16);
let r: u8x8 = transmute(vget_high_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_high_u16() {
let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e = u16x4::new(5, 6, 7, 8);
let r: u16x4 = transmute(vget_high_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_high_u32() {
let a = u32x4::new(1, 2, 3, 4);
let e = u32x2::new(3, 4);
let r: u32x2 = transmute(vget_high_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_high_u64() {
let a = u64x2::new(1, 2);
let e = u64x1::new(2);
let r: u64x1 = transmute(vget_high_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_high_p8() {
let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e = u8x8::new(9, 10, 11, 12, 13, 14, 15, 16);
let r: u8x8 = transmute(vget_high_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_high_p16() {
let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e = u16x4::new(5, 6, 7, 8);
let r: u16x4 = transmute(vget_high_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_high_f32() {
let a = f32x4::new(1.0, 2.0, 3.0, 4.0);
let e = f32x2::new(3.0, 4.0);
let r: f32x2 = transmute(vget_high_f32(transmute(a)));
assert_eq!(r, e);
}
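// vget_low_*: return the lower half of a 128-bit vector. Together with
// vget_high_* this splits a vector exactly in two; as a sketch (not part of
// the original suite), assuming vcombine_u8 from the parent module is in
// scope, the halves recombine losslessly:
// let q: uint8x16_t = vcombine_u8(vget_low_u8(v), vget_high_u8(v));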
#[simd_test(enable = "neon")]
unsafe fn test_vget_low_s8() {
let a = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: i8x8 = transmute(vget_low_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_low_s16() {
let a = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e = i16x4::new(1, 2, 3, 4);
let r: i16x4 = transmute(vget_low_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_low_s32() {
let a = i32x4::new(1, 2, 3, 4);
let e = i32x2::new(1, 2);
let r: i32x2 = transmute(vget_low_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_low_s64() {
let a = i64x2::new(1, 2);
let e = i64x1::new(1);
let r: i64x1 = transmute(vget_low_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_low_u8() {
let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: u8x8 = transmute(vget_low_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_low_u16() {
let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e = u16x4::new(1, 2, 3, 4);
let r: u16x4 = transmute(vget_low_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_low_u32() {
let a = u32x4::new(1, 2, 3, 4);
let e = u32x2::new(1, 2);
let r: u32x2 = transmute(vget_low_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_low_u64() {
let a = u64x2::new(1, 2);
let e = u64x1::new(1);
let r: u64x1 = transmute(vget_low_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_low_p8() {
let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: u8x8 = transmute(vget_low_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_low_p16() {
let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e = u16x4::new(1, 2, 3, 4);
let r: u16x4 = transmute(vget_low_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vget_low_f32() {
let a = f32x4::new(1.0, 2.0, 3.0, 4.0);
let e = f32x2::new(1.0, 2.0);
let r: f32x2 = transmute(vget_low_f32(transmute(a)));
assert_eq!(r, e);
}
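// vdupq_n_*: duplicate a scalar value into every lane of a 128-bit vector.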
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_n_s8() {
let v: i8 = 42;
let e = i8x16::new(
42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
);
let r: i8x16 = transmute(vdupq_n_s8(v));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_n_s16() {
let v: i16 = 64;
let e = i16x8::new(64, 64, 64, 64, 64, 64, 64, 64);
let r: i16x8 = transmute(vdupq_n_s16(v));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_n_s32() {
let v: i32 = 64;
let e = i32x4::new(64, 64, 64, 64);
let r: i32x4 = transmute(vdupq_n_s32(v));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_n_s64() {
let v: i64 = 64;
let e = i64x2::new(64, 64);
let r: i64x2 = transmute(vdupq_n_s64(v));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_n_u8() {
let v: u8 = 64;
let e = u8x16::new(
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
);
let r: u8x16 = transmute(vdupq_n_u8(v));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_n_u16() {
let v: u16 = 64;
let e = u16x8::new(64, 64, 64, 64, 64, 64, 64, 64);
let r: u16x8 = transmute(vdupq_n_u16(v));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_n_u32() {
let v: u32 = 64;
let e = u32x4::new(64, 64, 64, 64);
let r: u32x4 = transmute(vdupq_n_u32(v));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_n_u64() {
let v: u64 =