Abs(Vector128<Double>)
|
float64x2_t vabsq_f64 (float64x2_t a)
A64: FABS Vd.2D, Vn.2D
|
Abs(Vector128<Int64>)
|
int64x2_t vabsq_s64 (int64x2_t a)
A64: ABS Vd.2D, Vn.2D
|
AbsoluteCompareGreaterThan(Vector128<Double>, Vector128<Double>)
|
uint64x2_t vcagtq_f64 (float64x2_t a, float64x2_t b)
A64: FACGT Vd.2D, Vn.2D, Vm.2D
|
AbsoluteCompareGreaterThanOrEqual(Vector128<Double>, Vector128<Double>)
|
uint64x2_t vcageq_f64 (float64x2_t a, float64x2_t b)
A64: FACGE Vd.2D, Vn.2D, Vm.2D
|
AbsoluteCompareGreaterThanOrEqualScalar(Vector64<Double>, Vector64<Double>)
|
uint64x1_t vcage_f64 (float64x1_t a, float64x1_t b)
A64: FACGE Dd, Dn, Dm
|
AbsoluteCompareGreaterThanOrEqualScalar(Vector64<Single>, Vector64<Single>)
|
uint32_t vcages_f32 (float32_t a, float32_t b)
A64: FACGE Sd, Sn, Sm
|
AbsoluteCompareGreaterThanScalar(Vector64<Double>, Vector64<Double>)
|
uint64x1_t vcagt_f64 (float64x1_t a, float64x1_t b)
A64: FACGT Dd, Dn, Dm
|
AbsoluteCompareGreaterThanScalar(Vector64<Single>, Vector64<Single>)
|
uint32_t vcagts_f32 (float32_t a, float32_t b)
A64: FACGT Sd, Sn, Sm
|
AbsoluteCompareLessThan(Vector128<Double>, Vector128<Double>)
|
uint64x2_t vcaltq_f64 (float64x2_t a, float64x2_t b)
A64: FACGT Vd.2D, Vn.2D, Vm.2D
|
AbsoluteCompareLessThanOrEqual(Vector128<Double>, Vector128<Double>)
|
uint64x2_t vcaleq_f64 (float64x2_t a, float64x2_t b)
A64: FACGE Vd.2D, Vn.2D, Vm.2D
|
AbsoluteCompareLessThanOrEqualScalar(Vector64<Double>, Vector64<Double>)
|
uint64x1_t vcale_f64 (float64x1_t a, float64x1_t b)
A64: FACGE Dd, Dn, Dm
|
AbsoluteCompareLessThanOrEqualScalar(Vector64<Single>, Vector64<Single>)
|
uint32_t vcales_f32 (float32_t a, float32_t b)
A64: FACGE Sd, Sn, Sm
|
AbsoluteCompareLessThanScalar(Vector64<Double>, Vector64<Double>)
|
uint64x1_t vcalt_f64 (float64x1_t a, float64x1_t b)
A64: FACGT Dd, Dn, Dm
|
AbsoluteCompareLessThanScalar(Vector64<Single>, Vector64<Single>)
|
uint32_t vcalts_f32 (float32_t a, float32_t b)
A64: FACGT Sd, Sn, Sm
|
AbsoluteDifference(Vector128<Double>, Vector128<Double>)
|
float64x2_t vabdq_f64 (float64x2_t a, float64x2_t b)
A64: FABD Vd.2D, Vn.2D, Vm.2D
|
AbsoluteDifferenceScalar(Vector64<Double>, Vector64<Double>)
|
float64x1_t vabd_f64 (float64x1_t a, float64x1_t b)
A64: FABD Dd, Dn, Dm
|
AbsoluteDifferenceScalar(Vector64<Single>, Vector64<Single>)
|
float32_t vabds_f32 (float32_t a, float32_t b)
A64: FABD Sd, Sn, Sm
|
AbsSaturate(Vector128<Int64>)
|
int64x2_t vqabsq_s64 (int64x2_t a)
A64: SQABS Vd.2D, Vn.2D
|
AbsSaturateScalar(Vector64<Int16>)
|
int16_t vqabsh_s16 (int16_t a)
A64: SQABS Hd, Hn
|
AbsSaturateScalar(Vector64<Int32>)
|
int32_t vqabss_s32 (int32_t a)
A64: SQABS Sd, Sn
|
AbsSaturateScalar(Vector64<Int64>)
|
int64_t vqabsd_s64 (int64_t a)
A64: SQABS Dd, Dn
|
AbsSaturateScalar(Vector64<SByte>)
|
int8_t vqabsb_s8 (int8_t a)
A64: SQABS Bd, Bn
|
AbsScalar(Vector64<Int64>)
|
int64x1_t vabs_s64 (int64x1_t a)
A64: ABS Dd, Dn
|
Add(Vector128<Double>, Vector128<Double>)
|
float64x2_t vaddq_f64 (float64x2_t a, float64x2_t b)
A64: FADD Vd.2D, Vn.2D, Vm.2D
|
AddAcross(Vector128<Byte>)
|
uint8_t vaddvq_u8 (uint8x16_t a)
A64: ADDV Bd, Vn.16B
|
AddAcross(Vector128<Int16>)
|
int16_t vaddvq_s16 (int16x8_t a)
A64: ADDV Hd, Vn.8H
|
AddAcross(Vector128<Int32>)
|
int32_t vaddvq_s32 (int32x4_t a)
A64: ADDV Sd, Vn.4S
|
AddAcross(Vector128<SByte>)
|
int8_t vaddvq_s8 (int8x16_t a)
A64: ADDV Bd, Vn.16B
|
AddAcross(Vector128<UInt16>)
|
uint16_t vaddvq_u16 (uint16x8_t a)
A64: ADDV Hd, Vn.8H
|
AddAcross(Vector128<UInt32>)
|
uint32_t vaddvq_u32 (uint32x4_t a)
A64: ADDV Sd, Vn.4S
|
AddAcross(Vector64<Byte>)
|
uint8_t vaddv_u8 (uint8x8_t a)
A64: ADDV Bd, Vn.8B
|
AddAcross(Vector64<Int16>)
|
int16_t vaddv_s16 (int16x4_t a)
A64: ADDV Hd, Vn.4H
|
AddAcross(Vector64<SByte>)
|
int8_t vaddv_s8 (int8x8_t a)
A64: ADDV Bd, Vn.8B
|
AddAcross(Vector64<UInt16>)
|
uint16_t vaddv_u16 (uint16x4_t a)
A64: ADDV Hd, Vn.4H
|
AddAcrossWidening(Vector128<Byte>)
|
uint16_t vaddlvq_u8 (uint8x16_t a)
A64: UADDLV Hd, Vn.16B
|
AddAcrossWidening(Vector128<Int16>)
|
int32_t vaddlvq_s16 (int16x8_t a)
A64: SADDLV Sd, Vn.8H
|
AddAcrossWidening(Vector128<Int32>)
|
int64_t vaddlvq_s32 (int32x4_t a)
A64: SADDLV Dd, Vn.4S
|
AddAcrossWidening(Vector128<SByte>)
|
int16_t vaddlvq_s8 (int8x16_t a)
A64: SADDLV Hd, Vn.16B
|
AddAcrossWidening(Vector128<UInt16>)
|
uint32_t vaddlvq_u16 (uint16x8_t a)
A64: UADDLV Sd, Vn.8H
|
AddAcrossWidening(Vector128<UInt32>)
|
uint64_t vaddlvq_u32 (uint32x4_t a)
A64: UADDLV Dd, Vn.4S
|
AddAcrossWidening(Vector64<Byte>)
|
uint16_t vaddlv_u8 (uint8x8_t a)
A64: UADDLV Hd, Vn.8B
|
AddAcrossWidening(Vector64<Int16>)
|
int32_t vaddlv_s16 (int16x4_t a)
A64: SADDLV Sd, Vn.4H
|
AddAcrossWidening(Vector64<SByte>)
|
int16_t vaddlv_s8 (int8x8_t a)
A64: SADDLV Hd, Vn.8B
|
AddAcrossWidening(Vector64<UInt16>)
|
uint32_t vaddlv_u16 (uint16x4_t a)
A64: UADDLV Sd, Vn.4H
|
AddPairwise(Vector128<Byte>, Vector128<Byte>)
|
uint8x16_t vpaddq_u8 (uint8x16_t a, uint8x16_t b)
A64: ADDP Vd.16B, Vn.16B, Vm.16B
|
AddPairwise(Vector128<Double>, Vector128<Double>)
|
float64x2_t vpaddq_f64 (float64x2_t a, float64x2_t b)
A64: FADDP Vd.2D, Vn.2D, Vm.2D
|
AddPairwise(Vector128<Int16>, Vector128<Int16>)
|
int16x8_t vpaddq_s16 (int16x8_t a, int16x8_t b)
A64: ADDP Vd.8H, Vn.8H, Vm.8H
|
AddPairwise(Vector128<Int32>, Vector128<Int32>)
|
int32x4_t vpaddq_s32 (int32x4_t a, int32x4_t b)
A64: ADDP Vd.4S, Vn.4S, Vm.4S
|
AddPairwise(Vector128<Int64>, Vector128<Int64>)
|
int64x2_t vpaddq_s64 (int64x2_t a, int64x2_t b)
A64: ADDP Vd.2D, Vn.2D, Vm.2D
|
AddPairwise(Vector128<SByte>, Vector128<SByte>)
|
int8x16_t vpaddq_s8 (int8x16_t a, int8x16_t b)
A64: ADDP Vd.16B, Vn.16B, Vm.16B
|
AddPairwise(Vector128<Single>, Vector128<Single>)
|
float32x4_t vpaddq_f32 (float32x4_t a, float32x4_t b)
A64: FADDP Vd.4S, Vn.4S, Vm.4S
|
AddPairwise(Vector128<UInt16>, Vector128<UInt16>)
|
uint16x8_t vpaddq_u16 (uint16x8_t a, uint16x8_t b)
A64: ADDP Vd.8H, Vn.8H, Vm.8H
|
AddPairwise(Vector128<UInt32>, Vector128<UInt32>)
|
uint32x4_t vpaddq_u32 (uint32x4_t a, uint32x4_t b)
A64: ADDP Vd.4S, Vn.4S, Vm.4S
|
AddPairwise(Vector128<UInt64>, Vector128<UInt64>)
|
uint64x2_t vpaddq_u64 (uint64x2_t a, uint64x2_t b)
A64: ADDP Vd.2D, Vn.2D, Vm.2D
|
AddPairwiseScalar(Vector128<Double>)
|
float64_t vpaddd_f64 (float64x2_t a)
A64: FADDP Dd, Vn.2D
|
AddPairwiseScalar(Vector128<Int64>)
|
int64_t vpaddd_s64 (int64x2_t a)
A64: ADDP Dd, Vn.2D
|
AddPairwiseScalar(Vector128<UInt64>)
|
uint64_t vpaddd_u64 (uint64x2_t a)
A64: ADDP Dd, Vn.2D
|
AddPairwiseScalar(Vector64<Single>)
|
float32_t vpadds_f32 (float32x2_t a)
A64: FADDP Sd, Vn.2S
|
AddSaturate(Vector128<Byte>, Vector128<SByte>)
|
uint8x16_t vsqaddq_u8 (uint8x16_t a, int8x16_t b)
A64: USQADD Vd.16B, Vn.16B
|
AddSaturate(Vector128<Int16>, Vector128<UInt16>)
|
int16x8_t vuqaddq_s16 (int16x8_t a, uint16x8_t b)
A64: SUQADD Vd.8H, Vn.8H
|
AddSaturate(Vector128<Int32>, Vector128<UInt32>)
|
int32x4_t vuqaddq_s32 (int32x4_t a, uint32x4_t b)
A64: SUQADD Vd.4S, Vn.4S
|
AddSaturate(Vector128<Int64>, Vector128<UInt64>)
|
int64x2_t vuqaddq_s64 (int64x2_t a, uint64x2_t b)
A64: SUQADD Vd.2D, Vn.2D
|
AddSaturate(Vector128<SByte>, Vector128<Byte>)
|
int8x16_t vuqaddq_s8 (int8x16_t a, uint8x16_t b)
A64: SUQADD Vd.16B, Vn.16B
|
AddSaturate(Vector128<UInt16>, Vector128<Int16>)
|
uint16x8_t vsqaddq_u16 (uint16x8_t a, int16x8_t b)
A64: USQADD Vd.8H, Vn.8H
|
AddSaturate(Vector128<UInt32>, Vector128<Int32>)
|
uint32x4_t vsqaddq_u32 (uint32x4_t a, int32x4_t b)
A64: USQADD Vd.4S, Vn.4S
|
AddSaturate(Vector128<UInt64>, Vector128<Int64>)
|
uint64x2_t vsqaddq_u64 (uint64x2_t a, int64x2_t b)
A64: USQADD Vd.2D, Vn.2D
|
AddSaturate(Vector64<Byte>, Vector64<SByte>)
|
uint8x8_t vsqadd_u8 (uint8x8_t a, int8x8_t b)
A64: USQADD Vd.8B, Vn.8B
|
AddSaturate(Vector64<Int16>, Vector64<UInt16>)
|
int16x4_t vuqadd_s16 (int16x4_t a, uint16x4_t b)
A64: SUQADD Vd.4H, Vn.4H
|
AddSaturate(Vector64<Int32>, Vector64<UInt32>)
|
int32x2_t vuqadd_s32 (int32x2_t a, uint32x2_t b)
A64: SUQADD Vd.2S, Vn.2S
|
AddSaturate(Vector64<SByte>, Vector64<Byte>)
|
int8x8_t vuqadd_s8 (int8x8_t a, uint8x8_t b)
A64: SUQADD Vd.8B, Vn.8B
|
AddSaturate(Vector64<UInt16>, Vector64<Int16>)
|
uint16x4_t vsqadd_u16 (uint16x4_t a, int16x4_t b)
A64: USQADD Vd.4H, Vn.4H
|
AddSaturate(Vector64<UInt32>, Vector64<Int32>)
|
uint32x2_t vsqadd_u32 (uint32x2_t a, int32x2_t b)
A64: USQADD Vd.2S, Vn.2S
|
AddSaturateScalar(Vector64<Byte>, Vector64<Byte>)
|
uint8_t vqaddb_u8 (uint8_t a, uint8_t b)
A64: UQADD Bd, Bn, Bm
|
AddSaturateScalar(Vector64<Byte>, Vector64<SByte>)
|
uint8_t vsqaddb_u8 (uint8_t a, int8_t b)
A64: USQADD Bd, Bn
|
AddSaturateScalar(Vector64<Int16>, Vector64<Int16>)
|
int16_t vqaddh_s16 (int16_t a, int16_t b)
A64: SQADD Hd, Hn, Hm
|
AddSaturateScalar(Vector64<Int16>, Vector64<UInt16>)
|
int16_t vuqaddh_s16 (int16_t a, uint16_t b)
A64: SUQADD Hd, Hn
|
AddSaturateScalar(Vector64<Int32>, Vector64<Int32>)
|
int32_t vqadds_s32 (int32_t a, int32_t b)
A64: SQADD Sd, Sn, Sm
|
AddSaturateScalar(Vector64<Int32>, Vector64<UInt32>)
|
int32_t vuqadds_s32 (int32_t a, uint32_t b)
A64: SUQADD Sd, Sn
|
AddSaturateScalar(Vector64<Int64>, Vector64<UInt64>)
|
int64x1_t vuqadd_s64 (int64x1_t a, uint64x1_t b)
A64: SUQADD Dd, Dn
|
AddSaturateScalar(Vector64<SByte>, Vector64<Byte>)
|
int8_t vuqaddb_s8 (int8_t a, uint8_t b)
A64: SUQADD Bd, Bn
|
AddSaturateScalar(Vector64<SByte>, Vector64<SByte>)
|
int8_t vqaddb_s8 (int8_t a, int8_t b)
A64: SQADD Bd, Bn, Bm
|
AddSaturateScalar(Vector64<UInt16>, Vector64<Int16>)
|
uint16_t vsqaddh_u16 (uint16_t a, int16_t b)
A64: USQADD Hd, Hn
|
AddSaturateScalar(Vector64<UInt16>, Vector64<UInt16>)
|
uint16_t vqaddh_u16 (uint16_t a, uint16_t b)
A64: UQADD Hd, Hn, Hm
|
AddSaturateScalar(Vector64<UInt32>, Vector64<Int32>)
|
uint32_t vsqadds_u32 (uint32_t a, int32_t b)
A64: USQADD Sd, Sn
|
AddSaturateScalar(Vector64<UInt32>, Vector64<UInt32>)
|
uint32_t vqadds_u32 (uint32_t a, uint32_t b)
A64: UQADD Sd, Sn, Sm
|
AddSaturateScalar(Vector64<UInt64>, Vector64<Int64>)
|
uint64x1_t vsqadd_u64 (uint64x1_t a, int64x1_t b)
A64: USQADD Dd, Dn
|
Ceiling(Vector128<Double>)
|
float64x2_t vrndpq_f64 (float64x2_t a)
A64: FRINTP Vd.2D, Vn.2D
|
CompareEqual(Vector128<Double>, Vector128<Double>)
|
uint64x2_t vceqq_f64 (float64x2_t a, float64x2_t b)
A64: FCMEQ Vd.2D, Vn.2D, Vm.2D
|
CompareEqual(Vector128<Int64>, Vector128<Int64>)
|
uint64x2_t vceqq_s64 (int64x2_t a, int64x2_t b)
A64: CMEQ Vd.2D, Vn.2D, Vm.2D
|
CompareEqual(Vector128<UInt64>, Vector128<UInt64>)
|
uint64x2_t vceqq_u64 (uint64x2_t a, uint64x2_t b)
A64: CMEQ Vd.2D, Vn.2D, Vm.2D
|
CompareEqualScalar(Vector64<Double>, Vector64<Double>)
|
uint64x1_t vceq_f64 (float64x1_t a, float64x1_t b)
A64: FCMEQ Dd, Dn, Dm
|
CompareEqualScalar(Vector64<Int64>, Vector64<Int64>)
|
uint64x1_t vceq_s64 (int64x1_t a, int64x1_t b)
A64: CMEQ Dd, Dn, Dm
|
CompareEqualScalar(Vector64<Single>, Vector64<Single>)
|
uint32_t vceqs_f32 (float32_t a, float32_t b)
A64: FCMEQ Sd, Sn, Sm
|
CompareEqualScalar(Vector64<UInt64>, Vector64<UInt64>)
|
uint64x1_t vceq_u64 (uint64x1_t a, uint64x1_t b)
A64: CMEQ Dd, Dn, Dm
|
CompareGreaterThan(Vector128<Double>, Vector128<Double>)
|
uint64x2_t vcgtq_f64 (float64x2_t a, float64x2_t b)
A64: FCMGT Vd.2D, Vn.2D, Vm.2D
|
CompareGreaterThan(Vector128<Int64>, Vector128<Int64>)
|
uint64x2_t vcgtq_s64 (int64x2_t a, int64x2_t b)
A64: CMGT Vd.2D, Vn.2D, Vm.2D
|
CompareGreaterThan(Vector128<UInt64>, Vector128<UInt64>)
|
uint64x2_t vcgtq_u64 (uint64x2_t a, uint64x2_t b)
A64: CMHI Vd.2D, Vn.2D, Vm.2D
|
CompareGreaterThanOrEqual(Vector128<Double>, Vector128<Double>)
|
uint64x2_t vcgeq_f64 (float64x2_t a, float64x2_t b)
A64: FCMGE Vd.2D, Vn.2D, Vm.2D
|
CompareGreaterThanOrEqual(Vector128<Int64>, Vector128<Int64>)
|
uint64x2_t vcgeq_s64 (int64x2_t a, int64x2_t b)
A64: CMGE Vd.2D, Vn.2D, Vm.2D
|
CompareGreaterThanOrEqual(Vector128<UInt64>, Vector128<UInt64>)
|
uint64x2_t vcgeq_u64 (uint64x2_t a, uint64x2_t b)
A64: CMHS Vd.2D, Vn.2D, Vm.2D
|
CompareGreaterThanOrEqualScalar(Vector64<Double>, Vector64<Double>)
|
uint64x1_t vcge_f64 (float64x1_t a, float64x1_t b)
A64: FCMGE Dd, Dn, Dm
|
CompareGreaterThanOrEqualScalar(Vector64<Int64>, Vector64<Int64>)
|
uint64x1_t vcge_s64 (int64x1_t a, int64x1_t b)
A64: CMGE Dd, Dn, Dm
|
CompareGreaterThanOrEqualScalar(Vector64<Single>, Vector64<Single>)
|
uint32_t vcges_f32 (float32_t a, float32_t b)
A64: FCMGE Sd, Sn, Sm
|
CompareGreaterThanOrEqualScalar(Vector64<UInt64>, Vector64<UInt64>)
|
uint64x1_t vcge_u64 (uint64x1_t a, uint64x1_t b)
A64: CMHS Dd, Dn, Dm
|
CompareGreaterThanScalar(Vector64<Double>, Vector64<Double>)
|
uint64x1_t vcgt_f64 (float64x1_t a, float64x1_t b)
A64: FCMGT Dd, Dn, Dm
|
CompareGreaterThanScalar(Vector64<Int64>, Vector64<Int64>)
|
uint64x1_t vcgt_s64 (int64x1_t a, int64x1_t b)
A64: CMGT Dd, Dn, Dm
|
CompareGreaterThanScalar(Vector64<Single>, Vector64<Single>)
|
uint32_t vcgts_f32 (float32_t a, float32_t b)
A64: FCMGT Sd, Sn, Sm
|
CompareGreaterThanScalar(Vector64<UInt64>, Vector64<UInt64>)
|
uint64x1_t vcgt_u64 (uint64x1_t a, uint64x1_t b)
A64: CMHI Dd, Dn, Dm
|
CompareLessThan(Vector128<Double>, Vector128<Double>)
|
uint64x2_t vcltq_f64 (float64x2_t a, float64x2_t b)
A64: FCMGT Vd.2D, Vn.2D, Vm.2D
|
CompareLessThan(Vector128<Int64>, Vector128<Int64>)
|
uint64x2_t vcltq_s64 (int64x2_t a, int64x2_t b)
A64: CMGT Vd.2D, Vn.2D, Vm.2D
|
CompareLessThan(Vector128<UInt64>, Vector128<UInt64>)
|
uint64x2_t vcltq_u64 (uint64x2_t a, uint64x2_t b)
A64: CMHI Vd.2D, Vn.2D, Vm.2D
|
CompareLessThanOrEqual(Vector128<Double>, Vector128<Double>)
|
uint64x2_t vcleq_f64 (float64x2_t a, float64x2_t b)
A64: FCMGE Vd.2D, Vn.2D, Vm.2D
|
CompareLessThanOrEqual(Vector128<Int64>, Vector128<Int64>)
|
uint64x2_t vcleq_s64 (int64x2_t a, int64x2_t b)
A64: CMGE Vd.2D, Vn.2D, Vm.2D
|
CompareLessThanOrEqual(Vector128<UInt64>, Vector128<UInt64>)
|
uint64x2_t vcleq_u64 (uint64x2_t a, uint64x2_t b)
A64: CMHS Vd.2D, Vn.2D, Vm.2D
|
CompareLessThanOrEqualScalar(Vector64<Double>, Vector64<Double>)
|
uint64x1_t vcle_f64 (float64x1_t a, float64x1_t b)
A64: FCMGE Dd, Dn, Dm
|
CompareLessThanOrEqualScalar(Vector64<Int64>, Vector64<Int64>)
|
uint64x1_t vcle_s64 (int64x1_t a, int64x1_t b)
A64: CMGE Dd, Dn, Dm
|
CompareLessThanOrEqualScalar(Vector64<Single>, Vector64<Single>)
|
uint32_t vcles_f32 (float32_t a, float32_t b)
A64: FCMGE Sd, Sn, Sm
|
CompareLessThanOrEqualScalar(Vector64<UInt64>, Vector64<UInt64>)
|
uint64x1_t vcle_u64 (uint64x1_t a, uint64x1_t b)
A64: CMHS Dd, Dn, Dm
|
CompareLessThanScalar(Vector64<Double>, Vector64<Double>)
|
uint64x1_t vclt_f64 (float64x1_t a, float64x1_t b)
A64: FCMGT Dd, Dn, Dm
|
CompareLessThanScalar(Vector64<Int64>, Vector64<Int64>)
|
uint64x1_t vclt_s64 (int64x1_t a, int64x1_t b)
A64: CMGT Dd, Dn, Dm
|
CompareLessThanScalar(Vector64<Single>, Vector64<Single>)
|
uint32_t vclts_f32 (float32_t a, float32_t b)
A64: FCMGT Sd, Sn, Sm
|
CompareLessThanScalar(Vector64<UInt64>, Vector64<UInt64>)
|
uint64x1_t vclt_u64 (uint64x1_t a, uint64x1_t b)
A64: CMHI Dd, Dn, Dm
|
CompareTest(Vector128<Double>, Vector128<Double>)
|
uint64x2_t vtstq_f64 (float64x2_t a, float64x2_t b)
A64: CMTST Vd.2D, Vn.2D, Vm.2D Powyższy podpis natywny nie istnieje. To dodatkowe przeciążenie zapewnia spójność z innymi interfejsami API skalarnymi.
|
CompareTest(Vector128<Int64>, Vector128<Int64>)
|
uint64x2_t vtstq_s64 (int64x2_t a, int64x2_t b)
A64: CMTST Vd.2D, Vn.2D, Vm.2D
|
CompareTest(Vector128<UInt64>, Vector128<UInt64>)
|
uint64x2_t vtstq_u64 (uint64x2_t a, uint64x2_t b)
A64: CMTST Vd.2D, Vn.2D, Vm.2D
|
CompareTestScalar(Vector64<Double>, Vector64<Double>)
|
uint64x1_t vtst_f64 (float64x1_t a, float64x1_t b)
A64: CMTST Dd, Dn, Dm Powyższy podpis natywny nie istnieje. To dodatkowe przeciążenie zapewnia spójność z innymi interfejsami API skalarnymi.
|
CompareTestScalar(Vector64<Int64>, Vector64<Int64>)
|
uint64x1_t vtst_s64 (int64x1_t a, int64x1_t b)
A64: CMTST Dd, Dn, Dm
|
CompareTestScalar(Vector64<UInt64>, Vector64<UInt64>)
|
uint64x1_t vtst_u64 (uint64x1_t a, uint64x1_t b)
A64: CMTST Dd, Dn, Dm
|
ConvertToDouble(Vector128<Int64>)
|
float64x2_t vcvtq_f64_s64 (int64x2_t a)
A64: SCVTF Vd.2D, Vn.2D
|
ConvertToDouble(Vector128<UInt64>)
|
float64x2_t vcvtq_f64_u64 (uint64x2_t a)
A64: UCVTF Vd.2D, Vn.2D
|
ConvertToDouble(Vector64<Single>)
|
float64x2_t vcvt_f64_f32 (float32x2_t a)
A64: FCVTL Vd.2D, Vn.2S
|
ConvertToDoubleScalar(Vector64<Int64>)
|
float64x1_t vcvt_f64_s64 (int64x1_t a)
A64: SCVTF Dd, Dn
|
ConvertToDoubleScalar(Vector64<UInt64>)
|
float64x1_t vcvt_f64_u64 (uint64x1_t a)
A64: UCVTF Dd, Dn
|
ConvertToDoubleUpper(Vector128<Single>)
|
float64x2_t vcvt_high_f64_f32 (float32x4_t a)
A64: FCVTL2 Vd.2D, Vn.4S
|
ConvertToInt64RoundAwayFromZero(Vector128<Double>)
|
int64x2_t vcvtaq_s64_f64 (float64x2_t a)
A64: FCVTAS Vd.2D, Vn.2D
|
ConvertToInt64RoundAwayFromZeroScalar(Vector64<Double>)
|
int64x1_t vcvta_s64_f64 (float64x1_t a)
A64: FCVTAS Dd, Dn
|
ConvertToInt64RoundToEven(Vector128<Double>)
|
int64x2_t vcvtnq_s64_f64 (float64x2_t a)
A64: FCVTNS Vd.2D, Vn.2D
|
ConvertToInt64RoundToEvenScalar(Vector64<Double>)
|
int64x1_t vcvtn_s64_f64 (float64x1_t a)
A64: FCVTNS Dd, Dn
|
ConvertToInt64RoundToNegativeInfinity(Vector128<Double>)
|
int64x2_t vcvtmq_s64_f64 (float64x2_t a)
A64: FCVTMS Vd.2D, Vn.2D
|
ConvertToInt64RoundToNegativeInfinityScalar(Vector64<Double>)
|
int64x1_t vcvtm_s64_f64 (float64x1_t a)
A64: FCVTMS Dd, Dn
|
ConvertToInt64RoundToPositiveInfinity(Vector128<Double>)
|
int64x2_t vcvtpq_s64_f64 (float64x2_t a)
A64: FCVTPS Vd.2D, Vn.2D
|
ConvertToInt64RoundToPositiveInfinityScalar(Vector64<Double>)
|
int64x1_t vcvtp_s64_f64 (float64x1_t a)
A64: FCVTPS Dd, Dn
|
ConvertToInt64RoundToZero(Vector128<Double>)
|
int64x2_t vcvtq_s64_f64 (float64x2_t a)
A64: FCVTZS Vd.2D, Vn.2D
|
ConvertToInt64RoundToZeroScalar(Vector64<Double>)
|
int64x1_t vcvt_s64_f64 (float64x1_t a)
A64: FCVTZS Dd, Dn
|
ConvertToSingleLower(Vector128<Double>)
|
float32x2_t vcvt_f32_f64 (float64x2_t a)
A64: FCVTN Vd.2S, Vn.2D
|
ConvertToSingleRoundToOddLower(Vector128<Double>)
|
float32x2_t vcvtx_f32_f64 (float64x2_t a)
A64: FCVTXN Vd.2S, Vn.2D
|
ConvertToSingleRoundToOddUpper(Vector64<Single>, Vector128<Double>)
|
float32x4_t vcvtx_high_f32_f64 (float32x2_t r, float64x2_t a)
A64: FCVTXN2 Vd.4S, Vn.2D
|
ConvertToSingleUpper(Vector64<Single>, Vector128<Double>)
|
float32x4_t vcvt_high_f32_f64 (float32x2_t r, float64x2_t a)
A64: FCVTN2 Vd.4S, Vn.2D
|
ConvertToUInt64RoundAwayFromZero(Vector128<Double>)
|
uint64x2_t vcvtaq_u64_f64 (float64x2_t a)
A64: FCVTAU Vd.2D, Vn.2D
|
ConvertToUInt64RoundAwayFromZeroScalar(Vector64<Double>)
|
uint64x1_t vcvta_u64_f64 (float64x1_t a)
A64: FCVTAU Dd, Dn
|
ConvertToUInt64RoundToEven(Vector128<Double>)
|
uint64x2_t vcvtnq_u64_f64 (float64x2_t a)
A64: FCVTNU Vd.2D, Vn.2D
|
ConvertToUInt64RoundToEvenScalar(Vector64<Double>)
|
uint64x1_t vcvtn_u64_f64 (float64x1_t a)
A64: FCVTNU Dd, Dn
|
ConvertToUInt64RoundToNegativeInfinity(Vector128<Double>)
|
uint64x2_t vcvtmq_u64_f64 (float64x2_t a)
A64: FCVTMU Vd.2D, Vn.2D
|
ConvertToUInt64RoundToNegativeInfinityScalar(Vector64<Double>)
|
uint64x1_t vcvtm_u64_f64 (float64x1_t a)
A64: FCVTMU Dd, Dn
|
ConvertToUInt64RoundToPositiveInfinity(Vector128<Double>)
|
uint64x2_t vcvtpq_u64_f64 (float64x2_t a)
A64: FCVTPU Vd.2D, Vn.2D
|
ConvertToUInt64RoundToPositiveInfinityScalar(Vector64<Double>)
|
uint64x1_t vcvtp_u64_f64 (float64x1_t a)
A64: FCVTPU Dd, Dn
|
ConvertToUInt64RoundToZero(Vector128<Double>)
|
uint64x2_t vcvtq_u64_f64 (float64x2_t a)
A64: FCVTZU Vd.2D, Vn.2D
|
ConvertToUInt64RoundToZeroScalar(Vector64<Double>)
|
uint64x1_t vcvt_u64_f64 (float64x1_t a)
A64: FCVTZU Dd, Dn
|
Divide(Vector128<Double>, Vector128<Double>)
|
float64x2_t vdivq_f64 (float64x2_t a, float64x2_t b)
A64: FDIV Vd.2D, Vn.2D, Vm.2D
|
Divide(Vector128<Single>, Vector128<Single>)
|
float32x4_t vdivq_f32 (float32x4_t a, float32x4_t b)
A64: FDIV Vd.4S, Vn.4S, Vm.4S
|
Divide(Vector64<Single>, Vector64<Single>)
|
float32x2_t vdiv_f32 (float32x2_t a, float32x2_t b)
A64: FDIV Vd.2S, Vn.2S, Vm.2S
|
DuplicateSelectedScalarToVector128(Vector128<Double>, Byte)
|
float64x2_t vdupq_laneq_f64 (float64x2_t vec, const int lane)
A64: DUP Vd.2D, Vn.D[index]
|
DuplicateSelectedScalarToVector128(Vector128<Int64>, Byte)
|
int64x2_t vdupq_laneq_s64 (int64x2_t vec, const int lane)
A64: DUP Vd.2D, Vn.D[index]
|
DuplicateSelectedScalarToVector128(Vector128<UInt64>, Byte)
|
uint64x2_t vdupq_laneq_u64 (uint64x2_t vec, const int lane)
A64: DUP Vd.2D, Vn.D[index]
|
DuplicateToVector128(Double)
|
float64x2_t vdupq_n_f64 (float64_t value)
A64: DUP Vd.2D, Vn.D[0]
|
DuplicateToVector128(Int64)
|
int64x2_t vdupq_n_s64 (int64_t value)
A64: DUP Vd.2D, Rn
|
DuplicateToVector128(UInt64)
|
uint64x2_t vdupq_n_u64 (uint64_t value)
A64: DUP Vd.2D, Rn
|
Equals(Object)
|
Określa, czy dany obiekt jest taki sam, jak bieżący obiekt.
(Odziedziczone po Object)
|
ExtractNarrowingSaturateScalar(Vector64<Int16>)
|
int8_t vqmovnh_s16 (int16_t a)
A64: SQXTN Bd, Hn
|
ExtractNarrowingSaturateScalar(Vector64<Int32>)
|
int16_t vqmovns_s32 (int32_t a)
A64: SQXTN Hd, Sn
|
ExtractNarrowingSaturateScalar(Vector64<Int64>)
|
int32_t vqmovnd_s64 (int64_t a)
A64: SQXTN Sd, Dn
|
ExtractNarrowingSaturateScalar(Vector64<UInt16>)
|
uint8_t vqmovnh_u16 (uint16_t a)
A64: UQXTN Bd, Hn
|
ExtractNarrowingSaturateScalar(Vector64<UInt32>)
|
uint16_t vqmovns_u32 (uint32_t a)
A64: UQXTN Hd, Sn
|
ExtractNarrowingSaturateScalar(Vector64<UInt64>)
|
uint32_t vqmovnd_u64 (uint64_t a)
A64: UQXTN Sd, Dn
|
ExtractNarrowingSaturateUnsignedScalar(Vector64<Int16>)
|
uint8_t vqmovunh_s16 (int16_t a)
A64: SQXTUN Bd, Hn
|
ExtractNarrowingSaturateUnsignedScalar(Vector64<Int32>)
|
uint16_t vqmovuns_s32 (int32_t a)
A64: SQXTUN Hd, Sn
|
ExtractNarrowingSaturateUnsignedScalar(Vector64<Int64>)
|
uint32_t vqmovund_s64 (int64_t a)
A64: SQXTUN Sd, Dn
|
Floor(Vector128<Double>)
|
float64x2_t vrndmq_f64 (float64x2_t a)
A64: FRINTM Vd.2D, Vn.2D
|
FusedMultiplyAdd(Vector128<Double>, Vector128<Double>, Vector128<Double>)
|
float64x2_t vfmaq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
A64: FMLA Vd.2D, Vn.2D, Vm.2D
|
FusedMultiplyAddByScalar(Vector128<Double>, Vector128<Double>, Vector64<Double>)
|
float64x2_t vfmaq_n_f64 (float64x2_t a, float64x2_t b, float64_t n)
A64: FMLA Vd.2D, Vn.2D, Vm.D[0]
|
FusedMultiplyAddByScalar(Vector128<Single>, Vector128<Single>, Vector64<Single>)
|
float32x4_t vfmaq_n_f32 (float32x4_t a, float32x4_t b, float32_t n)
A64: FMLA Vd.4S, Vn.4S, Vm.S[0]
|
FusedMultiplyAddByScalar(Vector64<Single>, Vector64<Single>, Vector64<Single>)
|
float32x2_t vfma_n_f32 (float32x2_t a, float32x2_t b, float32_t n)
A64: FMLA Vd.2S, Vn.2S, Vm.S[0]
|
FusedMultiplyAddBySelectedScalar(Vector128<Double>, Vector128<Double>, Vector128<Double>, Byte)
|
float64x2_t vfmaq_laneq_f64 (float64x2_t a, float64x2_t b, float64x2_t v, const int lane)
A64: FMLA Vd.2D, Vn.2D, Vm.D[lane]
|
FusedMultiplyAddBySelectedScalar(Vector128<Single>, Vector128<Single>, Vector128<Single>, Byte)
|
float32x4_t vfmaq_laneq_f32 (float32x4_t a, float32x4_t b, float32x4_t v, const int lane)
A64: FMLA Vd.4S, Vn.4S, Vm.S[lane]
|
FusedMultiplyAddBySelectedScalar(Vector128<Single>, Vector128<Single>, Vector64<Single>, Byte)
|
float32x4_t vfmaq_lane_f32 (float32x4_t a, float32x4_t b, float32x2_t v, const int lane)
A64: FMLA Vd.4S, Vn.4S, Vm.S[lane]
|
FusedMultiplyAddBySelectedScalar(Vector64<Single>, Vector64<Single>, Vector128<Single>, Byte)
|
float32x2_t vfma_laneq_f32 (float32x2_t a, float32x2_t b, float32x4_t v, const int lane)
A64: FMLA Vd.2S, Vn.2S, Vm.S[lane]
|
FusedMultiplyAddBySelectedScalar(Vector64<Single>, Vector64<Single>, Vector64<Single>, Byte)
|
float32x2_t vfma_lane_f32 (float32x2_t a, float32x2_t b, float32x2_t v, const int lane)
A64: FMLA Vd.2S, Vn.2S, Vm.S[lane]
|
FusedMultiplyAddScalarBySelectedScalar(Vector64<Double>, Vector64<Double>, Vector128<Double>, Byte)
|
float64_t vfmad_laneq_f64 (float64_t a, float64_t b, float64x2_t v, const int lane)
A64: FMLA Dd, Dn, Vm.D[lane]
|
FusedMultiplyAddScalarBySelectedScalar(Vector64<Single>, Vector64<Single>, Vector128<Single>, Byte)
|
float32_t vfmas_laneq_f32 (float32_t a, float32_t b, float32x4_t v, const int lane)
A64: FMLA Sd, Sn, Vm.S[lane]
|
FusedMultiplyAddScalarBySelectedScalar(Vector64<Single>, Vector64<Single>, Vector64<Single>, Byte)
|
float32_t vfmas_lane_f32 (float32_t a, float32_t b, float32x2_t v, const int lane)
A64: FMLA Sd, Sn, Vm.S[lane]
|
FusedMultiplySubtract(Vector128<Double>, Vector128<Double>, Vector128<Double>)
|
float64x2_t vfmsq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
A64: FMLS Vd.2D, Vn.2D, Vm.2D
|
FusedMultiplySubtractByScalar(Vector128<Double>, Vector128<Double>, Vector64<Double>)
|
float64x2_t vfmsq_n_f64 (float64x2_t a, float64x2_t b, float64_t n)
A64: FMLS Vd.2D, Vn.2D, Vm.D[0]
|
FusedMultiplySubtractByScalar(Vector128<Single>, Vector128<Single>, Vector64<Single>)
|
float32x4_t vfmsq_n_f32 (float32x4_t a, float32x4_t b, float32_t n)
A64: FMLS Vd.4S, Vn.4S, Vm.S[0]
|
FusedMultiplySubtractByScalar(Vector64<Single>, Vector64<Single>, Vector64<Single>)
|
float32x2_t vfms_n_f32 (float32x2_t a, float32x2_t b, float32_t n)
A64: FMLS Vd.2S, Vn.2S, Vm.S[0]
|
FusedMultiplySubtractBySelectedScalar(Vector128<Double>, Vector128<Double>, Vector128<Double>, Byte)
|
float64x2_t vfmsq_laneq_f64 (float64x2_t a, float64x2_t b, float64x2_t v, const int lane)
A64: FMLS Vd.2D, Vn.2D, Vm.D[lane]
|
FusedMultiplySubtractBySelectedScalar(Vector128<Single>, Vector128<Single>, Vector128<Single>, Byte)
|
float32x4_t vfmsq_laneq_f32 (float32x4_t a, float32x4_t b, float32x4_t v, const int lane)
A64: FMLS Vd.4S, Vn.4S, Vm.S[lane]
|
FusedMultiplySubtractBySelectedScalar(Vector128<Single>, Vector128<Single>, Vector64<Single>, Byte)
|
float32x4_t vfmsq_lane_f32 (float32x4_t a, float32x4_t b, float32x2_t v, const int lane)
A64: FMLS Vd.4S, Vn.4S, Vm.S[lane]
|
FusedMultiplySubtractBySelectedScalar(Vector64<Single>, Vector64<Single>, Vector128<Single>, Byte)
|
float32x2_t vfms_laneq_f32 (float32x2_t a, float32x2_t b, float32x4_t v, const int lane)
A64: FMLS Vd.2S, Vn.2S, Vm.S[lane]
|
FusedMultiplySubtractBySelectedScalar(Vector64<Single>, Vector64<Single>, Vector64<Single>, Byte)
|
float32x2_t vfms_lane_f32 (float32x2_t a, float32x2_t b, float32x2_t v, const int lane)
A64: FMLS Vd.2S, Vn.2S, Vm.S[lane]
|
FusedMultiplySubtractScalarBySelectedScalar(Vector64<Double>, Vector64<Double>, Vector128<Double>, Byte)
|
float64_t vfmsd_laneq_f64 (float64_t a, float64_t b, float64x2_t v, const int lane)
A64: FMLS Dd, Dn, Vm.D[lane]
|
FusedMultiplySubtractScalarBySelectedScalar(Vector64<Single>, Vector64<Single>, Vector128<Single>, Byte)
|
float32_t vfmss_laneq_f32 (float32_t a, float32_t b, float32x4_t v, const int lane)
A64: FMLS Sd, Sn, Vm.S[lane]
|
FusedMultiplySubtractScalarBySelectedScalar(Vector64<Single>, Vector64<Single>, Vector64<Single>, Byte)
|
float32_t vfmss_lane_f32 (float32_t a, float32_t b, float32x2_t v, const int lane)
A64: FMLS Sd, Sn, Vm.S[lane]
|
GetHashCode()
|
Służy jako domyślna funkcja skrótu.
(Odziedziczone po Object)
|
GetType()
|
Pobiera Type bieżącego wystąpienia.
(Odziedziczone po Object)
|
InsertSelectedScalar(Vector128<Byte>, Byte, Vector128<Byte>, Byte)
|
uint8x16_t vcopyq_laneq_u8 (uint8x16_t a, const int lane1, uint8x16_t b, const int lane2)
A64: INS Vd.B[lane1], Vn.B[lane2]
|
InsertSelectedScalar(Vector128<Byte>, Byte, Vector64<Byte>, Byte)
|
uint8x16_t vcopyq_lane_u8 (uint8x16_t a, const int lane1, uint8x8_t b, const int lane2)
A64: INS Vd.B[lane1], Vn.B[lane2]
|
InsertSelectedScalar(Vector128<Double>, Byte, Vector128<Double>, Byte)
|
float64x2_t vcopyq_laneq_f64 (float64x2_t a, const int lane1, float64x2_t b, const int lane2)
A64: INS Vd.D[lane1], Vn.D[lane2]
|
InsertSelectedScalar(Vector128<Int16>, Byte, Vector128<Int16>, Byte)
|
int16x8_t vcopyq_laneq_s16 (int16x8_t a, const int lane1, int16x8_t b, const int lane2)
A64: INS Vd.H[lane1], Vn.H[lane2]
|
InsertSelectedScalar(Vector128<Int16>, Byte, Vector64<Int16>, Byte)
|
int16x8_t vcopyq_lane_s16 (int16x8_t a, const int lane1, int16x4_t b, const int lane2)
A64: INS Vd.H[lane1], Vn.H[lane2]
|
InsertSelectedScalar(Vector128<Int32>, Byte, Vector128<Int32>, Byte)
|
int32x4_t vcopyq_laneq_s32 (int32x4_t a, const int lane1, int32x4_t b, const int lane2)
A64: INS Vd.S[lane1], Vn.S[lane2]
|
InsertSelectedScalar(Vector128<Int32>, Byte, Vector64<Int32>, Byte)
|
int32x4_t vcopyq_lane_s32 (int32x4_t a, const int lane1, int32x2_t b, const int lane2)
A64: INS Vd.S[lane1], Vn.S[lane2]
|
InsertSelectedScalar(Vector128<Int64>, Byte, Vector128<Int64>, Byte)
|
int64x2_t vcopyq_laneq_s64 (int64x2_t a, const int lane1, int64x2_t b, const int lane2)
A64: INS Vd.D[lane1], Vn.D[lane2]
|
InsertSelectedScalar(Vector128<SByte>, Byte, Vector128<SByte>, Byte)
|
int8x16_t vcopyq_laneq_s8 (int8x16_t a, const int lane1, int8x16_t b, const int lane2)
A64: INS Vd.B[lane1], Vn.B[lane2]
|
InsertSelectedScalar(Vector128<SByte>, Byte, Vector64<SByte>, Byte)
|
int8x16_t vcopyq_lane_s8 (int8x16_t a, const int lane1, int8x8_t b, const int lane2)
A64: INS Vd.B[lane1], Vn.B[lane2]
|
InsertSelectedScalar(Vector128<Single>, Byte, Vector128<Single>, Byte)
|
float32x4_t vcopyq_laneq_f32 (float32x4_t a, const int lane1, float32x4_t b, const int lane2)
A64: INS Vd.S[lane1], Vn.S[lane2]
|
InsertSelectedScalar(Vector128<Single>, Byte, Vector64<Single>, Byte)
|
float32x4_t vcopyq_lane_f32 (float32x4_t a, const int lane1, float32x2_t b, const int lane2)
A64: INS Vd.S[lane1], Vn.S[lane2]
|
InsertSelectedScalar(Vector128<UInt16>, Byte, Vector128<UInt16>, Byte)
|
uint16x8_t vcopyq_laneq_u16 (uint16x8_t a, const int lane1, uint16x8_t b, const int lane2)
A64: INS Vd.H[lane1], Vn.H[lane2]
|
InsertSelectedScalar(Vector128<UInt16>, Byte, Vector64<UInt16>, Byte)
|
uint16x8_t vcopyq_lane_u16 (uint16x8_t a, const int lane1, uint16x4_t b, const int lane2)
A64: INS Vd.H[lane1], Vn.H[lane2]
|
InsertSelectedScalar(Vector128<UInt32>, Byte, Vector128<UInt32>, Byte)
|
uint32x4_t vcopyq_laneq_u32 (uint32x4_t a, const int lane1, uint32x4_t b, const int lane2)
A64: INS Vd.S[lane1], Vn.S[lane2]
|
InsertSelectedScalar(Vector128<UInt32>, Byte, Vector64<UInt32>, Byte)
|
uint32x4_t vcopyq_lane_u32 (uint32x4_t a, const int lane1, uint32x2_t b, const int lane2)
A64: INS Vd.S[lane1], Vn.S[lane2]
|
InsertSelectedScalar(Vector128<UInt64>, Byte, Vector128<UInt64>, Byte)
|
uint64x2_t vcopyq_laneq_u64 (uint64x2_t a, const int lane1, uint64x2_t b, const int lane2)
A64: INS Vd.D[lane1], Vn.D[lane2]
|
InsertSelectedScalar(Vector64<Byte>, Byte, Vector128<Byte>, Byte)
|
uint8x8_t vcopy_laneq_u8 (uint8x8_t a, const int lane1, uint8x16_t b, const int lane2)
A64: INS Vd.B[lane1], Vn.B[lane2]
|
InsertSelectedScalar(Vector64<Byte>, Byte, Vector64<Byte>, Byte)
|
uint8x8_t vcopy_lane_u8 (uint8x8_t a, const int lane1, uint8x8_t b, const int lane2)
A64: INS Vd.B[lane1], Vn.B[lane2]
|
InsertSelectedScalar(Vector64<Int16>, Byte, Vector128<Int16>, Byte)
|
int16x4_t vcopy_laneq_s16 (int16x4_t a, const int lane1, int16x8_t b, const int lane2)
A64: INS Vd.H[lane1], Vn.H[lane2]
|
InsertSelectedScalar(Vector64<Int16>, Byte, Vector64<Int16>, Byte)
|
int16x4_t vcopy_lane_s16 (int16x4_t a, const int lane1, int16x4_t b, const int lane2)
A64: INS Vd.H[lane1], Vn.H[lane2]
|
InsertSelectedScalar(Vector64<Int32>, Byte, Vector128<Int32>, Byte)
|
int32x2_t vcopy_laneq_s32 (int32x2_t a, const int lane1, int32x4_t b, const int lane2)
A64: INS Vd.S[lane1], Vn.S[lane2]
|
InsertSelectedScalar(Vector64<Int32>, Byte, Vector64<Int32>, Byte)
|
int32x2_t vcopy_lane_s32 (int32x2_t a, const int lane1, int32x2_t b, const int lane2)
A64: INS Vd.S[lane1], Vn.S[lane2]
|
InsertSelectedScalar(Vector64<SByte>, Byte, Vector128<SByte>, Byte)
|
int8x8_t vcopy_laneq_s8 (int8x8_t a, const int lane1, int8x16_t b, const int lane2)
A64: INS Vd.B[lane1], Vn.B[lane2]
|
InsertSelectedScalar(Vector64<SByte>, Byte, Vector64<SByte>, Byte)
|
int8x8_t vcopy_lane_s8 (int8x8_t a, const int lane1, int8x8_t b, const int lane2)
A64: INS Vd.B[lane1], Vn.B[lane2]
|
InsertSelectedScalar(Vector64<Single>, Byte, Vector128<Single>, Byte)
|
float32x2_t vcopy_laneq_f32 (float32x2_t a, const int lane1, float32x4_t b, const int lane2)
A64: INS Vd.S[lane1], Vn.S[lane2]
|
InsertSelectedScalar(Vector64<Single>, Byte, Vector64<Single>, Byte)
|
float32x2_t vcopy_lane_f32 (float32x2_t, const int lane1, float32x2_t b, const int lane2)
A64: INS Vd.S[lane1], Vn.S[lane2]
|
InsertSelectedScalar(Vector64<UInt16>, Byte, Vector128<UInt16>, Byte)
|
uint16x4_t vcopy_laneq_u16 (uint16x4_t, const int lane1, uint16x8_t b, const int lane2)
A64: INS Vd.H[lane1], Vn.H[lane2]
|
InsertSelectedScalar(Vector64<UInt16>, Byte, Vector64<UInt16>, Byte)
|
uint16x4_t vcopy_lane_u16 (uint16x4_t a, const int lane1, uint16x4_t b, const int lane2)
A64: INS Vd.H[lane1], Vn.H[lane2]
|
InsertSelectedScalar(Vector64<UInt32>, Byte, Vector128<UInt32>, Byte)
|
uint32x2_t vcopy_laneq_u32 (uint32x2_t, const int lane1, uint32x4_t b, const int lane2)
A64: INS Vd.S[lane1], Vn.S[lane2]
|
InsertSelectedScalar(Vector64<UInt32>, Byte, Vector64<UInt32>, Byte)
|
uint32x2_t vcopy_lane_u32 (uint32x2_t, const int lane1, uint32x2_t b, const int lane2)
A64: INS Vd.S[lane1], Vn.S[lane2]
|
LoadAndInsertScalar(ValueTuple<Vector128<Byte>,Vector128<Byte>,Vector128<Byte>,Vector128<Byte>>, Byte, Byte*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Byte>,Vector128<Byte>,Vector128<Byte>>, Byte, Byte*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Byte>,Vector128<Byte>>, Byte, Byte*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Double>,Vector128<Double>,Vector128<Double>,Vector128<Double>>, Byte, Double*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Double>,Vector128<Double>,Vector128<Double>>, Byte, Double*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Double>,Vector128<Double>>, Byte, Double*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Int16>,Vector128<Int16>,Vector128<Int16>,Vector128<Int16>>, Byte, Int16*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Int16>,Vector128<Int16>,Vector128<Int16>>, Byte, Int16*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Int16>,Vector128<Int16>>, Byte, Int16*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Int32>,Vector128<Int32>,Vector128<Int32>,Vector128<Int32>>, Byte, Int32*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Int32>,Vector128<Int32>,Vector128<Int32>>, Byte, Int32*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Int32>,Vector128<Int32>>, Byte, Int32*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Int64>,Vector128<Int64>,Vector128<Int64>,Vector128<Int64>>, Byte, Int64*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Int64>,Vector128<Int64>,Vector128<Int64>>, Byte, Int64*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Int64>,Vector128<Int64>>, Byte, Int64*)
|
LoadAndInsertScalar(ValueTuple<Vector128<SByte>,Vector128<SByte>,Vector128<SByte>,Vector128<SByte>>, Byte, SByte*)
|
LoadAndInsertScalar(ValueTuple<Vector128<SByte>,Vector128<SByte>,Vector128<SByte>>, Byte, SByte*)
|
LoadAndInsertScalar(ValueTuple<Vector128<SByte>,Vector128<SByte>>, Byte, SByte*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Single>,Vector128<Single>,Vector128<Single>,Vector128<Single>>, Byte, Single*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Single>,Vector128<Single>,Vector128<Single>>, Byte, Single*)
|
LoadAndInsertScalar(ValueTuple<Vector128<Single>,Vector128<Single>>, Byte, Single*)
|
LoadAndInsertScalar(ValueTuple<Vector128<UInt16>,Vector128<UInt16>,Vector128<UInt16>,Vector128<UInt16>>, Byte, UInt16*)
|
LoadAndInsertScalar(ValueTuple<Vector128<UInt16>,Vector128<UInt16>,Vector128<UInt16>>, Byte, UInt16*)
|
LoadAndInsertScalar(ValueTuple<Vector128<UInt16>,Vector128<UInt16>>, Byte, UInt16*)
|
LoadAndInsertScalar(ValueTuple<Vector128<UInt32>,Vector128<UInt32>,Vector128<UInt32>,Vector128<UInt32>>, Byte, UInt32*)
|
LoadAndInsertScalar(ValueTuple<Vector128<UInt32>,Vector128<UInt32>,Vector128<UInt32>>, Byte, UInt32*)
|
LoadAndInsertScalar(ValueTuple<Vector128<UInt32>,Vector128<UInt32>>, Byte, UInt32*)
|
LoadAndInsertScalar(ValueTuple<Vector128<UInt64>,Vector128<UInt64>,Vector128<UInt64>,Vector128<UInt64>>, Byte, UInt64*)
|
LoadAndInsertScalar(ValueTuple<Vector128<UInt64>,Vector128<UInt64>,Vector128<UInt64>>, Byte, UInt64*)
|
LoadAndInsertScalar(ValueTuple<Vector128<UInt64>,Vector128<UInt64>>, Byte, UInt64*)
|
LoadAndReplicateToVector128(Double*)
|
float64x2_t vld1q_dup_f64 (float64_t const * ptr)
A64: LD1R { Vt.2D }, [Xn]
|
LoadAndReplicateToVector128(Int64*)
|
int64x2_t vld1q_dup_s64 (int64_t const * ptr)
A64: LD1R { Vt.2D }, [Xn]
|
LoadAndReplicateToVector128(UInt64*)
|
uint64x2_t vld1q_dup_u64 (uint64_t const * ptr)
A64: LD1R { Vt.2D }, [Xn]
|
LoadAndReplicateToVector128x2(Byte*)
|
LoadAndReplicateToVector128x2(Double*)
|
LoadAndReplicateToVector128x2(Int16*)
|
LoadAndReplicateToVector128x2(Int32*)
|
LoadAndReplicateToVector128x2(Int64*)
|
LoadAndReplicateToVector128x2(SByte*)
|
LoadAndReplicateToVector128x2(Single*)
|
LoadAndReplicateToVector128x2(UInt16*)
|
LoadAndReplicateToVector128x2(UInt32*)
|
LoadAndReplicateToVector128x2(UInt64*)
|
LoadAndReplicateToVector128x3(Byte*)
|
LoadAndReplicateToVector128x3(Double*)
|
LoadAndReplicateToVector128x3(Int16*)
|
LoadAndReplicateToVector128x3(Int32*)
|
LoadAndReplicateToVector128x3(Int64*)
|
LoadAndReplicateToVector128x3(SByte*)
|
LoadAndReplicateToVector128x3(Single*)
|
LoadAndReplicateToVector128x3(UInt16*)
|
LoadAndReplicateToVector128x3(UInt32*)
|
LoadAndReplicateToVector128x3(UInt64*)
|
LoadAndReplicateToVector128x4(Byte*)
|
LoadAndReplicateToVector128x4(Double*)
|
LoadAndReplicateToVector128x4(Int16*)
|
LoadAndReplicateToVector128x4(Int32*)
|
LoadAndReplicateToVector128x4(Int64*)
|
LoadAndReplicateToVector128x4(SByte*)
|
LoadAndReplicateToVector128x4(Single*)
|
LoadAndReplicateToVector128x4(UInt16*)
|
LoadAndReplicateToVector128x4(UInt32*)
|
LoadAndReplicateToVector128x4(UInt64*)
|
LoadPairScalarVector64(Int32*)
|
A64: LDP St1, St2, [Xn]
|
LoadPairScalarVector64(Single*)
|
A64: LDP St1, St2, [Xn]
|
LoadPairScalarVector64(UInt32*)
|
A64: LDP St1, St2, [Xn]
|
LoadPairScalarVector64NonTemporal(Int32*)
|
A64: LDNP St1, St2, [Xn]
|
LoadPairScalarVector64NonTemporal(Single*)
|
A64: LDNP St1, St2, [Xn]
|
LoadPairScalarVector64NonTemporal(UInt32*)
|
A64: LDNP St1, St2, [Xn]
|
LoadPairVector128(Byte*)
|
A64: LDP Qt1, Qt2, [Xn]
|
LoadPairVector128(Double*)
|
A64: LDP Qt1, Qt2, [Xn]
|
LoadPairVector128(Int16*)
|
A64: LDP Qt1, Qt2, [Xn]
|
LoadPairVector128(Int32*)
|
A64: LDP Qt1, Qt2, [Xn]
|
LoadPairVector128(Int64*)
|
A64: LDP Qt1, Qt2, [Xn]
|
LoadPairVector128(SByte*)
|
A64: LDP Qt1, Qt2, [Xn]
|
LoadPairVector128(Single*)
|
A64: LDP Qt1, Qt2, [Xn]
|
LoadPairVector128(UInt16*)
|
A64: LDP Qt1, Qt2, [Xn]
|
LoadPairVector128(UInt32*)
|
A64: LDP Qt1, Qt2, [Xn]
|
LoadPairVector128(UInt64*)
|
A64: LDP Qt1, Qt2, [Xn]
|
LoadPairVector128NonTemporal(Byte*)
|
A64: LDNP Qt1, Qt2, [Xn]
|
LoadPairVector128NonTemporal(Double*)
|
A64: LDNP Qt1, Qt2, [Xn]
|
LoadPairVector128NonTemporal(Int16*)
|
A64: LDNP Qt1, Qt2, [Xn]
|
LoadPairVector128NonTemporal(Int32*)
|
A64: LDNP Qt1, Qt2, [Xn]
|
LoadPairVector128NonTemporal(Int64*)
|
A64: LDNP Qt1, Qt2, [Xn]
|
LoadPairVector128NonTemporal(SByte*)
|
A64: LDNP Qt1, Qt2, [Xn]
|
LoadPairVector128NonTemporal(Single*)
|
A64: LDNP Qt1, Qt2, [Xn]
|
LoadPairVector128NonTemporal(UInt16*)
|
A64: LDNP Qt1, Qt2, [Xn]
|
LoadPairVector128NonTemporal(UInt32*)
|
A64: LDNP Qt1, Qt2, [Xn]
|
LoadPairVector128NonTemporal(UInt64*)
|
A64: LDNP Qt1, Qt2, [Xn]
|
LoadPairVector64(Byte*)
|
A64: LDP Dt1, Dt2, [Xn]
|
LoadPairVector64(Double*)
|
A64: LDP Dt1, Dt2, [Xn]
|
LoadPairVector64(Int16*)
|
A64: LDP Dt1, Dt2, [Xn]
|
LoadPairVector64(Int32*)
|
A64: LDP Dt1, Dt2, [Xn]
|
LoadPairVector64(Int64*)
|
A64: LDP Dt1, Dt2, [Xn]
|
LoadPairVector64(SByte*)
|
A64: LDP Dt1, Dt2, [Xn]
|
LoadPairVector64(Single*)
|
A64: LDP Dt1, Dt2, [Xn]
|
LoadPairVector64(UInt16*)
|
A64: LDP Dt1, Dt2, [Xn]
|
LoadPairVector64(UInt32*)
|
A64: LDP Dt1, Dt2, [Xn]
|
LoadPairVector64(UInt64*)
|
A64: LDP Dt1, Dt2, [Xn]
|
LoadPairVector64NonTemporal(Byte*)
|
A64: LDNP Dt1, Dt2, [Xn]
|
LoadPairVector64NonTemporal(Double*)
|
A64: LDNP Dt1, Dt2, [Xn]
|
LoadPairVector64NonTemporal(Int16*)
|
A64: LDNP Dt1, Dt2, [Xn]
|
LoadPairVector64NonTemporal(Int32*)
|
A64: LDNP Dt1, Dt2, [Xn]
|
LoadPairVector64NonTemporal(Int64*)
|
A64: LDNP Dt1, Dt2, [Xn]
|
LoadPairVector64NonTemporal(SByte*)
|
A64: LDNP Dt1, Dt2, [Xn]
|
LoadPairVector64NonTemporal(Single*)
|
A64: LDNP Dt1, Dt2, [Xn]
|
LoadPairVector64NonTemporal(UInt16*)
|
A64: LDNP Dt1, Dt2, [Xn]
|
LoadPairVector64NonTemporal(UInt32*)
|
A64: LDNP Dt1, Dt2, [Xn]
|
LoadPairVector64NonTemporal(UInt64*)
|
A64: LDNP Dt1, Dt2, [Xn]
|
LoadVector128x2(Byte*)
|
LoadVector128x2(Double*)
|
LoadVector128x2(Int16*)
|
LoadVector128x2(Int32*)
|
LoadVector128x2(Int64*)
|
LoadVector128x2(SByte*)
|
LoadVector128x2(Single*)
|
LoadVector128x2(UInt16*)
|
LoadVector128x2(UInt32*)
|
LoadVector128x2(UInt64*)
|
LoadVector128x2AndUnzip(Byte*)
|
LoadVector128x2AndUnzip(Double*)
|
LoadVector128x2AndUnzip(Int16*)
|
LoadVector128x2AndUnzip(Int32*)
|
LoadVector128x2AndUnzip(Int64*)
|
LoadVector128x2AndUnzip(SByte*)
|
LoadVector128x2AndUnzip(Single*)
|
LoadVector128x2AndUnzip(UInt16*)
|
LoadVector128x2AndUnzip(UInt32*)
|
LoadVector128x2AndUnzip(UInt64*)
|
LoadVector128x3(Byte*)
|
LoadVector128x3(Double*)
|
LoadVector128x3(Int16*)
|
LoadVector128x3(Int32*)
|
LoadVector128x3(Int64*)
|
LoadVector128x3(SByte*)
|
LoadVector128x3(Single*)
|
LoadVector128x3(UInt16*)
|
LoadVector128x3(UInt32*)
|
LoadVector128x3(UInt64*)
|
LoadVector128x3AndUnzip(Byte*)
|
LoadVector128x3AndUnzip(Double*)
|
LoadVector128x3AndUnzip(Int16*)
|
LoadVector128x3AndUnzip(Int32*)
|
LoadVector128x3AndUnzip(Int64*)
|
LoadVector128x3AndUnzip(SByte*)
|
LoadVector128x3AndUnzip(Single*)
|
LoadVector128x3AndUnzip(UInt16*)
|
LoadVector128x3AndUnzip(UInt32*)
|
LoadVector128x3AndUnzip(UInt64*)
|
LoadVector128x4(Byte*)
|
LoadVector128x4(Double*)
|
LoadVector128x4(Int16*)
|
LoadVector128x4(Int32*)
|
LoadVector128x4(Int64*)
|
LoadVector128x4(SByte*)
|
LoadVector128x4(Single*)
|
LoadVector128x4(UInt16*)
|
LoadVector128x4(UInt32*)
|
LoadVector128x4(UInt64*)
|
LoadVector128x4AndUnzip(Byte*)
|
LoadVector128x4AndUnzip(Double*)
|
LoadVector128x4AndUnzip(Int16*)
|
LoadVector128x4AndUnzip(Int32*)
|
LoadVector128x4AndUnzip(Int64*)
|
LoadVector128x4AndUnzip(SByte*)
|
LoadVector128x4AndUnzip(Single*)
|
LoadVector128x4AndUnzip(UInt16*)
|
LoadVector128x4AndUnzip(UInt32*)
|
LoadVector128x4AndUnzip(UInt64*)
|
Max(Vector128<Double>, Vector128<Double>)
|
float64x2_t vmaxq_f64 (float64x2_t a, float64x2_t b)
A64: FMAX Vd.2D, Vn.2D, Vm.2D
|
MaxAcross(Vector128<Byte>)
|
uint8_t vmaxvq_u8 (uint8x16_t a)
A64: UMAXV Bd, Vn.16B
|
MaxAcross(Vector128<Int16>)
|
int16_t vmaxvq_s16 (int16x8_t a)
A64: SMAXV Hd, Vn.8H
|
MaxAcross(Vector128<Int32>)
|
int32_t vmaxvq_s32 (int32x4_t a)
A64: SMAXV Sd, Vn.4S
|
MaxAcross(Vector128<SByte>)
|
int8_t vmaxvq_s8 (int8x16_t a)
A64: SMAXV Bd, Vn.16B
|
MaxAcross(Vector128<Single>)
|
float32_t vmaxvq_f32 (float32x4_t a)
A64: FMAXV Sd, Vn.4S
|
MaxAcross(Vector128<UInt16>)
|
uint16_t vmaxvq_u16 (uint16x8_t a)
A64: UMAXV Hd, Vn.8H
|
MaxAcross(Vector128<UInt32>)
|
uint32_t vmaxvq_u32 (uint32x4_t a)
A64: UMAXV Sd, Vn.4S
|
MaxAcross(Vector64<Byte>)
|
uint8_t vmaxv_u8 (uint8x8_t a)
A64: UMAXV Bd, Vn.8B
|
MaxAcross(Vector64<Int16>)
|
int16_t vmaxv_s16 (int16x4_t a)
A64: SMAXV Hd, Vn.4H
|
MaxAcross(Vector64<SByte>)
|
int8_t vmaxv_s8 (int8x8_t a)
A64: SMAXV Bd, Vn.8B
|
MaxAcross(Vector64<UInt16>)
|
uint16_t vmaxv_u16 (uint16x4_t a)
A64: UMAXV Hd, Vn.4H
|
MaxNumber(Vector128<Double>, Vector128<Double>)
|
float64x2_t vmaxnmq_f64 (float64x2_t a, float64x2_t b)
A64: FMAXNM Vd.2D, Vn.2D, Vm.2D
|
MaxNumberAcross(Vector128<Single>)
|
float32_t vmaxnmvq_f32 (float32x4_t a)
A64: FMAXNMV Sd, Vn.4S
|
MaxNumberPairwise(Vector128<Double>, Vector128<Double>)
|
float64x2_t vpmaxnmq_f64 (float64x2_t a, float64x2_t b)
A64: FMAXNMP Vd.2D, Vn.2D, Vm.2D
|
MaxNumberPairwise(Vector128<Single>, Vector128<Single>)
|
float32x4_t vpmaxnmq_f32 (float32x4_t a, float32x4_t b)
A64: FMAXNMP Vd.4S, Vn.4S, Vm.4S
|
MaxNumberPairwise(Vector64<Single>, Vector64<Single>)
|
float32x2_t vpmaxnm_f32 (float32x2_t a, float32x2_t b)
A64: FMAXNMP Vd.2S, Vn.2S, Vm.2S
|
MaxNumberPairwiseScalar(Vector128<Double>)
|
float64_t vpmaxnmqd_f64 (float64x2_t a)
A64: FMAXNMP Dd, Vn.2D
|
MaxNumberPairwiseScalar(Vector64<Single>)
|
float32_t vpmaxnms_f32 (float32x2_t a)
A64: FMAXNMP Sd, Vn.2S
|
MaxPairwise(Vector128<Byte>, Vector128<Byte>)
|
uint8x16_t vpmaxq_u8 (uint8x16_t a, uint8x16_t b)
A64: UMAXP Vd.16B, Vn.16B, Vm.16B
|
MaxPairwise(Vector128<Double>, Vector128<Double>)
|
float64x2_t vpmaxq_f64 (float64x2_t a, float64x2_t b)
A64: FMAXP Vd.2D, Vn.2D, Vm.2D
|
MaxPairwise(Vector128<Int16>, Vector128<Int16>)
|
int16x8_t vpmaxq_s16 (int16x8_t a, int16x8_t b)
A64: SMAXP Vd.8H, Vn.8H, Vm.8H
|
MaxPairwise(Vector128<Int32>, Vector128<Int32>)
|
int32x4_t vpmaxq_s32 (int32x4_t a, int32x4_t b)
A64: SMAXP Vd.4S, Vn.4S, Vm.4S
|
MaxPairwise(Vector128<SByte>, Vector128<SByte>)
|
int8x16_t vpmaxq_s8 (int8x16_t a, int8x16_t b)
A64: SMAXP Vd.16B, Vn.16B, Vm.16B
|
MaxPairwise(Vector128<Single>, Vector128<Single>)
|
float32x4_t vpmaxq_f32 (float32x4_t a, float32x4_t b)
A64: FMAXP Vd.4S, Vn.4S, Vm.4S
|
MaxPairwise(Vector128<UInt16>, Vector128<UInt16>)
|
uint16x8_t vpmaxq_u16 (uint16x8_t a, uint16x8_t b)
A64: UMAXP Vd.8H, Vn.8H, Vm.8H
|
MaxPairwise(Vector128<UInt32>, Vector128<UInt32>)
|
uint32x4_t vpmaxq_u32 (uint32x4_t a, uint32x4_t b)
A64: UMAXP Vd.4S, Vn.4S, Vm.4S
|
MaxPairwiseScalar(Vector128<Double>)
|
float64_t vpmaxqd_f64 (float64x2_t a)
A64: FMAXP Dd, Vn.2D
|
MaxPairwiseScalar(Vector64<Single>)
|
float32_t vpmaxs_f32 (float32x2_t a)
A64: FMAXP Sd, Vn.2S
|
MaxScalar(Vector64<Double>, Vector64<Double>)
|
float64x1_t vmax_f64 (float64x1_t a, float64x1_t b)
A64: FMAX Dd, Dn, Dm
|
MaxScalar(Vector64<Single>, Vector64<Single>)
|
float32_t vmaxs_f32 (float32_t a, float32_t b)
A64: FMAX Sd, Sn, Sm Powyższy podpis natywny nie istnieje. Udostępniamy to dodatkowe przeciążenie pod kątem spójności z innymi interfejsami API skalarnymi.
|
MemberwiseClone()
|
Tworzy płytką kopię bieżącego obiektu Object.
(Odziedziczone po Object)
|
Min(Vector128<Double>, Vector128<Double>)
|
float64x2_t vminq_f64 (float64x2_t a, float64x2_t b)
A64: FMIN Vd.2D, Vn.2D, Vm.2D
|
MinAcross(Vector128<Byte>)
|
uint8_t vminvq_u8 (uint8x16_t a)
A64: UMINV Bd, Vn.16B
|
MinAcross(Vector128<Int16>)
|
int16_t vminvq_s16 (int16x8_t a)
A64: SMINV Hd, Vn.8H
|
MinAcross(Vector128<Int32>)
|
int32_t vminvq_s32 (int32x4_t a)
A64: SMINV Sd, Vn.4S
|
MinAcross(Vector128<SByte>)
|
int8_t vminvq_s8 (int8x16_t a)
A64: SMINV Bd, Vn.16B
|
MinAcross(Vector128<Single>)
|
float32_t vminvq_f32 (float32x4_t a)
A64: FMINV Sd, Vn.4S
|
MinAcross(Vector128<UInt16>)
|
uint16_t vminvq_u16 (uint16x8_t a)
A64: UMINV Hd, Vn.8H
|
MinAcross(Vector128<UInt32>)
|
uint32_t vminvq_u32 (uint32x4_t a)
A64: UMINV Sd, Vn.4S
|
MinAcross(Vector64<Byte>)
|
uint8_t vminv_u8 (uint8x8_t a)
A64: UMINV Bd, Vn.8B
|
MinAcross(Vector64<Int16>)
|
int16_t vminv_s16 (int16x4_t a)
A64: SMINV Hd, Vn.4H
|
MinAcross(Vector64<SByte>)
|
int8_t vminv_s8 (int8x8_t a)
A64: SMINV Bd, Vn.8B
|
MinAcross(Vector64<UInt16>)
|
uint16_t vminv_u16 (uint16x4_t a)
A64: UMINV Hd, Vn.4H
|
MinNumber(Vector128<Double>, Vector128<Double>)
|
float64x2_t vminnmq_f64 (float64x2_t a, float64x2_t b)
A64: FMINNM Vd.2D, Vn.2D, Vm.2D
|
MinNumberAcross(Vector128<Single>)
|
float32_t vminnmvq_f32 (float32x4_t a)
A64: FMINNMV Sd, Vn.4S
|
MinNumberPairwise(Vector128<Double>, Vector128<Double>)
|
float64x2_t vpminnmq_f64 (float64x2_t a, float64x2_t b)
A64: FMINNMP Vd.2D, Vn.2D, Vm.2D
|
MinNumberPairwise(Vector128<Single>, Vector128<Single>)
|
float32x4_t vpminnmq_f32 (float32x4_t a, float32x4_t b)
A64: FMINNMP Vd.4S, Vn.4S, Vm.4S
|
MinNumberPairwise(Vector64<Single>, Vector64<Single>)
|
float32x2_t vpminnm_f32 (float32x2_t a, float32x2_t b)
A64: FMINNMP Vd.2S, Vn.2S, Vm.2S
|
MinNumberPairwiseScalar(Vector128<Double>)
|
float64_t vpminnmqd_f64 (float64x2_t a)
A64: FMINNMP Dd, Vn.2D
|
MinNumberPairwiseScalar(Vector64<Single>)
|
float32_t vpminnms_f32 (float32x2_t a)
A64: FMINNMP Sd, Vn.2S
|
MinPairwise(Vector128<Byte>, Vector128<Byte>)
|
uint8x16_t vpminq_u8 (uint8x16_t a, uint8x16_t b)
A64: UMINP Vd.16B, Vn.16B, Vm.16B
|
MinPairwise(Vector128<Double>, Vector128<Double>)
|
float64x2_t vpminq_f64 (float64x2_t a, float64x2_t b)
A64: FMINP Vd.2D, Vn.2D, Vm.2D
|
MinPairwise(Vector128<Int16>, Vector128<Int16>)
|
int16x8_t vpminq_s16 (int16x8_t a, int16x8_t b)
A64: SMINP Vd.8H, Vn.8H, Vm.8H
|
MinPairwise(Vector128<Int32>, Vector128<Int32>)
|
int32x4_t vpminq_s32 (int32x4_t a, int32x4_t b)
A64: SMINP Vd.4S, Vn.4S, Vm.4S
|
MinPairwise(Vector128<SByte>, Vector128<SByte>)
|
int8x16_t vpminq_s8 (int8x16_t a, int8x16_t b)
A64: SMINP Vd.16B, Vn.16B, Vm.16B
|
MinPairwise(Vector128<Single>, Vector128<Single>)
|
float32x4_t vpminq_f32 (float32x4_t a, float32x4_t b)
A64: FMINP Vd.4S, Vn.4S, Vm.4S
|
MinPairwise(Vector128<UInt16>, Vector128<UInt16>)
|
uint16x8_t vpminq_u16 (uint16x8_t a, uint16x8_t b)
A64: UMINP Vd.8H, Vn.8H, Vm.8H
|
MinPairwise(Vector128<UInt32>, Vector128<UInt32>)
|
uint32x4_t vpminq_u32 (uint32x4_t a, uint32x4_t b)
A64: UMINP Vd.4S, Vn.4S, Vm.4S
|
MinPairwiseScalar(Vector128<Double>)
|
float64_t vpminqd_f64 (float64x2_t a)
A64: FMINP Dd, Vn.2D
|
MinPairwiseScalar(Vector64<Single>)
|
float32_t vpmins_f32 (float32x2_t a)
A64: FMINP Sd, Vn.2S
|
MinScalar(Vector64<Double>, Vector64<Double>)
|
float64x1_t vmin_f64 (float64x1_t a, float64x1_t b)
A64: FMIN Dd, Dn, Dm
|
MinScalar(Vector64<Single>, Vector64<Single>)
|
float32_t vmins_f32 (float32_t a, float32_t b)
A64: FMIN Sd, Sn, Sm Powyższy podpis natywny nie istnieje. Udostępniamy to dodatkowe przeciążenie pod kątem spójności z innymi interfejsami API skalarnymi.
|
Multiply(Vector128<Double>, Vector128<Double>)
|
float64x2_t vmulq_f64 (float64x2_t a, float64x2_t b)
A64: FMUL Vd.2D, Vn.2D, Vm.2D
|
MultiplyByScalar(Vector128<Double>, Vector64<Double>)
|
float64x2_t vmulq_n_f64 (float64x2_t a, float64_t b)
A64: FMUL Vd.2D, Vn.2D, Vm.D[0]
|
MultiplyBySelectedScalar(Vector128<Double>, Vector128<Double>, Byte)
|
float64x2_t vmulq_laneq_f64 (float64x2_t a, float64x2_t v, const int lane)
A64: FMUL Vd.2D, Vn.2D, Vm.D[lane]
|
MultiplyDoublingSaturateHighScalar(Vector64<Int16>, Vector64<Int16>)
|
int16_t vqdmulhh_s16 (int16_t a, int16_t b)
A64: SQDMULH Hd, Hn, Hm
|
MultiplyDoublingSaturateHighScalar(Vector64<Int32>, Vector64<Int32>)
|
int32_t vqdmulhs_s32 (int32_t a, int32_t b)
A64: SQDMULH Sd, Sn, Sm
|
MultiplyDoublingScalarBySelectedScalarSaturateHigh(Vector64<Int16>, Vector128<Int16>, Byte)
|
int16_t vqdmulhh_laneq_s16 (int16_t, int16x8_t v, const int lane)
A64: SQDMULH Hd, Hn, Vm.H[lane]
|
MultiplyDoublingScalarBySelectedScalarSaturateHigh(Vector64<Int16>, Vector64<Int16>, Byte)
|
int16_t vqdmulhh_lane_s16 (int16_t, int16x4_t v, const int lane)
A64: SQDMULH Hd, Hn, Vm.H[lane]
|
MultiplyDoublingScalarBySelectedScalarSaturateHigh(Vector64<Int32>, Vector128<Int32>, Byte)
|
int32_t vqdmulhs_laneq_s32 (int32_t, int32x4_t v, const int lane)
A64: SQDMULH Sd, Sn, Vm.S[lane]
|
MultiplyDoublingScalarBySelectedScalarSaturateHigh(Vector64<Int32>, Vector64<Int32>, Byte)
|
int32_t vqdmulhs_lane_s32 (int32_t, int32x2_t v, const int lane)
A64: SQDMULH Sd, Sn, Vm.S[lane]
|
MultiplyDoublingWideningAndAddSaturateScalar(Vector64<Int32>, Vector64<Int16>, Vector64<Int16>)
|
int32_t vqdmlalh_s16 (int32_t a, int16_t b, int16_t c)
A64: SQDMLAL Sd, Hn, Hm
|
MultiplyDoublingWideningAndAddSaturateScalar(Vector64<Int64>, Vector64<Int32>, Vector64<Int32>)
|
int64_t vqdmlals_s32 (int64_t a, int32_t b, int32_t c)
A64: SQDMLAL Dd, Sn, Sm
|
MultiplyDoublingWideningAndSubtractSaturateScalar(Vector64<Int32>, Vector64<Int16>, Vector64<Int16>)
|
int32_t vqdmlslh_s16 (int32_t a, int16_t b, int16_t c)
A64: SQDMLSL Sd, Hn, Hm
|
MultiplyDoublingWideningAndSubtractSaturateScalar(Vector64<Int64>, Vector64<Int32>, Vector64<Int32>)
|
int64_t vqdmlsls_s32 (int64_t a, int32_t b, int32_t c)
A64: SQDMLSL Dd, Sn, Sm
|
MultiplyDoublingWideningSaturateScalar(Vector64<Int16>, Vector64<Int16>)
|
int32_t vqdmullh_s16 (int16_t a, int16_t b)
A64: SQDMULL Sd, Hn, Hm
|
MultiplyDoublingWideningSaturateScalar(Vector64<Int32>, Vector64<Int32>)
|
int64_t vqdmulls_s32 (int32_t a, int32_t b)
A64: SQDMULL Dd, Sn, Sm
|
MultiplyDoublingWideningSaturateScalarBySelectedScalar(Vector64<Int16>, Vector128<Int16>, Byte)
|
int32_t vqdmullh_laneq_s16 (int16_t, int16x8_t v, const int lane)
A64: SQDMULL Sd, Hn, Vm.H[lane]
|
MultiplyDoublingWideningSaturateScalarBySelectedScalar(Vector64<Int16>, Vector64<Int16>, Byte)
|
int32_t vqdmullh_lane_s16 (int16_t, int16x4_t v, const int lane)
A64: SQDMULL Sd, Hn, Vm.H[lane]
|
MultiplyDoublingWideningSaturateScalarBySelectedScalar(Vector64<Int32>, Vector128<Int32>, Byte)
|
int64_t vqdmulls_laneq_s32 (int32_t, int32x4_t v, const int lane)
A64: SQDMULL Dd, Sn, Vm.S[lane]
|
MultiplyDoublingWideningSaturateScalarBySelectedScalar(Vector64<Int32>, Vector64<Int32>, Byte)
|
int64_t vqdmulls_lane_s32 (int32_t, int32x2_t v, const int lane)
A64: SQDMULL Dd, Sn, Vm.S[lane]
|
MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(Vector64<Int32>, Vector64<Int16>, Vector128<Int16>, Byte)
|
int32_t vqdmlalh_laneq_s16 (int32_t a, int16_t b, int16x8_t v, const int lane)
A64: SQDMLAL Sd, Hn, Vm.H[lane]
|
MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(Vector64<Int32>, Vector64<Int16>, Vector64<Int16>, Byte)
|
int32_t vqdmlalh_lane_s16 (int32_t a, int16_t b, int16x4_t v, const int lane)
A64: SQDMLAL Sd, Hn, Vm.H[lane]
|
MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(Vector64<Int64>, Vector64<Int32>, Vector128<Int32>, Byte)
|
int64_t vqdmlals_laneq_s32 (int64_t a, int32_t b, int32x4_t v, const int lane)
A64: SQDMLAL Dd, Sn, Vm.S[lane]
|
MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(Vector64<Int64>, Vector64<Int32>, Vector64<Int32>, Byte)
|
int64_t vqdmlals_lane_s32 (int64_t a, int32_t b, int32x2_t v, const int lane)
A64: SQDMLAL Dd, Sn, Vm.S[lane]
|
MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(Vector64<Int32>, Vector64<Int16>, Vector128<Int16>, Byte)
|
int32_t vqdmlslh_laneq_s16 (int32_t a, int16_t b, int16x8_t v, const int lane)
A64: SQDMLSL Sd, Hn, Vm.H[lane]
|
MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(Vector64<Int32>, Vector64<Int16>, Vector64<Int16>, Byte)
|
int32_t vqdmlslh_lane_s16 (int32_t a, int16_t b, int16x4_t v, const int lane)
A64: SQDMLSL Sd, Hn, Vm.H[lane]
|
MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(Vector64<Int64>, Vector64<Int32>, Vector128<Int32>, Byte)
|
int64_t vqdmlsls_laneq_s32 (int64_t a, int32_t b, int32x4_t v, const int lane)
A64: SQDMLSL Dd, Sn, Vm.S[lane]
|
MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(Vector64<Int64>, Vector64<Int32>, Vector64<Int32>, Byte)
|
int64_t vqdmlsls_lane_s32 (int64_t a, int32_t b, int32x2_t v, const int lane)
A64: SQDMLSL Dd, Sn, Vm.S[lane]
|
MultiplyExtended(Vector128<Double>, Vector128<Double>)
|
float64x2_t vmulxq_f64 (float64x2_t a, float64x2_t b)
A64: FMULX Vd.2D, Vn.2D, Vm.2D
|
MultiplyExtended(Vector128<Single>, Vector128<Single>)
|
float32x4_t vmulxq_f32 (float32x4_t a, float32x4_t b)
A64: FMULX Vd.4S, Vn.4S, Vm.4S
|
MultiplyExtended(Vector64<Single>, Vector64<Single>)
|
float32x2_t vmulx_f32 (float32x2_t a, float32x2_t b)
A64: FMULX Vd.2S, Vn.2S, Vm.2S
|
MultiplyExtendedByScalar(Vector128<Double>, Vector64<Double>)
|
float64x2_t vmulxq_lane_f64 (float64x2_t, float64x1_t v, const int lane)
A64: FMULX Vd.2D, Vn.2D, Vm.D[0]
|
MultiplyExtendedBySelectedScalar(Vector128<Double>, Vector128<Double>, Byte)
|
float64x2_t vmulxq_laneq_f64 (float64x2_t, float64x2_t v, const int lane)
A64: FMULX Vd.2D, Vn.2D, Vm.D[lane]
|
MultiplyExtendedBySelectedScalar(Vector128<Single>, Vector128<Single>, Byte)
|
float32x4_t vmulxq_laneq_f32 (float32x4_t, float32x4_t v, const int lane)
A64: FMULX Vd.4S, Vn.4S, Vm.S[lane]
|
MultiplyExtendedBySelectedScalar(Vector128<Single>, Vector64<Single>, Byte)
|
float32x4_t vmulxq_lane_f32 (float32x4_t, float32x2_t v, const int lane)
A64: FMULX Vd.4S, Vn.4S, Vm.S[lane]
|
MultiplyExtendedBySelectedScalar(Vector64<Single>, Vector128<Single>, Byte)
|
float32x2_t vmulx_laneq_f32 (float32x2_t, float32x4_t v, const int lane)
A64: FMULX Vd.2S, Vn.2S, Vm.S[lane]
|
MultiplyExtendedBySelectedScalar(Vector64<Single>, Vector64<Single>, Byte)
|
float32x2_t vmulx_lane_f32 (float32x2_t, float32x2_t v, const int lane)
A64: FMULX Vd.2S, Vn.2S, Vm.S[lane]
|
MultiplyExtendedScalar(Vector64<Double>, Vector64<Double>)
|
float64x1_t vmulx_f64 (float64x1_t a, float64x1_t b)
A64: FMULX Dd, Dn, Dm
|
MultiplyExtendedScalar(Vector64<Single>, Vector64<Single>)
|
float32_t vmulxs_f32 (float32_t a, float32_t b)
A64: FMULX Sd, Sn, Sm
|
MultiplyExtendedScalarBySelectedScalar(Vector64<Double>, Vector128<Double>, Byte)
|
float64_t vmulxd_laneq_f64 (float64_t, float64x2_t v, const int lane)
A64: FMULX Dd, Dn, Vm.D[lane]
|
MultiplyExtendedScalarBySelectedScalar(Vector64<Single>, Vector128<Single>, Byte)
|
float32_t vmulxs_laneq_f32 (float32_t, float32x4_t v, const int lane)
A64: FMULX Sd, Sn, Vm.S[lane]
|
MultiplyExtendedScalarBySelectedScalar(Vector64<Single>, Vector64<Single>, Byte)
|
float32_t vmulxs_lane_f32 (float32_t, float32x2_t v, const int lane)
A64: FMULX Sd, Sn, Vm.S[lane]
|
MultiplyRoundedDoublingSaturateHighScalar(Vector64<Int16>, Vector64<Int16>)
|
int16_t vqrdmulhh_s16 (int16_t a, int16_t b)
A64: SQRDMULH Hd, Hn, Hm
|
MultiplyRoundedDoublingSaturateHighScalar(Vector64<Int32>, Vector64<Int32>)
|
int32_t vqrdmulhs_s32 (int32_t a, int32_t b)
A64: SQRDMULH Sd, Sn, Sm
|
MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(Vector64<Int16>, Vector128<Int16>, Byte)
|
int16_t vqrdmulhh_laneq_s16 (int16_t, int16x8_t v, const int lane)
A64: SQRDMULH Hd, Hn, Vm.H[lane]
|
MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(Vector64<Int16>, Vector64<Int16>, Byte)
|
int16_t vqrdmulhh_lane_s16 (int16_t, int16x4_t v, const int lane)
A64: SQRDMULH Hd, Hn, Vm.H[lane]
|
MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(Vector64<Int32>, Vector128<Int32>, Byte)
|
int32_t vqrdmulhs_laneq_s32 (int32_t, int32x4_t v, const int lane)
A64: SQRDMULH Sd, Sn, Vm.S[lane]
|
MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(Vector64<Int32>, Vector64<Int32>, Byte)
|
int32_t vqrdmulhs_lane_s32 (int32_t, int32x2_t v, const int lane)
A64: SQRDMULH Sd, Sn, Vm.S[lane]
|
MultiplyScalarBySelectedScalar(Vector64<Double>, Vector128<Double>, Byte)
|
float64_t vmuld_laneq_f64 (float64_t, float64x2_t v, const int lane)
A64: FMUL Dd, Dn, Vm.D[lane]
|
Negate(Vector128<Double>)
|
float64x2_t vnegq_f64 (float64x2_t a)
A64: FNEG Vd.2D, Vn.2D
|
Negate(Vector128<Int64>)
|
int64x2_t vnegq_s64 (int64x2_t a)
A64: NEG Vd.2D, Vn.2D
|
NegateSaturate(Vector128<Int64>)
|
int64x2_t vqnegq_s64 (int64x2_t a)
A64: SQNEG Vd.2D, Vn.2D
|
NegateSaturateScalar(Vector64<Int16>)
|
int16_t vqnegh_s16 (int16_t a)
A64: SQNEG Hd, Hn
|
NegateSaturateScalar(Vector64<Int32>)
|
int32_t vqnegs_s32 (int32_t a)
A64: SQNEG Sd, Sn
|
NegateSaturateScalar(Vector64<Int64>)
|
int64_t vqnegd_s64 (int64_t a)
A64: SQNEG Dd, Dn
|
NegateSaturateScalar(Vector64<SByte>)
|
int8_t vqnegb_s8 (int8_t a)
A64: SQNEG Bd, Bn
|
NegateScalar(Vector64<Int64>)
|
int64x1_t vneg_s64 (int64x1_t a)
A64: NEG Dd, Dn
|
ReciprocalEstimate(Vector128<Double>)
|
float64x2_t vrecpeq_f64 (float64x2_t a)
A64: FRECPE Vd.2D, Vn.2D
|
ReciprocalEstimateScalar(Vector64<Double>)
|
float64x1_t vrecpe_f64 (float64x1_t a)
A64: FRECPE Dd, Dn
|
ReciprocalEstimateScalar(Vector64<Single>)
|
float32_t vrecpes_f32 (float32_t a)
A64: FRECPE Sd, Sn
|
ReciprocalExponentScalar(Vector64<Double>)
|
float64_t vrecpxd_f64 (float64_t a)
A64: FRECPX Dd, Dn
|
ReciprocalExponentScalar(Vector64<Single>)
|
float32_t vrecpxs_f32 (float32_t a)
A64: FRECPX Sd, Sn
|
ReciprocalSquareRootEstimate(Vector128<Double>)
|
float64x2_t vrsqrteq_f64 (float64x2_t a)
A64: FRSQRTE Vd.2D, Vn.2D
|
ReciprocalSquareRootEstimateScalar(Vector64<Double>)
|
float64x1_t vrsqrte_f64 (float64x1_t a)
A64: FRSQRTE Dd, Dn
|
ReciprocalSquareRootEstimateScalar(Vector64<Single>)
|
float32_t vrsqrtes_f32 (float32_t a)
A64: FRSQRTE Sd, Sn
|
ReciprocalSquareRootStep(Vector128<Double>, Vector128<Double>)
|
float64x2_t vrsqrtsq_f64 (float64x2_t a, float64x2_t b)
A64: FRSQRTS Vd.2D, Vn.2D, Vm.2D
|
ReciprocalSquareRootStepScalar(Vector64<Double>, Vector64<Double>)
|
float64x1_t vrsqrts_f64 (float64x1_t a, float64x1_t b)
A64: FRSQRTS Dd, Dn, Dm
|
ReciprocalSquareRootStepScalar(Vector64<Single>, Vector64<Single>)
|
float32_t vrsqrtss_f32 (float32_t a, float32_t b)
A64: FRSQRTS Sd, Sn, Sm
|
ReciprocalStep(Vector128<Double>, Vector128<Double>)
|
float64x2_t vrecpsq_f64 (float64x2_t a, float64x2_t b)
A64: FRECPS Vd.2D, Vn.2D, Vm.2D
|
ReciprocalStepScalar(Vector64<Double>, Vector64<Double>)
|
float64x1_t vrecps_f64 (float64x1_t a, float64x1_t b)
A64: FRECPS Dd, Dn, Dm
|
ReciprocalStepScalar(Vector64<Single>, Vector64<Single>)
|
float32_t vrecpss_f32 (float32_t a, float32_t b)
A64: FRECPS Sd, Sn, Sm
|
ReverseElementBits(Vector128<Byte>)
|
uint8x16_t vrbitq_u8 (uint8x16_t a)
A64: RBIT Vd.16B, Vn.16B
|
ReverseElementBits(Vector128<SByte>)
|
int8x16_t vrbitq_s8 (int8x16_t a)
A64: RBIT Vd.16B, Vn.16B
|
ReverseElementBits(Vector64<Byte>)
|
uint8x8_t vrbit_u8 (uint8x8_t a)
A64: RBIT Vd.8B, Vn.8B
|
ReverseElementBits(Vector64<SByte>)
|
int8x8_t vrbit_s8 (int8x8_t a)
A64: RBIT Vd.8B, Vn.8B
|
RoundAwayFromZero(Vector128<Double>)
|
float64x2_t vrndaq_f64 (float64x2_t a)
A64: FRINTA Vd.2D, Vn.2D
|
RoundToNearest(Vector128<Double>)
|
float64x2_t vrndnq_f64 (float64x2_t a)
A64: FRINTN Vd.2D, Vn.2D
|
RoundToNegativeInfinity(Vector128<Double>)
|
float64x2_t vrndmq_f64 (float64x2_t a)
A64: FRINTM Vd.2D, Vn.2D
|
RoundToPositiveInfinity(Vector128<Double>)
|
float64x2_t vrndpq_f64 (float64x2_t a)
A64: FRINTP Vd.2D, Vn.2D
|
RoundToZero(Vector128<Double>)
|
float64x2_t vrndq_f64 (float64x2_t a)
A64: FRINTZ Vd.2D, Vn.2D
|
ShiftArithmeticRoundedSaturateScalar(Vector64<Int16>, Vector64<Int16>)
|
int16_t vqrshlh_s16 (int16_t a, int16_t b)
A64: SQRSHL Hd, Hn, Hm
|
ShiftArithmeticRoundedSaturateScalar(Vector64<Int32>, Vector64<Int32>)
|
int32_t vqrshls_s32 (int32_t a, int32_t b)
A64: SQRSHL Sd, Sn, Sm
|
ShiftArithmeticRoundedSaturateScalar(Vector64<SByte>, Vector64<SByte>)
|
int8_t vqrshlb_s8 (int8_t a, int8_t b)
A64: SQRSHL Bd, Bn, Bm
|
ShiftArithmeticSaturateScalar(Vector64<Int16>, Vector64<Int16>)
|
int16_t vqshlh_s16 (int16_t a, int16_t b)
A64: SQSHL Hd, Hn, Hm
|
ShiftArithmeticSaturateScalar(Vector64<Int32>, Vector64<Int32>)
|
int32_t vqshls_s32 (int32_t a, int32_t b)
A64: SQSHL Sd, Sn, Sm
|
ShiftArithmeticSaturateScalar(Vector64<SByte>, Vector64<SByte>)
|
int8_t vqshlb_s8 (int8_t a, int8_t b)
A64: SQSHL Bd, Bn, Bm
|
ShiftLeftLogicalSaturateScalar(Vector64<Byte>, Byte)
|
uint8_t vqshlb_n_u8 (uint8_t, const int n)
A64: UQSHL Bd, Bn, #n
|
ShiftLeftLogicalSaturateScalar(Vector64<Int16>, Byte)
|
int16_t vqshlh_n_s16 (int16_t, const int n)
A64: SQSHL Hd, Hn, #n
|
ShiftLeftLogicalSaturateScalar(Vector64<Int32>, Byte)
|
int32_t vqshls_n_s32 (int32_t, const int n)
A64: SQSHL Sd, Sn, #n
|
ShiftLeftLogicalSaturateScalar(Vector64<SByte>, Byte)
|
int8_t vqshlb_n_s8 (int8_t a, const int n)
A64: SQSHL Bd, Bn, #n
|
ShiftLeftLogicalSaturateScalar(Vector64<UInt16>, Byte)
|
uint16_t vqshlh_n_u16 (uint16_t a, const int n)
A64: UQSHL Hd, Hn, #n
|
ShiftLeftLogicalSaturateScalar(Vector64<UInt32>, Byte)
|
uint32_t vqshls_n_u32 (uint32_t a, const int n)
A64: UQSHL Sd, Sn, #n
|
ShiftLeftLogicalSaturateUnsignedScalar(Vector64<Int16>, Byte)
|
uint16_t vqshluh_n_s16 (int16_t, const int n)
A64: SQSHLU Hd, Hn, #n
|
ShiftLeftLogicalSaturateUnsignedScalar(Vector64<Int32>, Byte)
|
uint32_t vqshlus_n_s32 (int32_t, const int n)
A64: SQSHLU Sd, Sn, #n
|
ShiftLeftLogicalSaturateUnsignedScalar(Vector64<SByte>, Byte)
|
uint8_t vqshlub_n_s8 (int8_t, const int n)
A64: SQSHLU Bd, Bn, #n
|
ShiftLogicalRoundedSaturateScalar(Vector64<Byte>, Vector64<SByte>)
|
uint8_t vqrshlb_u8 (uint8_t a, int8_t b)
A64: UQRSHL Bd, Bn, Bm
|
ShiftLogicalRoundedSaturateScalar(Vector64<Int16>, Vector64<Int16>)
|
uint16_t vqrshlh_u16 (uint16_t a, int16_t b)
A64: UQRSHL Hd, Hn, Hm
|
ShiftLogicalRoundedSaturateScalar(Vector64<Int32>, Vector64<Int32>)
|
uint32_t vqrshls_u32 (uint32_t a, int32_t b)
A64: UQRSHL Sd, Sn, Sm
|
ShiftLogicalRoundedSaturateScalar(Vector64<SByte>, Vector64<SByte>)
|
uint8_t vqrshlb_u8 (uint8_t a, int8_t b)
A64: UQRSHL Bd, Bn, Bm
|
ShiftLogicalRoundedSaturateScalar(Vector64<UInt16>, Vector64<Int16>)
|
uint16_t vqrshlh_u16 (uint16_t a, int16_t b)
A64: UQRSHL Hd, Hn, Hm
|
ShiftLogicalRoundedSaturateScalar(Vector64<UInt32>, Vector64<Int32>)
|
uint32_t vqrshls_u32 (uint32_t a, int32_t b)
A64: UQRSHL Sd, Sn, Sm
|
ShiftLogicalSaturateScalar(Vector64<Byte>, Vector64<SByte>)
|
uint8_t vqshlb_u8 (uint8_t a, int8_t b)
A64: UQSHL Bd, Bn, Bm
|
ShiftLogicalSaturateScalar(Vector64<Int16>, Vector64<Int16>)
|
uint16_t vqshlh_u16 (uint16_t a, int16_t b)
A64: UQSHL Hd, Hn, Hm
|
ShiftLogicalSaturateScalar(Vector64<Int32>, Vector64<Int32>)
|
uint32_t vqshls_u32 (uint32_t a, int32_t b)
A64: UQSHL Sd, Sn, Sm
|
ShiftLogicalSaturateScalar(Vector64<SByte>, Vector64<SByte>)
|
uint8_t vqshlb_u8 (uint8_t a, int8_t b)
A64: UQSHL Bd, Bn, Bm
|
ShiftLogicalSaturateScalar(Vector64<UInt16>, Vector64<Int16>)
|
uint16_t vqshlh_u16 (uint16_t a, int16_t b)
A64: UQSHL Hd, Hn, Hm
|
ShiftLogicalSaturateScalar(Vector64<UInt32>, Vector64<Int32>)
|
uint32_t vqshls_u32 (uint32_t a, int32_t b)
A64: UQSHL Sd, Sn, Sm
|
ShiftRightArithmeticNarrowingSaturateScalar(Vector64<Int16>, Byte)
|
int8_t vqshrnh_n_s16 (int16_t a, const int n)
A64: SQSHRN Bd, Hn, #n
|
ShiftRightArithmeticNarrowingSaturateScalar(Vector64<Int32>, Byte)
|
int16_t vqshrns_n_s32 (int32_t a, const int n)
A64: SQSHRN Hd, Sn, #n
|
ShiftRightArithmeticNarrowingSaturateScalar(Vector64<Int64>, Byte)
|
int32_t vqshrnd_n_s64 (int64_t a, const int n)
A64: SQSHRN Sd, Dn, #n
|
ShiftRightArithmeticNarrowingSaturateUnsignedScalar(Vector64<Int16>, Byte)
|
uint8_t vqshrunh_n_s16 (int16_t a, const int n)
A64: SQSHRUN Bd, Hn, #n
|
ShiftRightArithmeticNarrowingSaturateUnsignedScalar(Vector64<Int32>, Byte)
|
uint16_t vqshruns_n_s32 (int32_t a, const int n)
A64: SQSHRUN Hd, Sn, #n
|
ShiftRightArithmeticNarrowingSaturateUnsignedScalar(Vector64<Int64>, Byte)
|
uint32_t vqshrund_n_s64 (int64_t a, const int n)
A64: SQSHRUN Sd, Dn, #n
|
ShiftRightArithmeticRoundedNarrowingSaturateScalar(Vector64<Int16>, Byte)
|
int8_t vqrshrnh_n_s16 (int16_t a, const int n)
A64: SQRSHRN Bd, Hn, #n
|
ShiftRightArithmeticRoundedNarrowingSaturateScalar(Vector64<Int32>, Byte)
|
int16_t vqrshrns_n_s32 (int32_t a, const int n)
A64: SQRSHRN Hd, Sn, #n
|
ShiftRightArithmeticRoundedNarrowingSaturateScalar(Vector64<Int64>, Byte)
|
int32_t vqrshrnd_n_s64 (int64_t a, const int n)
A64: SQRSHRN Sd, Dn, #n
|
ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar(Vector64<Int16>, Byte)
|
uint8_t vqrshrunh_n_s16 (int16_t a, const int n)
A64: SQRSHRUN Bd, Hn, #n
|
ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar(Vector64<Int32>, Byte)
|
uint16_t vqrshruns_n_s32 (int32_t a, const int n)
A64: SQRSHRUN Hd, Sn, #n
|
ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar(Vector64<Int64>, Byte)
|
uint32_t vqrshrund_n_s64 (int64_t a, const int n)
A64: SQRSHRUN Sd, Dn, #n
|
ShiftRightLogicalNarrowingSaturateScalar(Vector64<Int16>, Byte)
|
uint8_t vqshrnh_n_u16 (uint16_t a, const int n)
A64: UQSHRN Bd, Hn, #n
|
ShiftRightLogicalNarrowingSaturateScalar(Vector64<Int32>, Byte)
|
uint16_t vqshrns_n_u32 (uint32_t a, const int n)
A64: UQSHRN Hd, Sn, #n
|
ShiftRightLogicalNarrowingSaturateScalar(Vector64<Int64>, Byte)
|
uint32_t vqshrnd_n_u64 (uint64_t a, const int n)
A64: UQSHRN Sd, Dn, #n
|
ShiftRightLogicalNarrowingSaturateScalar(Vector64<UInt16>, Byte)
|
uint8_t vqshrnh_n_u16 (uint16_t a, const int n)
A64: UQSHRN Bd, Hn, #n
|
ShiftRightLogicalNarrowingSaturateScalar(Vector64<UInt32>, Byte)
|
uint16_t vqshrns_n_u32 (uint32_t a, const int n)
A64: UQSHRN Hd, Sn, #n
|
ShiftRightLogicalNarrowingSaturateScalar(Vector64<UInt64>, Byte)
|
uint32_t vqshrnd_n_u64 (uint64_t a, const int n)
A64: UQSHRN Sd, Dn, #n
|
ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<Int16>, Byte)
|
uint8_t vqrshrnh_n_u16 (uint16_t a, const int n)
A64: UQRSHRN Bd, Hn, #n
|
ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<Int32>, Byte)
|
uint16_t vqrshrns_n_u32 (uint32_t a, const int n)
A64: UQRSHRN Hd, Sn, #n
|
ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<Int64>, Byte)
|
uint32_t vqrshrnd_n_u64 (uint64_t a, const int n)
A64: UQRSHRN Sd, Dn, #n
|
ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<UInt16>, Byte)
|
uint8_t vqrshrnh_n_u16 (uint16_t a, const int n)
A64: UQRSHRN Bd, Hn, #n
|
ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<UInt32>, Byte)
|
uint16_t vqrshrns_n_u32 (uint32_t a, const int n)
A64: UQRSHRN Hd, Sn, #n
|
ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<UInt64>, Byte)
|
uint32_t vqrshrnd_n_u64 (uint64_t a, const int n)
A64: UQRSHRN Sd, Dn, #n
|
Sqrt(Vector128<Double>)
|
float64x2_t vsqrtq_f64 (float64x2_t a)
A64: FSQRT Vd.2D, Vn.2D
|
Sqrt(Vector128<Single>)
|
float32x4_t vsqrtq_f32 (float32x4_t a)
A64: FSQRT Vd.4S, Vn.4S
|
Sqrt(Vector64<Single>)
|
float32x2_t vsqrt_f32 (float32x2_t a)
A64: FSQRT Vd.2S, Vn.2S
|
StorePair(Byte*, Vector128<Byte>, Vector128<Byte>)
|
A64: STP Qt1, Qt2, [Xn]
|
StorePair(Byte*, Vector64<Byte>, Vector64<Byte>)
|
A64: STP Dt1, Dt2, [Xn]
|
StorePair(Double*, Vector128<Double>, Vector128<Double>)
|
A64: STP Qt1, Qt2, [Xn]
|
StorePair(Double*, Vector64<Double>, Vector64<Double>)
|
A64: STP Dt1, Dt2, [Xn]
|
StorePair(Int16*, Vector128<Int16>, Vector128<Int16>)
|
A64: STP Qt1, Qt2, [Xn]
|
StorePair(Int16*, Vector64<Int16>, Vector64<Int16>)
|
A64: STP Dt1, Dt2, [Xn]
|
StorePair(Int32*, Vector128<Int32>, Vector128<Int32>)
|
A64: STP Qt1, Qt2, [Xn]
|
StorePair(Int32*, Vector64<Int32>, Vector64<Int32>)
|
A64: STP Dt1, Dt2, [Xn]
|
StorePair(Int64*, Vector128<Int64>, Vector128<Int64>)
|
A64: STP Qt1, Qt2, [Xn]
|
StorePair(Int64*, Vector64<Int64>, Vector64<Int64>)
|
A64: STP Dt1, Dt2, [Xn]
|
StorePair(SByte*, Vector128<SByte>, Vector128<SByte>)
|
A64: STP Qt1, Qt2, [Xn]
|
StorePair(SByte*, Vector64<SByte>, Vector64<SByte>)
|
A64: STP Dt1, Dt2, [Xn]
|
StorePair(Single*, Vector128<Single>, Vector128<Single>)
|
A64: STP Qt1, Qt2, [Xn]
|
StorePair(Single*, Vector64<Single>, Vector64<Single>)
|
A64: STP Dt1, Dt2, [Xn]
|
StorePair(UInt16*, Vector128<UInt16>, Vector128<UInt16>)
|
A64: STP Qt1, Qt2, [Xn]
|
StorePair(UInt16*, Vector64<UInt16>, Vector64<UInt16>)
|
A64: STP Dt1, Dt2, [Xn]
|
StorePair(UInt32*, Vector128<UInt32>, Vector128<UInt32>)
|
A64: STP Qt1, Qt2, [Xn]
|
StorePair(UInt32*, Vector64<UInt32>, Vector64<UInt32>)
|
A64: STP Dt1, Dt2, [Xn]
|
StorePair(UInt64*, Vector128<UInt64>, Vector128<UInt64>)
|
A64: STP Qt1, Qt2, [Xn]
|
StorePair(UInt64*, Vector64<UInt64>, Vector64<UInt64>)
|
A64: STP Dt1, Dt2, [Xn]
|
StorePairNonTemporal(Byte*, Vector128<Byte>, Vector128<Byte>)
|
A64: STNP Qt1, Qt2, [Xn]
|
StorePairNonTemporal(Byte*, Vector64<Byte>, Vector64<Byte>)
|
A64: STNP Dt1, Dt2, [Xn]
|
StorePairNonTemporal(Double*, Vector128<Double>, Vector128<Double>)
|
A64: STNP Qt1, Qt2, [Xn]
|
StorePairNonTemporal(Double*, Vector64<Double>, Vector64<Double>)
|
A64: STNP Dt1, Dt2, [Xn]
|
StorePairNonTemporal(Int16*, Vector128<Int16>, Vector128<Int16>)
|
A64: STNP Qt1, Qt2, [Xn]
|
StorePairNonTemporal(Int16*, Vector64<Int16>, Vector64<Int16>)
|
A64: STNP Dt1, Dt2, [Xn]
|
StorePairNonTemporal(Int32*, Vector128<Int32>, Vector128<Int32>)
|
A64: STNP Qt1, Qt2, [Xn]
|
StorePairNonTemporal(Int32*, Vector64<Int32>, Vector64<Int32>)
|
A64: STNP Dt1, Dt2, [Xn]
|
StorePairNonTemporal(Int64*, Vector128<Int64>, Vector128<Int64>)
|
A64: STNP Qt1, Qt2, [Xn]
|
StorePairNonTemporal(Int64*, Vector64<Int64>, Vector64<Int64>)
|
A64: STNP Dt1, Dt2, [Xn]
|
StorePairNonTemporal(SByte*, Vector128<SByte>, Vector128<SByte>)
|
A64: STNP Qt1, Qt2, [Xn]
|
StorePairNonTemporal(SByte*, Vector64<SByte>, Vector64<SByte>)
|
A64: STNP Dt1, Dt2, [Xn]
|
StorePairNonTemporal(Single*, Vector128<Single>, Vector128<Single>)
|
A64: STNP Qt1, Qt2, [Xn]
|
StorePairNonTemporal(Single*, Vector64<Single>, Vector64<Single>)
|
A64: STNP Dt1, Dt2, [Xn]
|
StorePairNonTemporal(UInt16*, Vector128<UInt16>, Vector128<UInt16>)
|
A64: STNP Qt1, Qt2, [Xn]
|
StorePairNonTemporal(UInt16*, Vector64<UInt16>, Vector64<UInt16>)
|
A64: STNP Dt1, Dt2, [Xn]
|
StorePairNonTemporal(UInt32*, Vector128<UInt32>, Vector128<UInt32>)
|
A64: STNP Qt1, Qt2, [Xn]
|
StorePairNonTemporal(UInt32*, Vector64<UInt32>, Vector64<UInt32>)
|
A64: STNP Dt1, Dt2, [Xn]
|
StorePairNonTemporal(UInt64*, Vector128<UInt64>, Vector128<UInt64>)
|
A64: STNP Qt1, Qt2, [Xn]
|
StorePairNonTemporal(UInt64*, Vector64<UInt64>, Vector64<UInt64>)
|
A64: STNP Dt1, Dt2, [Xn]
|
StorePairScalar(Int32*, Vector64<Int32>, Vector64<Int32>)
|
A64: STP St1, St2, [Xn]
|
StorePairScalar(Single*, Vector64<Single>, Vector64<Single>)
|
A64: STP St1, St2, [Xn]
|
StorePairScalar(UInt32*, Vector64<UInt32>, Vector64<UInt32>)
|
A64: STP St1, St2, [Xn]
|
StorePairScalarNonTemporal(Int32*, Vector64<Int32>, Vector64<Int32>)
|
A64: STNP St1, St2, [Xn]
|
StorePairScalarNonTemporal(Single*, Vector64<Single>, Vector64<Single>)
|
A64: STNP St1, St2, [Xn]
|
StorePairScalarNonTemporal(UInt32*, Vector64<UInt32>, Vector64<UInt32>)
|
A64: STNP St1, St2, [Xn]
|
StoreSelectedScalar(Byte*, ValueTuple<Vector128<Byte>,Vector128<Byte>,Vector128<Byte>,Vector128<Byte>>, Byte)
|
StoreSelectedScalar(Byte*, ValueTuple<Vector128<Byte>,Vector128<Byte>,Vector128<Byte>>, Byte)
|
StoreSelectedScalar(Byte*, ValueTuple<Vector128<Byte>,Vector128<Byte>>, Byte)
|
StoreSelectedScalar(Double*, ValueTuple<Vector128<Double>,Vector128<Double>,Vector128<Double>,Vector128<Double>>, Byte)
|
StoreSelectedScalar(Double*, ValueTuple<Vector128<Double>,Vector128<Double>,Vector128<Double>>, Byte)
|
StoreSelectedScalar(Double*, ValueTuple<Vector128<Double>,Vector128<Double>>, Byte)
|
StoreSelectedScalar(Int16*, ValueTuple<Vector128<Int16>,Vector128<Int16>,Vector128<Int16>,Vector128<Int16>>, Byte)
|
StoreSelectedScalar(Int16*, ValueTuple<Vector128<Int16>,Vector128<Int16>,Vector128<Int16>>, Byte)
|
StoreSelectedScalar(Int16*, ValueTuple<Vector128<Int16>,Vector128<Int16>>, Byte)
|
StoreSelectedScalar(Int32*, ValueTuple<Vector128<Int32>,Vector128<Int32>,Vector128<Int32>,Vector128<Int32>>, Byte)
|
StoreSelectedScalar(Int32*, ValueTuple<Vector128<Int32>,Vector128<Int32>,Vector128<Int32>>, Byte)
|
StoreSelectedScalar(Int32*, ValueTuple<Vector128<Int32>,Vector128<Int32>>, Byte)
|
StoreSelectedScalar(Int64*, ValueTuple<Vector128<Int64>,Vector128<Int64>,Vector128<Int64>,Vector128<Int64>>, Byte)
|
StoreSelectedScalar(Int64*, ValueTuple<Vector128<Int64>,Vector128<Int64>,Vector128<Int64>>, Byte)
|
StoreSelectedScalar(Int64*, ValueTuple<Vector128<Int64>,Vector128<Int64>>, Byte)
|
StoreSelectedScalar(SByte*, ValueTuple<Vector128<SByte>,Vector128<SByte>,Vector128<SByte>,Vector128<SByte>>, Byte)
|
StoreSelectedScalar(SByte*, ValueTuple<Vector128<SByte>,Vector128<SByte>,Vector128<SByte>>, Byte)
|
StoreSelectedScalar(SByte*, ValueTuple<Vector128<SByte>,Vector128<SByte>>, Byte)
|
StoreSelectedScalar(Single*, ValueTuple<Vector128<Single>,Vector128<Single>,Vector128<Single>,Vector128<Single>>, Byte)
|
StoreSelectedScalar(Single*, ValueTuple<Vector128<Single>,Vector128<Single>,Vector128<Single>>, Byte)
|
StoreSelectedScalar(Single*, ValueTuple<Vector128<Single>,Vector128<Single>>, Byte)
|
StoreSelectedScalar(UInt16*, ValueTuple<Vector128<UInt16>,Vector128<UInt16>,Vector128<UInt16>,Vector128<UInt16>>, Byte)
|
StoreSelectedScalar(UInt16*, ValueTuple<Vector128<UInt16>,Vector128<UInt16>,Vector128<UInt16>>, Byte)
|
StoreSelectedScalar(UInt16*, ValueTuple<Vector128<UInt16>,Vector128<UInt16>>, Byte)
|
StoreSelectedScalar(UInt32*, ValueTuple<Vector128<UInt32>,Vector128<UInt32>,Vector128<UInt32>,Vector128<UInt32>>, Byte)
|
StoreSelectedScalar(UInt32*, ValueTuple<Vector128<UInt32>,Vector128<UInt32>,Vector128<UInt32>>, Byte)
|
StoreSelectedScalar(UInt32*, ValueTuple<Vector128<UInt32>,Vector128<UInt32>>, Byte)
|
StoreSelectedScalar(UInt64*, ValueTuple<Vector128<UInt64>,Vector128<UInt64>,Vector128<UInt64>,Vector128<UInt64>>, Byte)
|
StoreSelectedScalar(UInt64*, ValueTuple<Vector128<UInt64>,Vector128<UInt64>,Vector128<UInt64>>, Byte)
|
StoreSelectedScalar(UInt64*, ValueTuple<Vector128<UInt64>,Vector128<UInt64>>, Byte)
|
StoreVector128x2(Byte*, ValueTuple<Vector128<Byte>,Vector128<Byte>>)
|
StoreVector128x2(Double*, ValueTuple<Vector128<Double>,Vector128<Double>>)
|
StoreVector128x2(Int16*, ValueTuple<Vector128<Int16>,Vector128<Int16>>)
|
StoreVector128x2(Int32*, ValueTuple<Vector128<Int32>,Vector128<Int32>>)
|
StoreVector128x2(Int64*, ValueTuple<Vector128<Int64>,Vector128<Int64>>)
|
StoreVector128x2(SByte*, ValueTuple<Vector128<SByte>,Vector128<SByte>>)
|
StoreVector128x2(Single*, ValueTuple<Vector128<Single>,Vector128<Single>>)
|
StoreVector128x2(UInt16*, ValueTuple<Vector128<UInt16>,Vector128<UInt16>>)
|
StoreVector128x2(UInt32*, ValueTuple<Vector128<UInt32>,Vector128<UInt32>>)
|
StoreVector128x2(UInt64*, ValueTuple<Vector128<UInt64>,Vector128<UInt64>>)
|
StoreVector128x2AndZip(Byte*, ValueTuple<Vector128<Byte>,Vector128<Byte>>)
|
StoreVector128x2AndZip(Double*, ValueTuple<Vector128<Double>,Vector128<Double>>)
|
StoreVector128x2AndZip(Int16*, ValueTuple<Vector128<Int16>,Vector128<Int16>>)
|
StoreVector128x2AndZip(Int32*, ValueTuple<Vector128<Int32>,Vector128<Int32>>)
|
StoreVector128x2AndZip(Int64*, ValueTuple<Vector128<Int64>,Vector128<Int64>>)
|
StoreVector128x2AndZip(SByte*, ValueTuple<Vector128<SByte>,Vector128<SByte>>)
|
StoreVector128x2AndZip(Single*, ValueTuple<Vector128<Single>,Vector128<Single>>)
|
StoreVector128x2AndZip(UInt16*, ValueTuple<Vector128<UInt16>,Vector128<UInt16>>)
|
StoreVector128x2AndZip(UInt32*, ValueTuple<Vector128<UInt32>,Vector128<UInt32>>)
|
StoreVector128x2AndZip(UInt64*, ValueTuple<Vector128<UInt64>,Vector128<UInt64>>)
|
StoreVector128x3(Byte*, ValueTuple<Vector128<Byte>,Vector128<Byte>,Vector128<Byte>>)
|
StoreVector128x3(Double*, ValueTuple<Vector128<Double>,Vector128<Double>,Vector128<Double>>)
|
StoreVector128x3(Int16*, ValueTuple<Vector128<Int16>,Vector128<Int16>,Vector128<Int16>>)
|
StoreVector128x3(Int32*, ValueTuple<Vector128<Int32>,Vector128<Int32>,Vector128<Int32>>)
|
StoreVector128x3(Int64*, ValueTuple<Vector128<Int64>,Vector128<Int64>,Vector128<Int64>>)
|
StoreVector128x3(SByte*, ValueTuple<Vector128<SByte>,Vector128<SByte>,Vector128<SByte>>)
|
StoreVector128x3(Single*, ValueTuple<Vector128<Single>,Vector128<Single>,Vector128<Single>>)
|
StoreVector128x3(UInt16*, ValueTuple<Vector128<UInt16>,Vector128<UInt16>,Vector128<UInt16>>)
|
StoreVector128x3(UInt32*, ValueTuple<Vector128<UInt32>,Vector128<UInt32>,Vector128<UInt32>>)
|
StoreVector128x3(UInt64*, ValueTuple<Vector128<UInt64>,Vector128<UInt64>,Vector128<UInt64>>)
|
StoreVector128x3AndZip(Byte*, ValueTuple<Vector128<Byte>,Vector128<Byte>,Vector128<Byte>>)
|
StoreVector128x3AndZip(Double*, ValueTuple<Vector128<Double>,Vector128<Double>,Vector128<Double>>)
|
StoreVector128x3AndZip(Int16*, ValueTuple<Vector128<Int16>,Vector128<Int16>,Vector128<Int16>>)
|
StoreVector128x3AndZip(Int32*, ValueTuple<Vector128<Int32>,Vector128<Int32>,Vector128<Int32>>)
|
StoreVector128x3AndZip(Int64*, ValueTuple<Vector128<Int64>,Vector128<Int64>,Vector128<Int64>>)
|
StoreVector128x3AndZip(SByte*, ValueTuple<Vector128<SByte>,Vector128<SByte>,Vector128<SByte>>)
|
StoreVector128x3AndZip(Single*, ValueTuple<Vector128<Single>,Vector128<Single>,Vector128<Single>>)
|
StoreVector128x3AndZip(UInt16*, ValueTuple<Vector128<UInt16>,Vector128<UInt16>,Vector128<UInt16>>)
|
StoreVector128x3AndZip(UInt32*, ValueTuple<Vector128<UInt32>,Vector128<UInt32>,Vector128<UInt32>>)
|
StoreVector128x3AndZip(UInt64*, ValueTuple<Vector128<UInt64>,Vector128<UInt64>,Vector128<UInt64>>)
|
StoreVector128x4(Byte*, ValueTuple<Vector128<Byte>,Vector128<Byte>,Vector128<Byte>,Vector128<Byte>>)
|
StoreVector128x4(Double*, ValueTuple<Vector128<Double>,Vector128<Double>,Vector128<Double>,Vector128<Double>>)
|
StoreVector128x4(Int16*, ValueTuple<Vector128<Int16>,Vector128<Int16>,Vector128<Int16>,Vector128<Int16>>)
|
StoreVector128x4(Int32*, ValueTuple<Vector128<Int32>,Vector128<Int32>,Vector128<Int32>,Vector128<Int32>>)
|
StoreVector128x4(Int64*, ValueTuple<Vector128<Int64>,Vector128<Int64>,Vector128<Int64>,Vector128<Int64>>)
|
StoreVector128x4(SByte*, ValueTuple<Vector128<SByte>,Vector128<SByte>,Vector128<SByte>,Vector128<SByte>>)
|
StoreVector128x4(Single*, ValueTuple<Vector128<Single>,Vector128<Single>,Vector128<Single>,Vector128<Single>>)
|
StoreVector128x4(UInt16*, ValueTuple<Vector128<UInt16>,Vector128<UInt16>,Vector128<UInt16>,Vector128<UInt16>>)
|
StoreVector128x4(UInt32*, ValueTuple<Vector128<UInt32>,Vector128<UInt32>,Vector128<UInt32>,Vector128<UInt32>>)
|
StoreVector128x4(UInt64*, ValueTuple<Vector128<UInt64>,Vector128<UInt64>,Vector128<UInt64>,Vector128<UInt64>>)
|
StoreVector128x4AndZip(Byte*, ValueTuple<Vector128<Byte>,Vector128<Byte>,Vector128<Byte>,Vector128<Byte>>)
|
StoreVector128x4AndZip(Double*, ValueTuple<Vector128<Double>,Vector128<Double>,Vector128<Double>,Vector128<Double>>)
|
StoreVector128x4AndZip(Int16*, ValueTuple<Vector128<Int16>,Vector128<Int16>,Vector128<Int16>,Vector128<Int16>>)
|
StoreVector128x4AndZip(Int32*, ValueTuple<Vector128<Int32>,Vector128<Int32>,Vector128<Int32>,Vector128<Int32>>)
|
StoreVector128x4AndZip(Int64*, ValueTuple<Vector128<Int64>,Vector128<Int64>,Vector128<Int64>,Vector128<Int64>>)
|
StoreVector128x4AndZip(SByte*, ValueTuple<Vector128<SByte>,Vector128<SByte>,Vector128<SByte>,Vector128<SByte>>)
|
StoreVector128x4AndZip(Single*, ValueTuple<Vector128<Single>,Vector128<Single>,Vector128<Single>,Vector128<Single>>)
|
StoreVector128x4AndZip(UInt16*, ValueTuple<Vector128<UInt16>,Vector128<UInt16>,Vector128<UInt16>,Vector128<UInt16>>)
|
StoreVector128x4AndZip(UInt32*, ValueTuple<Vector128<UInt32>,Vector128<UInt32>,Vector128<UInt32>,Vector128<UInt32>>)
|
StoreVector128x4AndZip(UInt64*, ValueTuple<Vector128<UInt64>,Vector128<UInt64>,Vector128<UInt64>,Vector128<UInt64>>)
|
Subtract(Vector128<Double>, Vector128<Double>)
|
float64x2_t vsubq_f64 (float64x2_t a, float64x2_t b)
A64: FSUB Vd.2D, Vn.2D, Vm.2D
|
SubtractSaturateScalar(Vector64<Byte>, Vector64<Byte>)
|
uint8_t vqsubb_u8 (uint8_t a, uint8_t b)
A64: UQSUB Bd, Bn, Bm
|
SubtractSaturateScalar(Vector64<Int16>, Vector64<Int16>)
|
int16_t vqsubh_s16 (int16_t a, int16_t b)
A64: SQSUB Hd, Hn, Hm
|
SubtractSaturateScalar(Vector64<Int32>, Vector64<Int32>)
|
int32_t vqsubs_s32 (int32_t a, int32_t b)
A64: SQSUB Sd, Sn, Sm
|
SubtractSaturateScalar(Vector64<SByte>, Vector64<SByte>)
|
int8_t vqsubb_s8 (int8_t a, int8_t b)
A64: SQSUB Bd, Bn, Bm
|
SubtractSaturateScalar(Vector64<UInt16>, Vector64<UInt16>)
|
uint16_t vqsubh_u16 (uint16_t a, uint16_t b)
A64: UQSUB Hd, Hn, Hm
|
SubtractSaturateScalar(Vector64<UInt32>, Vector64<UInt32>)
|
uint32_t vqsubs_u32 (uint32_t a, uint32_t b)
A64: UQSUB Sd, Sn, Sm
|
ToString()
|
Returns a string that represents the current object.
(Inherited from Object)
|
TransposeEven(Vector128<Byte>, Vector128<Byte>)
|
uint8x16_t vtrn1q_u8(uint8x16_t a, uint8x16_t b)
A64: TRN1 Vd.16B, Vn.16B, Vm.16B
|
TransposeEven(Vector128<Double>, Vector128<Double>)
|
float64x2_t vtrn1q_f64(float64x2_t a, float64x2_t b)
A64: TRN1 Vd.2D, Vn.2D, Vm.2D
|
TransposeEven(Vector128<Int16>, Vector128<Int16>)
|
int16x8_t vtrn1q_s16(int16x8_t a, int16x8_t b)
A64: TRN1 Vd.8H, Vn.8H, Vm.8H
|
TransposeEven(Vector128<Int32>, Vector128<Int32>)
|
int32x4_t vtrn1q_s32(int32x4_t a, int32x4_t b)
A64: TRN1 Vd.4S, Vn.4S, Vm.4S
|
TransposeEven(Vector128<Int64>, Vector128<Int64>)
|
int64x2_t vtrn1q_s64(int64x2_t a, int64x2_t b)
A64: TRN1 Vd.2D, Vn.2D, Vm.2D
|
TransposeEven(Vector128<SByte>, Vector128<SByte>)
|
int8x16_t vtrn1q_s8(int8x16_t a, int8x16_t b)
A64: TRN1 Vd.16B, Vn.16B, Vm.16B
|
TransposeEven(Vector128<Single>, Vector128<Single>)
|
float32x4_t vtrn1q_f32(float32x4_t a, float32x4_t b)
A64: TRN1 Vd.4S, Vn.4S, Vm.4S
|
TransposeEven(Vector128<UInt16>, Vector128<UInt16>)
|
uint16x8_t vtrn1q_u16(uint16x8_t a, uint16x8_t b)
A64: TRN1 Vd.8H, Vn.8H, Vm.8H
|
TransposeEven(Vector128<UInt32>, Vector128<UInt32>)
|
uint32x4_t vtrn1q_u32(uint32x4_t a, uint32x4_t b)
A64: TRN1 Vd.4S, Vn.4S, Vm.4S
|
TransposeEven(Vector128<UInt64>, Vector128<UInt64>)
|
uint64x2_t vtrn1q_u64(uint64x2_t a, uint64x2_t b)
A64: TRN1 Vd.2D, Vn.2D, Vm.2D
|
TransposeEven(Vector64<Byte>, Vector64<Byte>)
|
uint8x8_t vtrn1_u8(uint8x8_t a, uint8x8_t b)
A64: TRN1 Vd.8B, Vn.8B, Vm.8B
|
TransposeEven(Vector64<Int16>, Vector64<Int16>)
|
int16x4_t vtrn1_s16(int16x4_t a, int16x4_t b)
A64: TRN1 Vd.4H, Vn.4H, Vm.4H
|
TransposeEven(Vector64<Int32>, Vector64<Int32>)
|
int32x2_t vtrn1_s32(int32x2_t a, int32x2_t b)
A64: TRN1 Vd.2S, Vn.2S, Vm.2S
|
TransposeEven(Vector64<SByte>, Vector64<SByte>)
|
int8x8_t vtrn1_s8(int8x8_t a, int8x8_t b)
A64: TRN1 Vd.8B, Vn.8B, Vm.8B
|
TransposeEven(Vector64<Single>, Vector64<Single>)
|
float32x2_t vtrn1_f32(float32x2_t a, float32x2_t b)
A64: TRN1 Vd.2S, Vn.2S, Vm.2S
|
TransposeEven(Vector64<UInt16>, Vector64<UInt16>)
|
uint16x4_t vtrn1_u16(uint16x4_t a, uint16x4_t b)
A64: TRN1 Vd.4H, Vn.4H, Vm.4H
|
TransposeEven(Vector64<UInt32>, Vector64<UInt32>)
|
uint32x2_t vtrn1_u32(uint32x2_t a, uint32x2_t b)
A64: TRN1 Vd.2S, Vn.2S, Vm.2S
|
TransposeOdd(Vector128<Byte>, Vector128<Byte>)
|
uint8x16_t vtrn2q_u8(uint8x16_t a, uint8x16_t b)
A64: TRN2 Vd.16B, Vn.16B, Vm.16B
|
TransposeOdd(Vector128<Double>, Vector128<Double>)
|
float64x2_t vtrn2q_f64(float64x2_t a, float64x2_t b)
A64: TRN2 Vd.2D, Vn.2D, Vm.2D
|
TransposeOdd(Vector128<Int16>, Vector128<Int16>)
|
int16x8_t vtrn2q_s16(int16x8_t a, int16x8_t b)
A64: TRN2 Vd.8H, Vn.8H, Vm.8H
|
TransposeOdd(Vector128<Int32>, Vector128<Int32>)
|
int32x4_t vtrn2q_s32(int32x4_t a, int32x4_t b)
A64: TRN2 Vd.4S, Vn.4S, Vm.4S
|
TransposeOdd(Vector128<Int64>, Vector128<Int64>)
|
int64x2_t vtrn2q_s64(int64x2_t a, int64x2_t b)
A64: TRN2 Vd.2D, Vn.2D, Vm.2D
|
TransposeOdd(Vector128<SByte>, Vector128<SByte>)
|
int8x16_t vtrn2q_s8(int8x16_t a, int8x16_t b)
A64: TRN2 Vd.16B, Vn.16B, Vm.16B
|
TransposeOdd(Vector128<Single>, Vector128<Single>)
|
float32x4_t vtrn2q_f32(float32x4_t a, float32x4_t b)
A64: TRN2 Vd.4S, Vn.4S, Vm.4S
|
TransposeOdd(Vector128<UInt16>, Vector128<UInt16>)
|
uint16x8_t vtrn2q_u16(uint16x8_t a, uint16x8_t b)
A64: TRN2 Vd.8H, Vn.8H, Vm.8H
|
TransposeOdd(Vector128<UInt32>, Vector128<UInt32>)
|
uint32x4_t vtrn2q_u32(uint32x4_t a, uint32x4_t b)
A64: TRN2 Vd.4S, Vn.4S, Vm.4S
|
TransposeOdd(Vector128<UInt64>, Vector128<UInt64>)
|
uint64x2_t vtrn2q_u64(uint64x2_t a, uint64x2_t b)
A64: TRN2 Vd.2D, Vn.2D, Vm.2D
|
TransposeOdd(Vector64<Byte>, Vector64<Byte>)
|
uint8x8_t vtrn2_u8(uint8x8_t a, uint8x8_t b)
A64: TRN2 Vd.8B, Vn.8B, Vm.8B
|
TransposeOdd(Vector64<Int16>, Vector64<Int16>)
|
int16x4_t vtrn2_s16(int16x4_t a, int16x4_t b)
A64: TRN2 Vd.4H, Vn.4H, Vm.4H
|
TransposeOdd(Vector64<Int32>, Vector64<Int32>)
|
int32x2_t vtrn2_s32(int32x2_t a, int32x2_t b)
A64: TRN2 Vd.2S, Vn.2S, Vm.2S
|
TransposeOdd(Vector64<SByte>, Vector64<SByte>)
|
int8x8_t vtrn2_s8(int8x8_t a, int8x8_t b)
A64: TRN2 Vd.8B, Vn.8B, Vm.8B
|
TransposeOdd(Vector64<Single>, Vector64<Single>)
|
float32x2_t vtrn2_f32(float32x2_t a, float32x2_t b)
A64: TRN2 Vd.2S, Vn.2S, Vm.2S
|
TransposeOdd(Vector64<UInt16>, Vector64<UInt16>)
|
uint16x4_t vtrn2_u16(uint16x4_t a, uint16x4_t b)
A64: TRN2 Vd.4H, Vn.4H, Vm.4H
|
TransposeOdd(Vector64<UInt32>, Vector64<UInt32>)
|
uint32x2_t vtrn2_u32(uint32x2_t a, uint32x2_t b)
A64: TRN2 Vd.2S, Vn.2S, Vm.2S
|
UnzipEven(Vector128<Byte>, Vector128<Byte>)
|
uint8x16_t vuzp1q_u8(uint8x16_t a, uint8x16_t b)
A64: UZP1 Vd.16B, Vn.16B, Vm.16B
|
UnzipEven(Vector128<Double>, Vector128<Double>)
|
float64x2_t vuzp1q_f64(float64x2_t a, float64x2_t b)
A64: UZP1 Vd.2D, Vn.2D, Vm.2D
|
UnzipEven(Vector128<Int16>, Vector128<Int16>)
|
int16x8_t vuzp1q_s16(int16x8_t a, int16x8_t b)
A64: UZP1 Vd.8H, Vn.8H, Vm.8H
|
UnzipEven(Vector128<Int32>, Vector128<Int32>)
|
int32x4_t vuzp1q_s32(int32x4_t a, int32x4_t b)
A64: UZP1 Vd.4S, Vn.4S, Vm.4S
|
UnzipEven(Vector128<Int64>, Vector128<Int64>)
|
int64x2_t vuzp1q_s64(int64x2_t a, int64x2_t b)
A64: UZP1 Vd.2D, Vn.2D, Vm.2D
|
UnzipEven(Vector128<SByte>, Vector128<SByte>)
|
int8x16_t vuzp1q_s8(int8x16_t a, int8x16_t b)
A64: UZP1 Vd.16B, Vn.16B, Vm.16B
|
UnzipEven(Vector128<Single>, Vector128<Single>)
|
float32x4_t vuzp1q_f32(float32x4_t a, float32x4_t b)
A64: UZP1 Vd.4S, Vn.4S, Vm.4S
|
UnzipEven(Vector128<UInt16>, Vector128<UInt16>)
|
uint16x8_t vuzp1q_u16(uint16x8_t a, uint16x8_t b)
A64: UZP1 Vd.8H, Vn.8H, Vm.8H
|
UnzipEven(Vector128<UInt32>, Vector128<UInt32>)
|
uint32x4_t vuzp1q_u32(uint32x4_t a, uint32x4_t b)
A64: UZP1 Vd.4S, Vn.4S, Vm.4S
|
UnzipEven(Vector128<UInt64>, Vector128<UInt64>)
|
uint64x2_t vuzp1q_u64(uint64x2_t a, uint64x2_t b)
A64: UZP1 Vd.2D, Vn.2D, Vm.2D
|
UnzipEven(Vector64<Byte>, Vector64<Byte>)
|
uint8x8_t vuzp1_u8(uint8x8_t a, uint8x8_t b)
A64: UZP1 Vd.8B, Vn.8B, Vm.8B
|
UnzipEven(Vector64<Int16>, Vector64<Int16>)
|
int16x4_t vuzp1_s16(int16x4_t a, int16x4_t b)
A64: UZP1 Vd.4H, Vn.4H, Vm.4H
|
UnzipEven(Vector64<Int32>, Vector64<Int32>)
|
int32x2_t vuzp1_s32(int32x2_t a, int32x2_t b)
A64: UZP1 Vd.2S, Vn.2S, Vm.2S
|
UnzipEven(Vector64<SByte>, Vector64<SByte>)
|
int8x8_t vuzp1_s8(int8x8_t a, int8x8_t b)
A64: UZP1 Vd.8B, Vn.8B, Vm.8B
|
UnzipEven(Vector64<Single>, Vector64<Single>)
|
float32x2_t vuzp1_f32(float32x2_t a, float32x2_t b)
A64: UZP1 Vd.2S, Vn.2S, Vm.2S
|
UnzipEven(Vector64<UInt16>, Vector64<UInt16>)
|
uint16x4_t vuzp1_u16(uint16x4_t a, uint16x4_t b)
A64: UZP1 Vd.4H, Vn.4H, Vm.4H
|
UnzipEven(Vector64<UInt32>, Vector64<UInt32>)
|
uint32x2_t vuzp1_u32(uint32x2_t a, uint32x2_t b)
A64: UZP1 Vd.2S, Vn.2S, Vm.2S
|
UnzipOdd(Vector128<Byte>, Vector128<Byte>)
|
uint8x16_t vuzp2q_u8(uint8x16_t a, uint8x16_t b)
A64: UZP2 Vd.16B, Vn.16B, Vm.16B
|
UnzipOdd(Vector128<Double>, Vector128<Double>)
|
float64x2_t vuzp2q_f64(float64x2_t a, float64x2_t b)
A64: UZP2 Vd.2D, Vn.2D, Vm.2D
|
UnzipOdd(Vector128<Int16>, Vector128<Int16>)
|
int16x8_t vuzp2q_s16(int16x8_t a, int16x8_t b)
A64: UZP2 Vd.8H, Vn.8H, Vm.8H
|
UnzipOdd(Vector128<Int32>, Vector128<Int32>)
|
int32x4_t vuzp2q_s32(int32x4_t a, int32x4_t b)
A64: UZP2 Vd.4S, Vn.4S, Vm.4S
|
UnzipOdd(Vector128<Int64>, Vector128<Int64>)
|
int64x2_t vuzp2q_s64(int64x2_t a, int64x2_t b)
A64: UZP2 Vd.2D, Vn.2D, Vm.2D
|
UnzipOdd(Vector128<SByte>, Vector128<SByte>)
|
int8x16_t vuzp2q_s8(int8x16_t a, int8x16_t b)
A64: UZP2 Vd.16B, Vn.16B, Vm.16B
|
UnzipOdd(Vector128<Single>, Vector128<Single>)
|
float32x4_t vuzp2q_f32(float32x4_t a, float32x4_t b)
A64: UZP2 Vd.4S, Vn.4S, Vm.4S
|
UnzipOdd(Vector128<UInt16>, Vector128<UInt16>)
|
uint16x8_t vuzp2q_u16(uint16x8_t a, uint16x8_t b)
A64: UZP2 Vd.8H, Vn.8H, Vm.8H
|
UnzipOdd(Vector128<UInt32>, Vector128<UInt32>)
|
uint32x4_t vuzp2q_u32(uint32x4_t a, uint32x4_t b)
A64: UZP2 Vd.4S, Vn.4S, Vm.4S
|
UnzipOdd(Vector128<UInt64>, Vector128<UInt64>)
|
uint64x2_t vuzp2q_u64(uint64x2_t a, uint64x2_t b)
A64: UZP2 Vd.2D, Vn.2D, Vm.2D
|
UnzipOdd(Vector64<Byte>, Vector64<Byte>)
|
uint8x8_t vuzp2_u8(uint8x8_t a, uint8x8_t b)
A64: UZP2 Vd.8B, Vn.8B, Vm.8B
|
UnzipOdd(Vector64<Int16>, Vector64<Int16>)
|
int16x4_t vuzp2_s16 (int16x4_t a, int16x4_t b)
A64: UZP2 Vd.4H, Vn.4H, Vm.4H
|
UnzipOdd(Vector64<Int32>, Vector64<Int32>)
|
int32x2_t vuzp2_s32 (int32x2_t a, int32x2_t b)
A64: UZP2 Vd.2S, Vn.2S, Vm.2S
|
UnzipOdd(Vector64<SByte>, Vector64<SByte>)
|
int8x8_t vuzp2_s8(int8x8_t a, int8x8_t b)
A64: UZP2 Vd.8B, Vn.8B, Vm.8B
|
UnzipOdd(Vector64<Single>, Vector64<Single>)
|
float32x2_t vuzp2_f32(float32x2_t a, float32x2_t b)
A64: UZP2 Vd.2S, Vn.2S, Vm.2S
|
UnzipOdd(Vector64<UInt16>, Vector64<UInt16>)
|
uint16x4_t vuzp2_u16(uint16x4_t a, uint16x4_t b)
A64: UZP2 Vd.4H, Vn.4H, Vm.4H
|
UnzipOdd(Vector64<UInt32>, Vector64<UInt32>)
|
uint32x2_t vuzp2_u32 (uint32x2_t a, uint32x2_t b)
A64: UZP2 Vd.2S, Vn.2S, Vm.2S
|
VectorTableLookup(ValueTuple<Vector128<Byte>,Vector128<Byte>,Vector128<Byte>,Vector128<Byte>>, Vector128<Byte>)
|
uint8x16_t vqtbl4q_u8(uint8x16x4_t t, uint8x16_t idx)
A64: TBL Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.16B
|
VectorTableLookup(ValueTuple<Vector128<Byte>,Vector128<Byte>,Vector128<Byte>>, Vector128<Byte>)
|
uint8x16_t vqtbl3q_u8(uint8x16x3_t t, uint8x16_t idx)
A64: TBL Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.16B
|
VectorTableLookup(ValueTuple<Vector128<Byte>,Vector128<Byte>>, Vector128<Byte>)
|
uint8x16_t vqtbl2q_u8(uint8x16x2_t t, uint8x16_t idx)
A64: TBL Vd.16B, {Vn.16B, Vn+1.16B}, Vm.16B
|
VectorTableLookup(ValueTuple<Vector128<SByte>,Vector128<SByte>,Vector128<SByte>,Vector128<SByte>>, Vector128<SByte>)
|
int8x16_t vqtbl4q_s8(int8x16x4_t t, uint8x16_t idx)
A64: TBL Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.16B
|
VectorTableLookup(ValueTuple<Vector128<SByte>,Vector128<SByte>,Vector128<SByte>>, Vector128<SByte>)
|
int8x16_t vqtbl3q_s8(int8x16x3_t t, uint8x16_t idx)
A64: TBL Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.16B
|
VectorTableLookup(ValueTuple<Vector128<SByte>,Vector128<SByte>>, Vector128<SByte>)
|
int8x16_t vqtbl2q_s8(int8x16x2_t t, uint8x16_t idx)
A64: TBL Vd.16B, {Vn.16B, Vn+1.16B}, Vm.16B
|
VectorTableLookup(Vector128<Byte>, Vector128<Byte>)
|
uint8x16_t vqtbl1q_u8(uint8x16_t t, uint8x16_t idx)
A64: TBL Vd.16B, {Vn.16B}, Vm.16B
|
VectorTableLookup(Vector128<SByte>, Vector128<SByte>)
|
int8x16_t vqtbl1q_s8(int8x16_t t, uint8x16_t idx)
A64: TBL Vd.16B, {Vn.16B}, Vm.16B
|
VectorTableLookupExtension(Vector128<Byte>, ValueTuple<Vector128<Byte>,Vector128<Byte>,Vector128<Byte>,Vector128<Byte>>, Vector128<Byte>)
|
uint8x16_t vqtbx4q_u8(uint8x16x4_t t, uint8x16_t idx)
A64: TBX Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.16B
|
VectorTableLookupExtension(Vector128<Byte>, ValueTuple<Vector128<Byte>,Vector128<Byte>,Vector128<Byte>>, Vector128<Byte>)
|
uint8x16_t vqtbx3q_u8(uint8x16x3_t t, uint8x16_t idx)
A64: TBX Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.16B
|
VectorTableLookupExtension(Vector128<Byte>, ValueTuple<Vector128<Byte>,Vector128<Byte>>, Vector128<Byte>)
|
uint8x16_t vqtbx2q_u8(uint8x16x2_t t, uint8x16_t idx)
A64: TBX Vd.16B, {Vn.16B, Vn+1.16B}, Vm.16B
|
VectorTableLookupExtension(Vector128<Byte>, Vector128<Byte>, Vector128<Byte>)
|
uint8x16_t vqtbx1q_u8(uint8x16_t r, uint8x16_t t, uint8x16_t idx)
A64: TBX Vd.16B, {Vn.16B}, Vm.16B
|
VectorTableLookupExtension(Vector128<SByte>, ValueTuple<Vector128<SByte>,Vector128<SByte>,Vector128<SByte>,Vector128<SByte>>, Vector128<SByte>)
|
int8x16_t vqtbx4q_s8(int8x16x4_t t, uint8x16_t idx)
A64: TBX Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.16B
|
VectorTableLookupExtension(Vector128<SByte>, ValueTuple<Vector128<SByte>,Vector128<SByte>,Vector128<SByte>>, Vector128<SByte>)
|
int8x16_t vqtbx3q_s8(int8x16x3_t t, uint8x16_t idx)
A64: TBX Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.16B
|
VectorTableLookupExtension(Vector128<SByte>, ValueTuple<Vector128<SByte>,Vector128<SByte>>, Vector128<SByte>)
|
int8x16_t vqtbx2q_s8(int8x16x2_t t, uint8x16_t idx)
A64: TBX Vd.16B, {Vn.16B, Vn+1.16B}, Vm.16B
|
VectorTableLookupExtension(Vector128<SByte>, Vector128<SByte>, Vector128<SByte>)
|
int8x16_t vqtbx1q_s8(int8x16_t r, int8x16_t t, uint8x16_t idx)
A64: TBX Vd.16B, {Vn.16B}, Vm.16B
|
ZipHigh(Vector128<Byte>, Vector128<Byte>)
|
uint8x16_t vzip2q_u8(uint8x16_t a, uint8x16_t b)
A64: ZIP2 Vd.16B, Vn.16B, Vm.16B
|
ZipHigh(Vector128<Double>, Vector128<Double>)
|
float64x2_t vzip2q_f64(float64x2_t a, float64x2_t b)
A64: ZIP2 Vd.2D, Vn.2D, Vm.2D
|
ZipHigh(Vector128<Int16>, Vector128<Int16>)
|
int16x8_t vzip2q_s16(int16x8_t a, int16x8_t b)
A64: ZIP2 Vd.8H, Vn.8H, Vm.8H
|
ZipHigh(Vector128<Int32>, Vector128<Int32>)
|
int32x4_t vzip2q_s32(int32x4_t a, int32x4_t b)
A64: ZIP2 Vd.4S, Vn.4S, Vm.4S
|
ZipHigh(Vector128<Int64>, Vector128<Int64>)
|
int64x2_t vzip2q_s64(int64x2_t a, int64x2_t b)
A64: ZIP2 Vd.2D, Vn.2D, Vm.2D
|
ZipHigh(Vector128<SByte>, Vector128<SByte>)
|
int8x16_t vzip2q_s8(int8x16_t a, int8x16_t b)
A64: ZIP2 Vd.16B, Vn.16B, Vm.16B
|
ZipHigh(Vector128<Single>, Vector128<Single>)
|
float32x4_t vzip2q_f32(float32x4_t a, float32x4_t b)
A64: ZIP2 Vd.4S, Vn.4S, Vm.4S
|
ZipHigh(Vector128<UInt16>, Vector128<UInt16>)
|
uint16x8_t vzip2q_u16(uint16x8_t a, uint16x8_t b)
A64: ZIP2 Vd.8H, Vn.8H, Vm.8H
|
ZipHigh(Vector128<UInt32>, Vector128<UInt32>)
|
uint32x4_t vzip2q_u32(uint32x4_t a, uint32x4_t b)
A64: ZIP2 Vd.4S, Vn.4S, Vm.4S
|
ZipHigh(Vector128<UInt64>, Vector128<UInt64>)
|
uint64x2_t vzip2q_u64 (uint64x2_t a, uint64x2_t b)
A64: ZIP2 Vd.2D, Vn.2D, Vm.2D
|
ZipHigh(Vector64<Byte>, Vector64<Byte>)
|
uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b)
A64: ZIP2 Vd.8B, Vn.8B, Vm.8B
|
ZipHigh(Vector64<Int16>, Vector64<Int16>)
|
int16x4_t vzip2_s16(int16x4_t a, int16x4_t b)
A64: ZIP2 Vd.4H, Vn.4H, Vm.4H
|
ZipHigh(Vector64<Int32>, Vector64<Int32>)
|
int32x2_t vzip2_s32(int32x2_t a, int32x2_t b)
A64: ZIP2 Vd.2S, Vn.2S, Vm.2S
|
ZipHigh(Vector64<SByte>, Vector64<SByte>)
|
int8x8_t vzip2_s8(int8x8_t a, int8x8_t b)
A64: ZIP2 Vd.8B, Vn.8B, Vm.8B
|
ZipHigh(Vector64<Single>, Vector64<Single>)
|
float32x2_t vzip2_f32(float32x2_t a, float32x2_t b)
A64: ZIP2 Vd.2S, Vn.2S, Vm.2S
|
ZipHigh(Vector64<UInt16>, Vector64<UInt16>)
|
uint16x4_t vzip2_u16(uint16x4_t a, uint16x4_t b)
A64: ZIP2 Vd.4H, Vn.4H, Vm.4H
|
ZipHigh(Vector64<UInt32>, Vector64<UInt32>)
|
uint32x2_t vzip2_u32(uint32x2_t a, uint32x2_t b)
A64: ZIP2 Vd.2S, Vn.2S, Vm.2S
|
ZipLow(Vector128<Byte>, Vector128<Byte>)
|
uint8x16_t vzip1q_u8(uint8x16_t a, uint8x16_t b)
A64: ZIP1 Vd.16B, Vn.16B, Vm.16B
|
ZipLow(Vector128<Double>, Vector128<Double>)
|
float64x2_t vzip1q_f64(float64x2_t a, float64x2_t b)
A64: ZIP1 Vd.2D, Vn.2D, Vm.2D
|
ZipLow(Vector128<Int16>, Vector128<Int16>)
|
int16x8_t vzip1q_s16(int16x8_t a, int16x8_t b)
A64: ZIP1 Vd.8H, Vn.8H, Vm.8H
|
ZipLow(Vector128<Int32>, Vector128<Int32>)
|
int32x4_t vzip1q_s32(int32x4_t a, int32x4_t b)
A64: ZIP1 Vd.4S, Vn.4S, Vm.4S
|
ZipLow(Vector128<Int64>, Vector128<Int64>)
|
int64x2_t vzip1q_s64 (int64x2_t a, int64x2_t b)
A64: ZIP1 Vd.2D, Vn.2D, Vm.2D
|
ZipLow(Vector128<SByte>, Vector128<SByte>)
|
int8x16_t vzip1q_s8(int8x16_t a, int8x16_t b)
A64: ZIP1 Vd.16B, Vn.16B, Vm.16B
|
ZipLow(Vector128<Single>, Vector128<Single>)
|
float32x4_t vzip1q_f32(float32x4_t a, float32x4_t b)
A64: ZIP1 Vd.4S, Vn.4S, Vm.4S
|
ZipLow(Vector128<UInt16>, Vector128<UInt16>)
|
uint16x8_t vzip1q_u16 (uint16x8_t a, uint16x8_t b)
A64: ZIP1 Vd.8H, Vn.8H, Vm.8H
|
ZipLow(Vector128<UInt32>, Vector128<UInt32>)
|
uint32x4_t vzip1q_u32(uint32x4_t a, uint32x4_t b)
A64: ZIP1 Vd.4S, Vn.4S, Vm.4S
|
ZipLow(Vector128<UInt64>, Vector128<UInt64>)
|
uint64x2_t vzip1q_u64(uint64x2_t a, uint64x2_t b)
A64: ZIP1 Vd.2D, Vn.2D, Vm.2D
|
ZipLow(Vector64<Byte>, Vector64<Byte>)
|
uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b)
A64: ZIP1 Vd.8B, Vn.8B, Vm.8B
|
ZipLow(Vector64<Int16>, Vector64<Int16>)
|
int16x4_t vzip1_s16(int16x4_t a, int16x4_t b)
A64: ZIP1 Vd.4H, Vn.4H, Vm.4H
|
ZipLow(Vector64<Int32>, Vector64<Int32>)
|
int32x2_t vzip1_s32(int32x2_t a, int32x2_t b)
A64: ZIP1 Vd.2S, Vn.2S, Vm.2S
|
ZipLow(Vector64<SByte>, Vector64<SByte>)
|
int8x8_t vzip1_s8(int8x8_t a, int8x8_t b)
A64: ZIP1 Vd.8B, Vn.8B, Vm.8B
|
ZipLow(Vector64<Single>, Vector64<Single>)
|
float32x2_t vzip1_f32(float32x2_t a, float32x2_t b)
A64: ZIP1 Vd.2S, Vn.2S, Vm.2S
|
ZipLow(Vector64<UInt16>, Vector64<UInt16>)
|
uint16x4_t vzip1_u16 (uint16x4_t a, uint16x4_t b)
A64: ZIP1 Vd.4H, Vn.4H, Vm.4H
|
ZipLow(Vector64<UInt32>, Vector64<UInt32>)
|
uint32x2_t vzip1_u32(uint32x2_t a, uint32x2_t b)
A64: ZIP1 Vd.2S, Vn.2S, Vm.2S
|