// This code is automatically generated. DO NOT MODIFY.
//
// Instead, modify `crates/stdarch-gen/neon.spec` and run the following command to re-generate this file:
//
// ```
// OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen -- crates/stdarch-gen/neon.spec
// ```
use super::*;
#[cfg(test)]
use stdarch_test::assert_instr;

/// Three-way exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(eor3))]
pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.eor3s.v16i8")]
        fn veor3q_s8_(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    veor3q_s8_(a, b, c)
}
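// Illustrative usage sketch (not generated from neon.spec; the values below are
// assumptions for demonstration). Each lane of the result is the bitwise XOR of
// the three corresponding input lanes, i.e. `a ^ b ^ c`:
//
//     let a = vdupq_n_s8(0b0011);
//     let b = vdupq_n_s8(0b0101);
//     let c = vdupq_n_s8(0b0110);
//     let r = veor3q_s8(a, b, c); // every lane == 0b0000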

/// Three-way exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(eor3))]
pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.eor3s.v8i16")]
        fn veor3q_s16_(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    veor3q_s16_(a, b, c)
}

/// Three-way exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(eor3))]
pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.eor3s.v4i32")]
        fn veor3q_s32_(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    veor3q_s32_(a, b, c)
}

/// Three-way exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(eor3))]
pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.eor3s.v2i64")]
        fn veor3q_s64_(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    veor3q_s64_(a, b, c)
}

/// Three-way exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(eor3))]
pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.eor3u.v16i8")]
        fn veor3q_u8_(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    veor3q_u8_(a, b, c)
}

/// Three-way exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(eor3))]
pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.eor3u.v8i16")]
        fn veor3q_u16_(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    veor3q_u16_(a, b, c)
}

/// Three-way exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(eor3))]
pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.eor3u.v4i32")]
        fn veor3q_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    veor3q_u32_(a, b, c)
}

/// Three-way exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(eor3))]
pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.eor3u.v2i64")]
        fn veor3q_u64_(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    veor3q_u64_(a, b, c)
}

/// Floating-point absolute difference
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fabd.v1f64")]
        fn vabd_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    vabd_f64_(a, b)
}

/// Floating-point absolute difference
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fabd.v2f64")]
        fn vabdq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    vabdq_f64_(a, b)
}

/// Floating-point absolute difference
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabds_f32(a: f32, b: f32) -> f32 {
    simd_extract(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
}

/// Floating-point absolute difference
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabdd_f64(a: f64, b: f64) -> f64 {
    simd_extract(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
}
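// Illustrative usage sketch (assumed values, not part of the generated output):
// the scalar forms compute `|a - b|` on a single element, e.g.
//
//     let d = vabdd_f64(1.5, 4.0);  // d == 2.5
//     let s = vabds_f32(-3.0, 1.0); // s == 4.0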

/// Unsigned Absolute difference Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
    let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    simd_cast(vabd_u8(c, d))
}
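// Illustrative sketch of the "high" widening pattern above (assumed inputs, not
// generated from neon.spec): the upper eight u8 lanes of each argument are
// extracted, their absolute differences computed, then widened to u16, e.g.
//
//     let a = vdupq_n_u8(10);
//     let b = vdupq_n_u8(250);
//     let r = vabdl_high_u8(a, b); // every u16 lane == 240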

/// Unsigned Absolute difference Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
    let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    simd_cast(vabd_u16(c, d))
}

/// Unsigned Absolute difference Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
    let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
    simd_cast(vabd_u32(c, d))
}

/// Signed Absolute difference Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
    let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    let e: uint8x8_t = simd_cast(vabd_s8(c, d));
    simd_cast(e)
}

/// Signed Absolute difference Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
    let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    let e: uint16x4_t = simd_cast(vabd_s16(c, d));
    simd_cast(e)
}

/// Signed Absolute difference Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
    let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
    let e: uint32x2_t = simd_cast(vabd_s32(c, d));
    simd_cast(e)
}

/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    simd_eq(a, b)
}
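// Illustrative sketch (assumed inputs, not generated): equal lanes produce an
// all-ones mask and unequal lanes produce zero, e.g.
//
//     let a = vdup_n_u64(7);
//     let b = vdup_n_u64(7);
//     let m = vceq_u64(a, b); // lane 0 == u64::MAX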

/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    simd_eq(a, b)
}

/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    simd_eq(a, b)
}

/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    simd_eq(a, b)
}

/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    simd_eq(a, b)
}

/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    simd_eq(a, b)
}

/// Floating-point compare equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    simd_eq(a, b)
}

/// Floating-point compare equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    simd_eq(a, b)
}

/// Compare bitwise equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqd_s64(a: i64, b: i64) -> u64 {
    transmute(vceq_s64(transmute(a), transmute(b)))
}
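// Illustrative sketch (assumed inputs): the scalar compare returns `u64::MAX`
// when the operands are equal and `0` otherwise, e.g.
//
//     assert_eq!(vceqd_s64(-1, -1), u64::MAX);
//     assert_eq!(vceqd_s64(-1, 1), 0);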

/// Compare bitwise equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqd_u64(a: u64, b: u64) -> u64 {
    transmute(vceq_u64(transmute(a), transmute(b)))
}

/// Floating-point compare equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqs_f32(a: f32, b: f32) -> u32 {
    simd_extract(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
}

/// Floating-point compare equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqd_f64(a: f64, b: f64) -> u64 {
    simd_extract(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
}

/// Signed compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    simd_eq(a, transmute(b))
}
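// Illustrative sketch (assumed input, not generated): lanes equal to zero yield
// an all-ones mask, non-zero lanes yield zero, e.g.
//
//     let a = vdup_n_s8(0);
//     let m = vceqz_s8(a); // every u8 lane == 0xFF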

/// Signed compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    simd_eq(a, transmute(b))
}

/// Signed compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    simd_eq(a, transmute(b))
}

/// Signed compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    simd_eq(a, transmute(b))
}

/// Signed compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    simd_eq(a, transmute(b))
}

/// Signed compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    simd_eq(a, transmute(b))
}

/// Signed compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    simd_eq(a, transmute(b))
}

/// Signed compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    simd_eq(a, transmute(b))
}

/// Signed compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    simd_eq(a, transmute(b))
}

/// Signed compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    simd_eq(a, transmute(b))
}

/// Signed compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    simd_eq(a, transmute(b))
}

/// Signed compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    simd_eq(a, transmute(b))
}

/// Unsigned compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
    let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    simd_eq(a, transmute(b))
}

/// Unsigned compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
    let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    simd_eq(a, transmute(b))
}

/// Unsigned compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
    let b: u16x4 = u16x4::new(0, 0, 0, 0);
    simd_eq(a, transmute(b))
}

/// Unsigned compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
    let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    simd_eq(a, transmute(b))
}

/// Unsigned compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
    let b: u32x2 = u32x2::new(0, 0);
    simd_eq(a, transmute(b))
}

/// Unsigned compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
    let b: u32x4 = u32x4::new(0, 0, 0, 0);
    simd_eq(a, transmute(b))
}

/// Unsigned compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
    let b: u64x1 = u64x1::new(0);
    simd_eq(a, transmute(b))
}

/// Unsigned compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
    let b: u64x2 = u64x2::new(0, 0);
    simd_eq(a, transmute(b))
}

/// Floating-point compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    simd_eq(a, transmute(b))
}

/// Floating-point compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    simd_eq(a, transmute(b))
}

/// Floating-point compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    simd_eq(a, transmute(b))
}

/// Floating-point compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    simd_eq(a, transmute(b))
}

/// Compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqzd_s64(a: i64) -> u64 {
    transmute(vceqz_s64(transmute(a)))
}

/// Compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqzd_u64(a: u64) -> u64 {
    transmute(vceqz_u64(transmute(a)))
}

/// Floating-point compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqzs_f32(a: f32) -> u32 {
    simd_extract(vceqz_f32(vdup_n_f32(a)), 0)
}

/// Floating-point compare bitwise equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vceqzd_f64(a: f64) -> u64 {
    simd_extract(vceqz_f64(vdup_n_f64(a)), 0)
}

/// Signed compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    let c: int64x1_t = simd_and(a, b);
    let d: i64x1 = i64x1::new(0);
    simd_ne(c, transmute(d))
}
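// Illustrative sketch (assumed inputs, not generated): the result lane is all
// ones when `a & b` has any bit set, and zero otherwise, e.g.
//
//     let a = vdup_n_s64(0b0101);
//     let b = vdup_n_s64(0b0100);
//     let m = vtst_s64(a, b); // lane 0 == u64::MAX, since 0b0101 & 0b0100 != 0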

/// Signed compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    let c: int64x2_t = simd_and(a, b);
    let d: i64x2 = i64x2::new(0, 0);
    simd_ne(c, transmute(d))
}

/// Signed compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    let c: poly64x1_t = simd_and(a, b);
    let d: i64x1 = i64x1::new(0);
    simd_ne(c, transmute(d))
}

/// Signed compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    let c: poly64x2_t = simd_and(a, b);
    let d: i64x2 = i64x2::new(0, 0);
    simd_ne(c, transmute(d))
}

/// Unsigned compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    let c: uint64x1_t = simd_and(a, b);
    let d: u64x1 = u64x1::new(0);
    simd_ne(c, transmute(d))
}

/// Unsigned compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    let c: uint64x2_t = simd_and(a, b);
    let d: u64x2 = u64x2::new(0, 0);
    simd_ne(c, transmute(d))
}

/// Compare bitwise test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtstd_s64(a: i64, b: i64) -> u64 {
    transmute(vtst_s64(transmute(a), transmute(b)))
}

/// Compare bitwise test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtstd_u64(a: u64, b: u64) -> u64 {
    transmute(vtst_u64(transmute(a), transmute(b)))
}

/// Signed saturating accumulate of unsigned value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuqadds_s32(a: i32, b: u32) -> i32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.suqadd.i32")]
        fn vuqadds_s32_(a: i32, b: u32) -> i32;
    }
    vuqadds_s32_(a, b)
}
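// Illustrative sketch (assumed inputs, not generated): the unsigned addend is
// accumulated into the signed value with saturation at `i32::MAX`, e.g.
//
//     assert_eq!(vuqadds_s32(1, 2), 3);
//     assert_eq!(vuqadds_s32(i32::MAX, 10), i32::MAX); // saturates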

/// Signed saturating accumulate of unsigned value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuqaddd_s64(a: i64, b: u64) -> i64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.suqadd.i64")]
        fn vuqaddd_s64_(a: i64, b: u64) -> i64;
    }
    vuqaddd_s64_(a, b)
}

/// Signed saturating accumulate of unsigned value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuqaddb_s8(a: i8, b: u8) -> i8 {
    simd_extract(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0)
}

/// Signed saturating accumulate of unsigned value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuqaddh_s16(a: i16, b: u16) -> i16 {
    simd_extract(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0)
}

/// Floating-point absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabs_f64(a: float64x1_t) -> float64x1_t {
    simd_fabs(a)
}

/// Floating-point absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabsq_f64(a: float64x2_t) -> float64x2_t {
    simd_fabs(a)
}

/// Compare signed greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    simd_gt(a, b)
}

/// Compare signed greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    simd_gt(a, b)
}

/// Compare unsigned higher
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    simd_gt(a, b)
}

/// Compare unsigned higher
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    simd_gt(a, b)
}
950
951/// Floating-point compare greater than
f2b60f7d
FG
952///
953/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)
ba9703b0
XL
954#[inline]
955#[target_feature(enable = "neon")]
956#[cfg_attr(test, assert_instr(fcmgt))]
a2a8927a 957#[stable(feature = "neon_intrinsics", since = "1.59.0")]
ba9703b0
XL
958pub unsafe fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
959 simd_gt(a, b)
960}
961
962/// Floating-point compare greater than
f2b60f7d
FG
963///
964/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)
ba9703b0
XL
965#[inline]
966#[target_feature(enable = "neon")]
967#[cfg_attr(test, assert_instr(fcmgt))]
a2a8927a 968#[stable(feature = "neon_intrinsics", since = "1.59.0")]
ba9703b0
XL
969pub unsafe fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
970 simd_gt(a, b)
971}
972
3c0e092e 973/// Compare greater than
f2b60f7d
FG
974///
975/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)
3c0e092e
XL
976#[inline]
977#[target_feature(enable = "neon")]
978#[cfg_attr(test, assert_instr(cmp))]
a2a8927a 979#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
980pub unsafe fn vcgtd_s64(a: i64, b: i64) -> u64 {
981 transmute(vcgt_s64(transmute(a), transmute(b)))
982}
983
984/// Compare greater than
f2b60f7d
FG
985///
986/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)
3c0e092e
XL
987#[inline]
988#[target_feature(enable = "neon")]
989#[cfg_attr(test, assert_instr(cmp))]
a2a8927a 990#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
991pub unsafe fn vcgtd_u64(a: u64, b: u64) -> u64 {
992 transmute(vcgt_u64(transmute(a), transmute(b)))
993}
994
995/// Floating-point compare greater than
f2b60f7d
FG
996///
997/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)
3c0e092e
XL
998#[inline]
999#[target_feature(enable = "neon")]
1000#[cfg_attr(test, assert_instr(fcmp))]
a2a8927a 1001#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1002pub unsafe fn vcgts_f32(a: f32, b: f32) -> u32 {
1003 simd_extract(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
1004}
1005
1006/// Floating-point compare greater than
f2b60f7d
FG
1007///
1008/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)
3c0e092e
XL
1009#[inline]
1010#[target_feature(enable = "neon")]
1011#[cfg_attr(test, assert_instr(fcmp))]
a2a8927a 1012#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1013pub unsafe fn vcgtd_f64(a: f64, b: f64) -> u64 {
1014 simd_extract(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
1015}
1016
ba9703b0 1017/// Compare signed less than
f2b60f7d
FG
1018///
1019/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)
ba9703b0
XL
1020#[inline]
1021#[target_feature(enable = "neon")]
1022#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1023#[stable(feature = "neon_intrinsics", since = "1.59.0")]
ba9703b0
XL
1024pub unsafe fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
1025 simd_lt(a, b)
1026}
1027
1028/// Compare signed less than
f2b60f7d
FG
1029///
1030/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)
ba9703b0
XL
1031#[inline]
1032#[target_feature(enable = "neon")]
1033#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1034#[stable(feature = "neon_intrinsics", since = "1.59.0")]
ba9703b0
XL
1035pub unsafe fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
1036 simd_lt(a, b)
1037}
1038
1039/// Compare unsigned less than
f2b60f7d
FG
1040///
1041/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)
ba9703b0
XL
1042#[inline]
1043#[target_feature(enable = "neon")]
1044#[cfg_attr(test, assert_instr(cmhi))]
a2a8927a 1045#[stable(feature = "neon_intrinsics", since = "1.59.0")]
ba9703b0
XL
1046pub unsafe fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
1047 simd_lt(a, b)
1048}
1049
1050/// Compare unsigned less than
f2b60f7d
FG
1051///
1052/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)
ba9703b0
XL
1053#[inline]
1054#[target_feature(enable = "neon")]
1055#[cfg_attr(test, assert_instr(cmhi))]
a2a8927a 1056#[stable(feature = "neon_intrinsics", since = "1.59.0")]
ba9703b0
XL
1057pub unsafe fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
1058 simd_lt(a, b)
1059}
1060
1061/// Floating-point compare less than
f2b60f7d
FG
1062///
1063/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)
ba9703b0
XL
1064#[inline]
1065#[target_feature(enable = "neon")]
1066#[cfg_attr(test, assert_instr(fcmgt))]
a2a8927a 1067#[stable(feature = "neon_intrinsics", since = "1.59.0")]
ba9703b0
XL
1068pub unsafe fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
1069 simd_lt(a, b)
1070}
1071
1072/// Floating-point compare less than
f2b60f7d
FG
1073///
1074/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)
ba9703b0
XL
1075#[inline]
1076#[target_feature(enable = "neon")]
1077#[cfg_attr(test, assert_instr(fcmgt))]
a2a8927a 1078#[stable(feature = "neon_intrinsics", since = "1.59.0")]
ba9703b0
XL
1079pub unsafe fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
1080 simd_lt(a, b)
1081}
1082
3c0e092e 1083/// Compare less than
f2b60f7d
FG
1084///
1085/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)
3c0e092e
XL
1086#[inline]
1087#[target_feature(enable = "neon")]
1088#[cfg_attr(test, assert_instr(cmp))]
a2a8927a 1089#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1090pub unsafe fn vcltd_s64(a: i64, b: i64) -> u64 {
1091 transmute(vclt_s64(transmute(a), transmute(b)))
1092}
1093
1094/// Compare less than
f2b60f7d
FG
1095///
1096/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)
3c0e092e
XL
1097#[inline]
1098#[target_feature(enable = "neon")]
1099#[cfg_attr(test, assert_instr(cmp))]
a2a8927a 1100#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1101pub unsafe fn vcltd_u64(a: u64, b: u64) -> u64 {
1102 transmute(vclt_u64(transmute(a), transmute(b)))
1103}
1104
1105/// Floating-point compare less than
f2b60f7d
FG
1106///
1107/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)
3c0e092e
XL
1108#[inline]
1109#[target_feature(enable = "neon")]
1110#[cfg_attr(test, assert_instr(fcmp))]
a2a8927a 1111#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1112pub unsafe fn vclts_f32(a: f32, b: f32) -> u32 {
1113 simd_extract(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
1114}
1115
1116/// Floating-point compare less than
f2b60f7d
FG
1117///
1118/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)
3c0e092e
XL
1119#[inline]
1120#[target_feature(enable = "neon")]
1121#[cfg_attr(test, assert_instr(fcmp))]
a2a8927a 1122#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1123pub unsafe fn vcltd_f64(a: f64, b: f64) -> u64 {
1124 simd_extract(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
1125}
1126
ba9703b0 1127/// Compare signed less than or equal
f2b60f7d
FG
1128///
1129/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)
ba9703b0
XL
1130#[inline]
1131#[target_feature(enable = "neon")]
1132#[cfg_attr(test, assert_instr(cmge))]
a2a8927a 1133#[stable(feature = "neon_intrinsics", since = "1.59.0")]
ba9703b0
XL
1134pub unsafe fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
1135 simd_le(a, b)
1136}
1137
1138/// Compare signed less than or equal
f2b60f7d
FG
1139///
1140/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)
ba9703b0
XL
1141#[inline]
1142#[target_feature(enable = "neon")]
1143#[cfg_attr(test, assert_instr(cmge))]
a2a8927a 1144#[stable(feature = "neon_intrinsics", since = "1.59.0")]
ba9703b0
XL
1145pub unsafe fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
1146 simd_le(a, b)
1147}
1148
3c0e092e 1149/// Compare greater than or equal
f2b60f7d
FG
1150///
1151/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)
3c0e092e
XL
1152#[inline]
1153#[target_feature(enable = "neon")]
1154#[cfg_attr(test, assert_instr(cmp))]
a2a8927a 1155#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1156pub unsafe fn vcged_s64(a: i64, b: i64) -> u64 {
1157 transmute(vcge_s64(transmute(a), transmute(b)))
1158}
1159
1160/// Compare greater than or equal
f2b60f7d
FG
1161///
1162/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)
3c0e092e
XL
1163#[inline]
1164#[target_feature(enable = "neon")]
1165#[cfg_attr(test, assert_instr(cmp))]
a2a8927a 1166#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1167pub unsafe fn vcged_u64(a: u64, b: u64) -> u64 {
1168 transmute(vcge_u64(transmute(a), transmute(b)))
1169}
1170
1171/// Floating-point compare greater than or equal
f2b60f7d
FG
1172///
1173/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)
3c0e092e
XL
1174#[inline]
1175#[target_feature(enable = "neon")]
1176#[cfg_attr(test, assert_instr(fcmp))]
a2a8927a 1177#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1178pub unsafe fn vcges_f32(a: f32, b: f32) -> u32 {
1179 simd_extract(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
1180}
1181
1182/// Floating-point compare greater than or equal
f2b60f7d
FG
1183///
1184/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)
3c0e092e
XL
1185#[inline]
1186#[target_feature(enable = "neon")]
1187#[cfg_attr(test, assert_instr(fcmp))]
a2a8927a 1188#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1189pub unsafe fn vcged_f64(a: f64, b: f64) -> u64 {
1190 simd_extract(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
1191}
1192
ba9703b0 1193/// Compare unsigned less than or equal
f2b60f7d
FG
1194///
1195/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)
ba9703b0
XL
1196#[inline]
1197#[target_feature(enable = "neon")]
1198#[cfg_attr(test, assert_instr(cmhs))]
a2a8927a 1199#[stable(feature = "neon_intrinsics", since = "1.59.0")]
ba9703b0
XL
1200pub unsafe fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
1201 simd_le(a, b)
1202}
1203
1204/// Compare unsigned less than or equal
f2b60f7d
FG
1205///
1206/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)
ba9703b0
XL
1207#[inline]
1208#[target_feature(enable = "neon")]
1209#[cfg_attr(test, assert_instr(cmhs))]
a2a8927a 1210#[stable(feature = "neon_intrinsics", since = "1.59.0")]
ba9703b0
XL
1211pub unsafe fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
1212 simd_le(a, b)
1213}
1214
1215/// Floating-point compare less than or equal
f2b60f7d
FG
1216///
1217/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)
ba9703b0
XL
1218#[inline]
1219#[target_feature(enable = "neon")]
1220#[cfg_attr(test, assert_instr(fcmge))]
a2a8927a 1221#[stable(feature = "neon_intrinsics", since = "1.59.0")]
ba9703b0
XL
1222pub unsafe fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
1223 simd_le(a, b)
1224}
1225
1226/// Floating-point compare less than or equal
f2b60f7d
FG
1227///
1228/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)
ba9703b0
XL
1229#[inline]
1230#[target_feature(enable = "neon")]
1231#[cfg_attr(test, assert_instr(fcmge))]
a2a8927a 1232#[stable(feature = "neon_intrinsics", since = "1.59.0")]
ba9703b0
XL
1233pub unsafe fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
1234 simd_le(a, b)
1235}
1236
3c0e092e 1237/// Compare less than or equal
f2b60f7d
FG
1238///
1239/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)
3c0e092e
XL
1240#[inline]
1241#[target_feature(enable = "neon")]
1242#[cfg_attr(test, assert_instr(cmp))]
a2a8927a 1243#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1244pub unsafe fn vcled_s64(a: i64, b: i64) -> u64 {
1245 transmute(vcle_s64(transmute(a), transmute(b)))
1246}
1247
1248/// Compare less than or equal
f2b60f7d
FG
1249///
1250/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)
3c0e092e
XL
1251#[inline]
1252#[target_feature(enable = "neon")]
1253#[cfg_attr(test, assert_instr(cmp))]
a2a8927a 1254#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1255pub unsafe fn vcled_u64(a: u64, b: u64) -> u64 {
1256 transmute(vcle_u64(transmute(a), transmute(b)))
1257}
1258
1259/// Floating-point compare less than or equal
f2b60f7d
FG
1260///
1261/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)
3c0e092e
XL
1262#[inline]
1263#[target_feature(enable = "neon")]
1264#[cfg_attr(test, assert_instr(fcmp))]
a2a8927a 1265#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1266pub unsafe fn vcles_f32(a: f32, b: f32) -> u32 {
1267 simd_extract(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
1268}
1269
1270/// Floating-point compare less than or equal
f2b60f7d
FG
1271///
1272/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)
3c0e092e
XL
1273#[inline]
1274#[target_feature(enable = "neon")]
1275#[cfg_attr(test, assert_instr(fcmp))]
a2a8927a 1276#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1277pub unsafe fn vcled_f64(a: f64, b: f64) -> u64 {
1278 simd_extract(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
1279}
1280
/// Compare signed greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    simd_ge(a, b)
}

/// Compare signed greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    simd_ge(a, b)
}

/// Compare unsigned greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    simd_ge(a, b)
}

/// Compare unsigned greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    simd_ge(a, b)
}

/// Floating-point compare greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    simd_ge(a, b)
}

/// Floating-point compare greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    simd_ge(a, b)
}

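// Usage sketch (illustrative only, not generated from neon.spec; assumes an
// aarch64 target with NEON): the vector forms return a per-lane mask that is
// typically fed into a bit-select such as `vbslq_s64` to build a branchless
// per-lane maximum.
//
// ```
// use core::arch::aarch64::*;
// unsafe {
//     let a = vld1q_s64([3_i64, -7].as_ptr());
//     let b = vld1q_s64([1_i64, 9].as_ptr());
//     let mask = vcgeq_s64(a, b);       // [all-ones, 0]
//     let max = vbslq_s64(mask, a, b);  // lane-wise: a where a >= b, else b
//     assert_eq!(vgetq_lane_s64::<0>(max), 3);
//     assert_eq!(vgetq_lane_s64::<1>(max), 9);
// }
// ```
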
/// Compare signed greater than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    simd_ge(a, transmute(b))
}

/// Compare signed greater than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    simd_ge(a, transmute(b))
}

/// Compare signed greater than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    simd_ge(a, transmute(b))
}

/// Compare signed greater than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    simd_ge(a, transmute(b))
}

/// Compare signed greater than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    simd_ge(a, transmute(b))
}

/// Compare signed greater than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    simd_ge(a, transmute(b))
}

/// Compare signed greater than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    simd_ge(a, transmute(b))
}

/// Compare signed greater than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    simd_ge(a, transmute(b))
}

/// Floating-point compare greater than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    simd_ge(a, transmute(b))
}

/// Floating-point compare greater than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    simd_ge(a, transmute(b))
}

/// Floating-point compare greater than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    simd_ge(a, transmute(b))
}

/// Floating-point compare greater than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    simd_ge(a, transmute(b))
}

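// Usage sketch (illustrative only, not generated from neon.spec; assumes an
// aarch64 target with NEON): the floating-point "greater than or equal to zero"
// forms return 0 for NaN lanes, because every ordered comparison against NaN is
// false.
//
// ```
// use core::arch::aarch64::*;
// unsafe {
//     let v = vld1q_f32([-1.0, 0.0, 2.5, f32::NAN].as_ptr());
//     let mask = vcgezq_f32(v);                 // [0, all-ones, all-ones, 0]
//     assert_eq!(vgetq_lane_u32::<1>(mask), u32::MAX);
//     assert_eq!(vgetq_lane_u32::<3>(mask), 0); // NaN lane -> 0
// }
// ```
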
/// Compare signed greater than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezd_s64(a: i64) -> u64 {
    transmute(vcgez_s64(transmute(a)))
}

/// Floating-point compare greater than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezs_f32(a: f32) -> u32 {
    simd_extract(vcgez_f32(vdup_n_f32(a)), 0)
}

/// Floating-point compare greater than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezd_f64(a: f64) -> u64 {
    simd_extract(vcgez_f64(vdup_n_f64(a)), 0)
}

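// Usage sketch (illustrative only, not generated from neon.spec; assumes an
// aarch64 target with NEON): the scalar form doubles as a branchless
// "is non-negative" mask.
//
// ```
// use core::arch::aarch64::vcgezd_s64;
// unsafe {
//     assert_eq!(vcgezd_s64(42), u64::MAX);
//     assert_eq!(vcgezd_s64(-1), 0);
// }
// ```
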
17df50a5 1524/// Compare signed greater than zero
f2b60f7d
FG
1525///
1526/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)
17df50a5
XL
1527#[inline]
1528#[target_feature(enable = "neon")]
1529#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1530#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1531pub unsafe fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
1532 let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
1533 simd_gt(a, transmute(b))
1534}
1535
1536/// Compare signed greater than zero
f2b60f7d
FG
1537///
1538/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)
17df50a5
XL
1539#[inline]
1540#[target_feature(enable = "neon")]
1541#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1542#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1543pub unsafe fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
1544 let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
1545 simd_gt(a, transmute(b))
1546}
1547
1548/// Compare signed greater than zero
f2b60f7d
FG
1549///
1550/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)
17df50a5
XL
1551#[inline]
1552#[target_feature(enable = "neon")]
1553#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1554#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1555pub unsafe fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
1556 let b: i16x4 = i16x4::new(0, 0, 0, 0);
1557 simd_gt(a, transmute(b))
1558}
1559
1560/// Compare signed greater than zero
f2b60f7d
FG
1561///
1562/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)
17df50a5
XL
1563#[inline]
1564#[target_feature(enable = "neon")]
1565#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1566#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1567pub unsafe fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
1568 let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
1569 simd_gt(a, transmute(b))
1570}
1571
1572/// Compare signed greater than zero
f2b60f7d
FG
1573///
1574/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)
17df50a5
XL
1575#[inline]
1576#[target_feature(enable = "neon")]
1577#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1578#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1579pub unsafe fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
1580 let b: i32x2 = i32x2::new(0, 0);
1581 simd_gt(a, transmute(b))
1582}
1583
1584/// Compare signed greater than zero
f2b60f7d
FG
1585///
1586/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)
17df50a5
XL
1587#[inline]
1588#[target_feature(enable = "neon")]
1589#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1590#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1591pub unsafe fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
1592 let b: i32x4 = i32x4::new(0, 0, 0, 0);
1593 simd_gt(a, transmute(b))
1594}
1595
1596/// Compare signed greater than zero
f2b60f7d
FG
1597///
1598/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)
17df50a5
XL
1599#[inline]
1600#[target_feature(enable = "neon")]
1601#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1602#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1603pub unsafe fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
1604 let b: i64x1 = i64x1::new(0);
1605 simd_gt(a, transmute(b))
1606}
1607
1608/// Compare signed greater than zero
f2b60f7d
FG
1609///
1610/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)
17df50a5
XL
1611#[inline]
1612#[target_feature(enable = "neon")]
1613#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1614#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1615pub unsafe fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
1616 let b: i64x2 = i64x2::new(0, 0);
1617 simd_gt(a, transmute(b))
1618}
1619
1620/// Floating-point compare greater than zero
f2b60f7d
FG
1621///
1622/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)
17df50a5
XL
1623#[inline]
1624#[target_feature(enable = "neon")]
1625#[cfg_attr(test, assert_instr(fcmgt))]
a2a8927a 1626#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1627pub unsafe fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
1628 let b: f32x2 = f32x2::new(0.0, 0.0);
1629 simd_gt(a, transmute(b))
1630}
1631
1632/// Floating-point compare greater than zero
f2b60f7d
FG
1633///
1634/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)
17df50a5
XL
1635#[inline]
1636#[target_feature(enable = "neon")]
1637#[cfg_attr(test, assert_instr(fcmgt))]
a2a8927a 1638#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1639pub unsafe fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
1640 let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
1641 simd_gt(a, transmute(b))
1642}
1643
1644/// Floating-point compare greater than zero
f2b60f7d
FG
1645///
1646/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)
17df50a5
XL
1647#[inline]
1648#[target_feature(enable = "neon")]
1649#[cfg_attr(test, assert_instr(fcmgt))]
a2a8927a 1650#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1651pub unsafe fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
1652 let b: f64 = 0.0;
1653 simd_gt(a, transmute(b))
1654}
1655
1656/// Floating-point compare greater than zero
f2b60f7d
FG
1657///
1658/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)
17df50a5
XL
1659#[inline]
1660#[target_feature(enable = "neon")]
1661#[cfg_attr(test, assert_instr(fcmgt))]
a2a8927a 1662#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1663pub unsafe fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
1664 let b: f64x2 = f64x2::new(0.0, 0.0);
1665 simd_gt(a, transmute(b))
1666}
1667
3c0e092e 1668/// Compare signed greater than zero
f2b60f7d
FG
1669///
1670/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)
3c0e092e
XL
1671#[inline]
1672#[target_feature(enable = "neon")]
1673#[cfg_attr(test, assert_instr(cmp))]
a2a8927a 1674#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1675pub unsafe fn vcgtzd_s64(a: i64) -> u64 {
1676 transmute(vcgtz_s64(transmute(a)))
1677}
1678
1679/// Floating-point compare greater than zero
f2b60f7d
FG
1680///
1681/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)
3c0e092e
XL
1682#[inline]
1683#[target_feature(enable = "neon")]
1684#[cfg_attr(test, assert_instr(fcmp))]
a2a8927a 1685#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1686pub unsafe fn vcgtzs_f32(a: f32) -> u32 {
1687 simd_extract(vcgtz_f32(vdup_n_f32(a)), 0)
1688}
1689
1690/// Floating-point compare greater than zero
f2b60f7d
FG
1691///
1692/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)
3c0e092e
XL
1693#[inline]
1694#[target_feature(enable = "neon")]
1695#[cfg_attr(test, assert_instr(fcmp))]
a2a8927a 1696#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1697pub unsafe fn vcgtzd_f64(a: f64) -> u64 {
1698 simd_extract(vcgtz_f64(vdup_n_f64(a)), 0)
1699}
1700
17df50a5 1701/// Compare signed less than or equal to zero
f2b60f7d
FG
1702///
1703/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)
17df50a5
XL
1704#[inline]
1705#[target_feature(enable = "neon")]
1706#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1707#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1708pub unsafe fn vclez_s8(a: int8x8_t) -> uint8x8_t {
1709 let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
1710 simd_le(a, transmute(b))
1711}
1712
1713/// Compare signed less than or equal to zero
f2b60f7d
FG
1714///
1715/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)
17df50a5
XL
1716#[inline]
1717#[target_feature(enable = "neon")]
1718#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1719#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1720pub unsafe fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
1721 let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
1722 simd_le(a, transmute(b))
1723}
1724
1725/// Compare signed less than or equal to zero
f2b60f7d
FG
1726///
1727/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)
17df50a5
XL
1728#[inline]
1729#[target_feature(enable = "neon")]
1730#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1731#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1732pub unsafe fn vclez_s16(a: int16x4_t) -> uint16x4_t {
1733 let b: i16x4 = i16x4::new(0, 0, 0, 0);
1734 simd_le(a, transmute(b))
1735}
1736
1737/// Compare signed less than or equal to zero
f2b60f7d
FG
1738///
1739/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)
17df50a5
XL
1740#[inline]
1741#[target_feature(enable = "neon")]
1742#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1743#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1744pub unsafe fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
1745 let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
1746 simd_le(a, transmute(b))
1747}
1748
1749/// Compare signed less than or equal to zero
f2b60f7d
FG
1750///
1751/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)
17df50a5
XL
1752#[inline]
1753#[target_feature(enable = "neon")]
1754#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1755#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1756pub unsafe fn vclez_s32(a: int32x2_t) -> uint32x2_t {
1757 let b: i32x2 = i32x2::new(0, 0);
1758 simd_le(a, transmute(b))
1759}
1760
1761/// Compare signed less than or equal to zero
f2b60f7d
FG
1762///
1763/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)
17df50a5
XL
1764#[inline]
1765#[target_feature(enable = "neon")]
1766#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1767#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1768pub unsafe fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
1769 let b: i32x4 = i32x4::new(0, 0, 0, 0);
1770 simd_le(a, transmute(b))
1771}
1772
1773/// Compare signed less than or equal to zero
f2b60f7d
FG
1774///
1775/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)
17df50a5
XL
1776#[inline]
1777#[target_feature(enable = "neon")]
1778#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1779#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1780pub unsafe fn vclez_s64(a: int64x1_t) -> uint64x1_t {
1781 let b: i64x1 = i64x1::new(0);
1782 simd_le(a, transmute(b))
1783}
1784
1785/// Compare signed less than or equal to zero
f2b60f7d
FG
1786///
1787/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)
17df50a5
XL
1788#[inline]
1789#[target_feature(enable = "neon")]
1790#[cfg_attr(test, assert_instr(cmgt))]
a2a8927a 1791#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1792pub unsafe fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
1793 let b: i64x2 = i64x2::new(0, 0);
1794 simd_le(a, transmute(b))
1795}
1796
1797/// Floating-point compare less than or equal to zero
f2b60f7d
FG
1798///
1799/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)
17df50a5
XL
1800#[inline]
1801#[target_feature(enable = "neon")]
1802#[cfg_attr(test, assert_instr(fcmle))]
a2a8927a 1803#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1804pub unsafe fn vclez_f32(a: float32x2_t) -> uint32x2_t {
1805 let b: f32x2 = f32x2::new(0.0, 0.0);
1806 simd_le(a, transmute(b))
1807}
1808
1809/// Floating-point compare less than or equal to zero
f2b60f7d
FG
1810///
1811/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)
17df50a5
XL
1812#[inline]
1813#[target_feature(enable = "neon")]
1814#[cfg_attr(test, assert_instr(fcmle))]
a2a8927a 1815#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1816pub unsafe fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
1817 let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
1818 simd_le(a, transmute(b))
1819}
1820
1821/// Floating-point compare less than or equal to zero
f2b60f7d
FG
1822///
1823/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)
17df50a5
XL
1824#[inline]
1825#[target_feature(enable = "neon")]
1826#[cfg_attr(test, assert_instr(fcmle))]
a2a8927a 1827#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1828pub unsafe fn vclez_f64(a: float64x1_t) -> uint64x1_t {
1829 let b: f64 = 0.0;
1830 simd_le(a, transmute(b))
1831}
1832
1833/// Floating-point compare less than or equal to zero
f2b60f7d
FG
1834///
1835/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)
17df50a5
XL
1836#[inline]
1837#[target_feature(enable = "neon")]
1838#[cfg_attr(test, assert_instr(fcmle))]
a2a8927a 1839#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1840pub unsafe fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
1841 let b: f64x2 = f64x2::new(0.0, 0.0);
1842 simd_le(a, transmute(b))
1843}
1844
3c0e092e 1845/// Compare less than or equal to zero
f2b60f7d
FG
1846///
1847/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)
3c0e092e
XL
1848#[inline]
1849#[target_feature(enable = "neon")]
1850#[cfg_attr(test, assert_instr(cmp))]
a2a8927a 1851#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1852pub unsafe fn vclezd_s64(a: i64) -> u64 {
1853 transmute(vclez_s64(transmute(a)))
1854}
1855
1856/// Floating-point compare less than or equal to zero
f2b60f7d
FG
1857///
1858/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)
3c0e092e
XL
1859#[inline]
1860#[target_feature(enable = "neon")]
1861#[cfg_attr(test, assert_instr(fcmp))]
a2a8927a 1862#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1863pub unsafe fn vclezs_f32(a: f32) -> u32 {
1864 simd_extract(vclez_f32(vdup_n_f32(a)), 0)
1865}
1866
1867/// Floating-point compare less than or equal to zero
f2b60f7d
FG
1868///
1869/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)
3c0e092e
XL
1870#[inline]
1871#[target_feature(enable = "neon")]
1872#[cfg_attr(test, assert_instr(fcmp))]
a2a8927a 1873#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
1874pub unsafe fn vclezd_f64(a: f64) -> u64 {
1875 simd_extract(vclez_f64(vdup_n_f64(a)), 0)
1876}
1877
17df50a5 1878/// Compare signed less than zero
f2b60f7d
FG
1879///
1880/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)
17df50a5
XL
1881#[inline]
1882#[target_feature(enable = "neon")]
5e7ed085 1883#[cfg_attr(test, assert_instr(cmlt))]
a2a8927a 1884#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1885pub unsafe fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
1886 let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
1887 simd_lt(a, transmute(b))
1888}
1889
1890/// Compare signed less than zero
f2b60f7d
FG
1891///
1892/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)
17df50a5
XL
1893#[inline]
1894#[target_feature(enable = "neon")]
5e7ed085 1895#[cfg_attr(test, assert_instr(cmlt))]
a2a8927a 1896#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1897pub unsafe fn vcltzq_s8(a: int8x16_t) -> uint8x16_t {
1898 let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
1899 simd_lt(a, transmute(b))
1900}
1901
1902/// Compare signed less than zero
f2b60f7d
FG
1903///
1904/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)
17df50a5
XL
1905#[inline]
1906#[target_feature(enable = "neon")]
5e7ed085 1907#[cfg_attr(test, assert_instr(cmlt))]
a2a8927a 1908#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1909pub unsafe fn vcltz_s16(a: int16x4_t) -> uint16x4_t {
1910 let b: i16x4 = i16x4::new(0, 0, 0, 0);
1911 simd_lt(a, transmute(b))
1912}
1913
1914/// Compare signed less than zero
f2b60f7d
FG
1915///
1916/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)
17df50a5
XL
1917#[inline]
1918#[target_feature(enable = "neon")]
5e7ed085 1919#[cfg_attr(test, assert_instr(cmlt))]
a2a8927a 1920#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1921pub unsafe fn vcltzq_s16(a: int16x8_t) -> uint16x8_t {
1922 let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
1923 simd_lt(a, transmute(b))
1924}
1925
1926/// Compare signed less than zero
f2b60f7d
FG
1927///
1928/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)
17df50a5
XL
1929#[inline]
1930#[target_feature(enable = "neon")]
5e7ed085 1931#[cfg_attr(test, assert_instr(cmlt))]
a2a8927a 1932#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1933pub unsafe fn vcltz_s32(a: int32x2_t) -> uint32x2_t {
1934 let b: i32x2 = i32x2::new(0, 0);
1935 simd_lt(a, transmute(b))
1936}
1937
1938/// Compare signed less than zero
f2b60f7d
FG
1939///
1940/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)
17df50a5
XL
1941#[inline]
1942#[target_feature(enable = "neon")]
5e7ed085 1943#[cfg_attr(test, assert_instr(cmlt))]
a2a8927a 1944#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1945pub unsafe fn vcltzq_s32(a: int32x4_t) -> uint32x4_t {
1946 let b: i32x4 = i32x4::new(0, 0, 0, 0);
1947 simd_lt(a, transmute(b))
1948}
1949
1950/// Compare signed less than zero
f2b60f7d
FG
1951///
1952/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)
17df50a5
XL
1953#[inline]
1954#[target_feature(enable = "neon")]
5e7ed085 1955#[cfg_attr(test, assert_instr(cmlt))]
a2a8927a 1956#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1957pub unsafe fn vcltz_s64(a: int64x1_t) -> uint64x1_t {
1958 let b: i64x1 = i64x1::new(0);
1959 simd_lt(a, transmute(b))
1960}
1961
1962/// Compare signed less than zero
f2b60f7d
FG
1963///
1964/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)
17df50a5
XL
1965#[inline]
1966#[target_feature(enable = "neon")]
5e7ed085 1967#[cfg_attr(test, assert_instr(cmlt))]
a2a8927a 1968#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1969pub unsafe fn vcltzq_s64(a: int64x2_t) -> uint64x2_t {
1970 let b: i64x2 = i64x2::new(0, 0);
1971 simd_lt(a, transmute(b))
1972}
1973
1974/// Floating-point compare less than zero
f2b60f7d
FG
1975///
1976/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)
17df50a5
XL
1977#[inline]
1978#[target_feature(enable = "neon")]
1979#[cfg_attr(test, assert_instr(fcmlt))]
a2a8927a 1980#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1981pub unsafe fn vcltz_f32(a: float32x2_t) -> uint32x2_t {
1982 let b: f32x2 = f32x2::new(0.0, 0.0);
1983 simd_lt(a, transmute(b))
1984}
1985
1986/// Floating-point compare less than zero
f2b60f7d
FG
1987///
1988/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)
17df50a5
XL
1989#[inline]
1990#[target_feature(enable = "neon")]
1991#[cfg_attr(test, assert_instr(fcmlt))]
a2a8927a 1992#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
1993pub unsafe fn vcltzq_f32(a: float32x4_t) -> uint32x4_t {
1994 let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
1995 simd_lt(a, transmute(b))
1996}
1997
1998/// Floating-point compare less than zero
f2b60f7d
FG
1999///
2000/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)
17df50a5
XL
2001#[inline]
2002#[target_feature(enable = "neon")]
2003#[cfg_attr(test, assert_instr(fcmlt))]
a2a8927a 2004#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
2005pub unsafe fn vcltz_f64(a: float64x1_t) -> uint64x1_t {
2006 let b: f64 = 0.0;
2007 simd_lt(a, transmute(b))
2008}
2009
2010/// Floating-point compare less than zero
f2b60f7d
FG
2011///
2012/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)
17df50a5
XL
2013#[inline]
2014#[target_feature(enable = "neon")]
2015#[cfg_attr(test, assert_instr(fcmlt))]
a2a8927a 2016#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
2017pub unsafe fn vcltzq_f64(a: float64x2_t) -> uint64x2_t {
2018 let b: f64x2 = f64x2::new(0.0, 0.0);
2019 simd_lt(a, transmute(b))
2020}
2021
3c0e092e 2022/// Compare less than zero
f2b60f7d
FG
2023///
2024/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)
3c0e092e
XL
2025#[inline]
2026#[target_feature(enable = "neon")]
2027#[cfg_attr(test, assert_instr(asr))]
a2a8927a 2028#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
2029pub unsafe fn vcltzd_s64(a: i64) -> u64 {
2030 transmute(vcltz_s64(transmute(a)))
2031}
2032
2033/// Floating-point compare less than zero
f2b60f7d
FG
2034///
2035/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)
3c0e092e
XL
2036#[inline]
2037#[target_feature(enable = "neon")]
2038#[cfg_attr(test, assert_instr(fcmp))]
a2a8927a 2039#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
2040pub unsafe fn vcltzs_f32(a: f32) -> u32 {
2041 simd_extract(vcltz_f32(vdup_n_f32(a)), 0)
2042}
2043
2044/// Floating-point compare less than zero
f2b60f7d
FG
2045///
2046/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)
3c0e092e
XL
2047#[inline]
2048#[target_feature(enable = "neon")]
2049#[cfg_attr(test, assert_instr(fcmp))]
a2a8927a 2050#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
2051pub unsafe fn vcltzd_f64(a: f64) -> u64 {
2052 simd_extract(vcltz_f64(vdup_n_f64(a)), 0)
2053}
2054
/// Floating-point absolute compare greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64")]
        fn vcagt_f64_(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    vcagt_f64_(a, b)
}

/// Floating-point absolute compare greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64")]
        fn vcagtq_f64_(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    vcagtq_f64_(a, b)
}

/// Floating-point absolute compare greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcagts_f32(a: f32, b: f32) -> u32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facgt.i32.f32")]
        fn vcagts_f32_(a: f32, b: f32) -> u32;
    }
    vcagts_f32_(a, b)
}

/// Floating-point absolute compare greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcagtd_f64(a: f64, b: f64) -> u64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facgt.i64.f64")]
        fn vcagtd_f64_(a: f64, b: f64) -> u64;
    }
    vcagtd_f64_(a, b)
}

/// Floating-point absolute compare greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facge.v1i64.v1f64")]
        fn vcage_f64_(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    vcage_f64_(a, b)
}

/// Floating-point absolute compare greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facge.v2i64.v2f64")]
        fn vcageq_f64_(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    vcageq_f64_(a, b)
}

/// Floating-point absolute compare greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcages_f32(a: f32, b: f32) -> u32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facge.i32.f32")]
        fn vcages_f32_(a: f32, b: f32) -> u32;
    }
    vcages_f32_(a, b)
}

/// Floating-point absolute compare greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcaged_f64(a: f64, b: f64) -> u64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facge.i64.f64")]
        fn vcaged_f64_(a: f64, b: f64) -> u64;
    }
    vcaged_f64_(a, b)
}

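// Usage sketch (illustrative only, not generated from neon.spec; assumes an
// aarch64 target with NEON): the "absolute" compares operate on magnitudes,
// i.e. vcagts_f32(a, b) tests |a| > |b| and vcages_f32(a, b) tests |a| >= |b|.
//
// ```
// use core::arch::aarch64::{vcages_f32, vcagts_f32};
// unsafe {
//     assert_eq!(vcagts_f32(-3.0, 2.0), u32::MAX); // |-3.0| > |2.0|
//     assert_eq!(vcagts_f32(1.0, -2.0), 0);        // |1.0| > |-2.0| is false
//     assert_eq!(vcages_f32(-2.0, 2.0), u32::MAX); // |-2.0| >= |2.0|
// }
// ```
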
/// Floating-point absolute compare less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    vcagt_f64(b, a)
}

/// Floating-point absolute compare less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    vcagtq_f64(b, a)
}

/// Floating-point absolute compare less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcalts_f32(a: f32, b: f32) -> u32 {
    vcagts_f32(b, a)
}

/// Floating-point absolute compare less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcaltd_f64(a: f64, b: f64) -> u64 {
    vcagtd_f64(b, a)
}

/// Floating-point absolute compare less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    vcage_f64(b, a)
}

/// Floating-point absolute compare less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    vcageq_f64(b, a)
}

/// Floating-point absolute compare less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcales_f32(a: f32, b: f32) -> u32 {
    vcages_f32(b, a)
}

/// Floating-point absolute compare less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcaled_f64(a: f64, b: f64) -> u64 {
    vcaged_f64(b, a)
}

/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    match LANE1 & 0b111 {
        0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
        1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
        2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
        _ => unreachable_unchecked(),
    }
}

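// Usage sketch (illustrative only, not generated from neon.spec; assumes an
// aarch64 target with NEON): the two const generics select the destination lane
// in `a` and the source lane in `b`, so `vcopy_lane_s8::<0, 7>(a, b)` returns
// `a` with lane 0 replaced by lane 7 of `b`.
//
// ```
// use core::arch::aarch64::*;
// unsafe {
//     let a = vld1_s8([10_i8, 11, 12, 13, 14, 15, 16, 17].as_ptr());
//     let b = vld1_s8([20_i8, 21, 22, 23, 24, 25, 26, 27].as_ptr());
//     let r = vcopy_lane_s8::<0, 7>(a, b); // [27, 11, 12, 13, 14, 15, 16, 17]
//     assert_eq!(vget_lane_s8::<0>(r), 27);
// }
// ```
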
2295/// Insert vector element from another vector element
f2b60f7d
FG
2296///
2297/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)
17df50a5
XL
2298#[inline]
2299#[target_feature(enable = "neon")]
2300#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2301#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2302#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2303pub unsafe fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
353b0b11
FG
2304 static_assert_uimm_bits!(LANE1, 4);
2305 static_assert_uimm_bits!(LANE2, 4);
17df50a5 2306 match LANE1 & 0b1111 {
353b0b11
FG
2307 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2308 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2309 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2310 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2311 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2312 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2313 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2314 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
2315 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
2316 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
2317 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
2318 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
2319 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
2320 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
2321 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
2322 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
17df50a5
XL
2323 _ => unreachable_unchecked(),
2324 }
2325}
2326
2327/// Insert vector element from another vector element
f2b60f7d
FG
2328///
2329/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)
17df50a5
XL
2330#[inline]
2331#[target_feature(enable = "neon")]
2332#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2333#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2334#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2335pub unsafe fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
353b0b11
FG
2336 static_assert_uimm_bits!(LANE1, 2);
2337 static_assert_uimm_bits!(LANE2, 2);
17df50a5 2338 match LANE1 & 0b11 {
353b0b11
FG
2339 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
2340 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
2341 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
2342 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
17df50a5
XL
2343 _ => unreachable_unchecked(),
2344 }
2345}
2346
2347/// Insert vector element from another vector element
f2b60f7d
FG
2348///
2349/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)
17df50a5
XL
2350#[inline]
2351#[target_feature(enable = "neon")]
2352#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2353#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2354#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2355pub unsafe fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
353b0b11
FG
2356 static_assert_uimm_bits!(LANE1, 3);
2357 static_assert_uimm_bits!(LANE2, 3);
17df50a5 2358 match LANE1 & 0b111 {
353b0b11
FG
2359 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
2360 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
2361 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
2362 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
2363 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
2364 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
2365 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
2366 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
17df50a5
XL
2367 _ => unreachable_unchecked(),
2368 }
2369}
2370
2371/// Insert vector element from another vector element
f2b60f7d
FG
2372///
2373/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)
17df50a5
XL
2374#[inline]
2375#[target_feature(enable = "neon")]
2376#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2377#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2378#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2379pub unsafe fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
353b0b11
FG
2380 static_assert_uimm_bits!(LANE1, 1);
2381 static_assert_uimm_bits!(LANE2, 1);
17df50a5 2382 match LANE1 & 0b1 {
353b0b11
FG
2383 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
2384 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
17df50a5
XL
2385 _ => unreachable_unchecked(),
2386 }
2387}
2388
2389/// Insert vector element from another vector element
f2b60f7d
FG
2390///
2391/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)
17df50a5
XL
2392#[inline]
2393#[target_feature(enable = "neon")]
2394#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2395#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2396#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2397pub unsafe fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
353b0b11
FG
2398 static_assert_uimm_bits!(LANE1, 2);
2399 static_assert_uimm_bits!(LANE2, 2);
17df50a5 2400 match LANE1 & 0b11 {
353b0b11
FG
2401 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
2402 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
2403 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
2404 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
17df50a5
XL
2405 _ => unreachable_unchecked(),
2406 }
2407}
2408
2409/// Insert vector element from another vector element
f2b60f7d
FG
2410///
2411/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)
17df50a5
XL
2412#[inline]
2413#[target_feature(enable = "neon")]
2414#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2415#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2416#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2417pub unsafe fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
353b0b11
FG
2418 static_assert_uimm_bits!(LANE1, 1);
2419 static_assert_uimm_bits!(LANE2, 1);
17df50a5 2420 match LANE1 & 0b1 {
353b0b11
FG
2421 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
2422 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
17df50a5
XL
2423 _ => unreachable_unchecked(),
2424 }
2425}
2426
2427/// Insert vector element from another vector element
f2b60f7d
FG
2428///
2429/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)
17df50a5
XL
2430#[inline]
2431#[target_feature(enable = "neon")]
2432#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2433#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2434#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2435pub unsafe fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
353b0b11
FG
2436 static_assert_uimm_bits!(LANE1, 3);
2437 static_assert_uimm_bits!(LANE2, 3);
17df50a5 2438 match LANE1 & 0b111 {
353b0b11
FG
2439 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
2440 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
2441 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
2442 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
2443 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
2444 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
2445 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
2446 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
17df50a5
XL
2447 _ => unreachable_unchecked(),
2448 }
2449}
2450
2451/// Insert vector element from another vector element
f2b60f7d
FG
2452///
2453/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)
17df50a5
XL
2454#[inline]
2455#[target_feature(enable = "neon")]
2456#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2457#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2458#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2459pub unsafe fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
353b0b11
FG
2460 static_assert_uimm_bits!(LANE1, 4);
2461 static_assert_uimm_bits!(LANE2, 4);
17df50a5 2462 match LANE1 & 0b1111 {
353b0b11
FG
2463 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2464 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2465 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2466 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2467 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2468 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2469 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2470 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
2471 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
2472 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
2473 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
2474 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
2475 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
2476 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
2477 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
2478 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
17df50a5
XL
2479 _ => unreachable_unchecked(),
2480 }
2481}
2482
2483/// Insert vector element from another vector element
f2b60f7d
FG
2484///
2485/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)
17df50a5
XL
2486#[inline]
2487#[target_feature(enable = "neon")]
2488#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2489#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2490#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2491pub unsafe fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
353b0b11
FG
2492 static_assert_uimm_bits!(LANE1, 2);
2493 static_assert_uimm_bits!(LANE2, 2);
17df50a5 2494 match LANE1 & 0b11 {
353b0b11
FG
2495 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
2496 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
2497 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
2498 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
17df50a5
XL
2499 _ => unreachable_unchecked(),
2500 }
2501}
2502
2503/// Insert vector element from another vector element
f2b60f7d
FG
2504///
2505/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)
17df50a5
XL
2506#[inline]
2507#[target_feature(enable = "neon")]
2508#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2509#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2510#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2511pub unsafe fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
353b0b11
FG
2512 static_assert_uimm_bits!(LANE1, 3);
2513 static_assert_uimm_bits!(LANE2, 3);
17df50a5 2514 match LANE1 & 0b111 {
353b0b11
FG
2515 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
2516 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
2517 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
2518 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
2519 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
2520 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
2521 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
2522 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
17df50a5
XL
2523 _ => unreachable_unchecked(),
2524 }
2525}
2526
2527/// Insert vector element from another vector element
f2b60f7d
FG
2528///
2529/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)
17df50a5
XL
2530#[inline]
2531#[target_feature(enable = "neon")]
2532#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2533#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2534#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2535pub unsafe fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
353b0b11
FG
2536 static_assert_uimm_bits!(LANE1, 1);
2537 static_assert_uimm_bits!(LANE2, 1);
17df50a5 2538 match LANE1 & 0b1 {
353b0b11
FG
2539 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
2540 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
17df50a5
XL
2541 _ => unreachable_unchecked(),
2542 }
2543}
2544
2545/// Insert vector element from another vector element
f2b60f7d
FG
2546///
2547/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)
17df50a5
XL
2548#[inline]
2549#[target_feature(enable = "neon")]
2550#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2551#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2552#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2553pub unsafe fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
353b0b11
FG
2554 static_assert_uimm_bits!(LANE1, 2);
2555 static_assert_uimm_bits!(LANE2, 2);
17df50a5 2556 match LANE1 & 0b11 {
353b0b11
FG
2557 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
2558 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
2559 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
2560 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
17df50a5
XL
2561 _ => unreachable_unchecked(),
2562 }
2563}
2564
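// Illustrative sketch (not produced by stdarch-gen): vcopyq_laneq_u32 with two
// 128-bit vectors. The example function name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vcopyq_laneq_u32() {
    let a: uint32x4_t = vdupq_n_u32(0);                          // [0, 0, 0, 0]
    let b: uint32x4_t = vsetq_lane_u32::<2>(9, vdupq_n_u32(5));  // [5, 5, 9, 5]
    // Copy lane 2 of `b` into lane 1 of `a`; all other lanes of `a` are kept.
    let r: uint32x4_t = vcopyq_laneq_u32::<1, 2>(a, b);          // [0, 9, 0, 0]
    assert_eq!(vgetq_lane_u32::<1>(r), 9);
}
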
2565/// Insert vector element from another vector element
f2b60f7d
FG
2566///
2567/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)
17df50a5
XL
2568#[inline]
2569#[target_feature(enable = "neon")]
2570#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2571#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2572#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2573pub unsafe fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
353b0b11
FG
2574 static_assert_uimm_bits!(LANE1, 1);
2575 static_assert_uimm_bits!(LANE2, 1);
17df50a5 2576 match LANE1 & 0b1 {
353b0b11
FG
2577 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
2578 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
17df50a5
XL
2579 _ => unreachable_unchecked(),
2580 }
2581}
2582
2583/// Insert vector element from another vector element
f2b60f7d
FG
2584///
2585/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)
17df50a5
XL
2586#[inline]
2587#[target_feature(enable = "neon")]
2588#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2589#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2590#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2591pub unsafe fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
353b0b11
FG
2592 static_assert_uimm_bits!(LANE1, 3);
2593 static_assert_uimm_bits!(LANE2, 3);
17df50a5 2594 match LANE1 & 0b111 {
353b0b11
FG
2595 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
2596 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
2597 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
2598 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
2599 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
2600 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
2601 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
2602 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
17df50a5
XL
2603 _ => unreachable_unchecked(),
2604 }
2605}
2606
2607/// Insert vector element from another vector element
f2b60f7d
FG
2608///
2609/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)
17df50a5
XL
2610#[inline]
2611#[target_feature(enable = "neon")]
2612#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2613#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2614#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2615pub unsafe fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
353b0b11
FG
2616 static_assert_uimm_bits!(LANE1, 4);
2617 static_assert_uimm_bits!(LANE2, 4);
17df50a5 2618 match LANE1 & 0b1111 {
353b0b11
FG
2619 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2620 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2621 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2622 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2623 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2624 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2625 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2626 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
2627 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
2628 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
2629 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
2630 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
2631 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
2632 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
2633 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
2634 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
17df50a5
XL
2635 _ => unreachable_unchecked(),
2636 }
2637}
2638
2639/// Insert vector element from another vector element
f2b60f7d
FG
2640///
2641/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)
17df50a5
XL
2642#[inline]
2643#[target_feature(enable = "neon")]
2644#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2645#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2646#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2647pub unsafe fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
353b0b11
FG
2648 static_assert_uimm_bits!(LANE1, 2);
2649 static_assert_uimm_bits!(LANE2, 2);
17df50a5 2650 match LANE1 & 0b11 {
353b0b11
FG
2651 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
2652 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
2653 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
2654 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
17df50a5
XL
2655 _ => unreachable_unchecked(),
2656 }
2657}
2658
2659/// Insert vector element from another vector element
f2b60f7d
FG
2660///
2661/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)
17df50a5
XL
2662#[inline]
2663#[target_feature(enable = "neon")]
2664#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2665#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2666#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2667pub unsafe fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
353b0b11
FG
2668 static_assert_uimm_bits!(LANE1, 3);
2669 static_assert_uimm_bits!(LANE2, 3);
17df50a5 2670 match LANE1 & 0b111 {
353b0b11
FG
2671 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
2672 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
2673 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
2674 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
2675 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
2676 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
2677 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
2678 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
17df50a5
XL
2679 _ => unreachable_unchecked(),
2680 }
2681}
2682
2683/// Insert vector element from another vector element
f2b60f7d
FG
2684///
2685/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)
17df50a5
XL
2686#[inline]
2687#[target_feature(enable = "neon")]
2688#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2689#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2690#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2691pub unsafe fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
353b0b11
FG
2692 static_assert_uimm_bits!(LANE1, 1);
2693 static_assert_uimm_bits!(LANE2, 1);
17df50a5 2694 match LANE1 & 0b1 {
353b0b11
FG
2695 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
2696 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
17df50a5
XL
2697 _ => unreachable_unchecked(),
2698 }
2699}
2700
2701/// Insert vector element from another vector element
f2b60f7d
FG
2702///
2703/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)
17df50a5
XL
2704#[inline]
2705#[target_feature(enable = "neon")]
2706#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2707#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2708#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2709pub unsafe fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
353b0b11
FG
2710 static_assert_uimm_bits!(LANE1, 1);
2711 static_assert_uimm_bits!(LANE2, 1);
17df50a5 2712 match LANE1 & 0b1 {
353b0b11
FG
2713 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
2714 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
17df50a5
XL
2715 _ => unreachable_unchecked(),
2716 }
2717}
2718
2719/// Insert vector element from another vector element
f2b60f7d
FG
2720///
2721/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)
17df50a5
XL
2722#[inline]
2723#[target_feature(enable = "neon")]
2724#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2725#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2726#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2727pub unsafe fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
353b0b11
FG
2728 static_assert_uimm_bits!(LANE1, 2);
2729 static_assert_uimm_bits!(LANE2, 2);
17df50a5 2730 match LANE1 & 0b11 {
353b0b11
FG
2731 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
2732 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
2733 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
2734 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
17df50a5
XL
2735 _ => unreachable_unchecked(),
2736 }
2737}
2738
2739/// Insert vector element from another vector element
f2b60f7d
FG
2740///
2741/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)
17df50a5
XL
2742#[inline]
2743#[target_feature(enable = "neon")]
2744#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2745#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2746#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2747pub unsafe fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
353b0b11
FG
2748 static_assert_uimm_bits!(LANE1, 1);
2749 static_assert_uimm_bits!(LANE2, 1);
17df50a5 2750 match LANE1 & 0b1 {
353b0b11
FG
2751 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
2752 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
17df50a5
XL
2753 _ => unreachable_unchecked(),
2754 }
2755}
2756
2757/// Insert vector element from another vector element
f2b60f7d
FG
2758///
2759/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)
17df50a5
XL
2760#[inline]
2761#[target_feature(enable = "neon")]
2762#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2763#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2764#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2765pub unsafe fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x16_t) -> int8x8_t {
353b0b11
FG
2766 static_assert_uimm_bits!(LANE1, 3);
2767 static_assert_uimm_bits!(LANE2, 4);
2768 let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
17df50a5 2769 match LANE1 & 0b111 {
353b0b11
FG
2770 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
2771 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
2772 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
2773 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
2774 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
2775 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
2776 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
2777 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
17df50a5
XL
2778 _ => unreachable_unchecked(),
2779 }
2780}
2781
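// Illustrative sketch (not produced by stdarch-gen): the `_laneq` form reads the
// source lane from a 128-bit vector while the destination stays 64-bit. The
// example function name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vcopy_laneq_s8() {
    let a: int8x8_t = vdup_n_s8(0);                              // eight zero lanes
    let b: int8x16_t = vsetq_lane_s8::<15>(-1, vdupq_n_s8(3));   // lane 15 is -1
    // Copy lane 15 of the 128-bit `b` into lane 7 of the 64-bit `a`.
    let r: int8x8_t = vcopy_laneq_s8::<7, 15>(a, b);
    assert_eq!(vget_lane_s8::<7>(r), -1);
}
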
2782/// Insert vector element from another vector element
f2b60f7d
FG
2783///
2784/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)
17df50a5
XL
2785#[inline]
2786#[target_feature(enable = "neon")]
2787#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2788#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2789#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2790pub unsafe fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t {
353b0b11
FG
2791 static_assert_uimm_bits!(LANE1, 2);
2792 static_assert_uimm_bits!(LANE2, 3);
2793 let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
17df50a5 2794 match LANE1 & 0b11 {
353b0b11
FG
2795 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
2796 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
2797 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
2798 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
17df50a5
XL
2799 _ => unreachable_unchecked(),
2800 }
2801}
2802
2803/// Insert vector element from another vector element
f2b60f7d
FG
2804///
2805/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)
17df50a5
XL
2806#[inline]
2807#[target_feature(enable = "neon")]
2808#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2809#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2810#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2811pub unsafe fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t {
353b0b11
FG
2812 static_assert_uimm_bits!(LANE1, 1);
2813 static_assert_uimm_bits!(LANE2, 2);
2814 let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]);
17df50a5 2815 match LANE1 & 0b1 {
353b0b11
FG
2816 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
2817 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
17df50a5
XL
2818 _ => unreachable_unchecked(),
2819 }
2820}
2821
2822/// Insert vector element from another vector element
f2b60f7d
FG
2823///
2824/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)
17df50a5
XL
2825#[inline]
2826#[target_feature(enable = "neon")]
2827#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2828#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2829#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2830pub unsafe fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x16_t) -> uint8x8_t {
353b0b11
FG
2831 static_assert_uimm_bits!(LANE1, 3);
2832 static_assert_uimm_bits!(LANE2, 4);
2833 let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
17df50a5 2834 match LANE1 & 0b111 {
353b0b11
FG
2835 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
2836 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
2837 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
2838 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
2839 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
2840 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
2841 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
2842 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
17df50a5
XL
2843 _ => unreachable_unchecked(),
2844 }
2845}
2846
2847/// Insert vector element from another vector element
f2b60f7d
FG
2848///
2849/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)
17df50a5
XL
2850#[inline]
2851#[target_feature(enable = "neon")]
2852#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2853#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2854#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2855pub unsafe fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t {
353b0b11
FG
2856 static_assert_uimm_bits!(LANE1, 2);
2857 static_assert_uimm_bits!(LANE2, 3);
2858 let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
17df50a5 2859 match LANE1 & 0b11 {
353b0b11
FG
2860 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
2861 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
2862 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
2863 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
17df50a5
XL
2864 _ => unreachable_unchecked(),
2865 }
2866}
2867
2868/// Insert vector element from another vector element
f2b60f7d
FG
2869///
2870/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)
17df50a5
XL
2871#[inline]
2872#[target_feature(enable = "neon")]
2873#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2874#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2875#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2876pub unsafe fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t {
353b0b11
FG
2877 static_assert_uimm_bits!(LANE1, 1);
2878 static_assert_uimm_bits!(LANE2, 2);
2879 let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]);
17df50a5 2880 match LANE1 & 0b1 {
353b0b11
FG
2881 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
2882 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
17df50a5
XL
2883 _ => unreachable_unchecked(),
2884 }
2885}
2886
2887/// Insert vector element from another vector element
f2b60f7d
FG
2888///
2889/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)
17df50a5
XL
2890#[inline]
2891#[target_feature(enable = "neon")]
2892#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2893#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2894#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2895pub unsafe fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x16_t) -> poly8x8_t {
353b0b11
FG
2896 static_assert_uimm_bits!(LANE1, 3);
2897 static_assert_uimm_bits!(LANE2, 4);
2898 let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
17df50a5 2899 match LANE1 & 0b111 {
353b0b11
FG
2900 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
2901 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
2902 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
2903 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
2904 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
2905 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
2906 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
2907 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
17df50a5
XL
2908 _ => unreachable_unchecked(),
2909 }
2910}
2911
2912/// Insert vector element from another vector element
f2b60f7d
FG
2913///
2914/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)
17df50a5
XL
2915#[inline]
2916#[target_feature(enable = "neon")]
2917#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2918#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2919#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2920pub unsafe fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(a: poly16x4_t, b: poly16x8_t) -> poly16x4_t {
353b0b11
FG
2921 static_assert_uimm_bits!(LANE1, 2);
2922 static_assert_uimm_bits!(LANE2, 3);
2923 let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
17df50a5 2924 match LANE1 & 0b11 {
353b0b11
FG
2925 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
2926 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
2927 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
2928 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
17df50a5
XL
2929 _ => unreachable_unchecked(),
2930 }
2931}
2932
2933/// Insert vector element from another vector element
f2b60f7d
FG
2934///
2935/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)
17df50a5
XL
2936#[inline]
2937#[target_feature(enable = "neon")]
2938#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2939#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2940#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2941pub unsafe fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
353b0b11
FG
2942 static_assert_uimm_bits!(LANE1, 1);
2943 static_assert_uimm_bits!(LANE2, 2);
2944 let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]);
17df50a5 2945 match LANE1 & 0b1 {
353b0b11
FG
2946 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
2947 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
17df50a5
XL
2948 _ => unreachable_unchecked(),
2949 }
2950}
2951
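// Illustrative sketch (not produced by stdarch-gen): vcopy_laneq_f32 pulls one
// f32 lane out of a 128-bit vector into a 64-bit vector. The example function
// name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vcopy_laneq_f32() {
    let a: float32x2_t = vdup_n_f32(0.0);                             // [0.0, 0.0]
    let b: float32x4_t = vsetq_lane_f32::<3>(2.5, vdupq_n_f32(1.0));  // [1.0, 1.0, 1.0, 2.5]
    let r: float32x2_t = vcopy_laneq_f32::<1, 3>(a, b);               // [0.0, 2.5]
    assert_eq!(vget_lane_f32::<1>(r), 2.5);
}
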
2952/// Insert vector element from another vector element
f2b60f7d
FG
2953///
2954/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)
17df50a5
XL
2955#[inline]
2956#[target_feature(enable = "neon")]
2957#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2958#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2959#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2960pub unsafe fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x8_t) -> int8x16_t {
353b0b11
FG
2961 static_assert_uimm_bits!(LANE1, 4);
2962 static_assert_uimm_bits!(LANE2, 3);
2963 let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
17df50a5 2964 match LANE1 & 0b1111 {
353b0b11
FG
2965 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2966 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2967 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2968 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2969 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2970 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2971 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2972 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
2973 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
2974 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
2975 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
2976 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
2977 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
2978 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
2979 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
2980 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
17df50a5
XL
2981 _ => unreachable_unchecked(),
2982 }
2983}
2984
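// Illustrative sketch (not produced by stdarch-gen): the `q_lane` form writes
// into a 128-bit destination while reading the source lane from a 64-bit
// vector. The example function name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vcopyq_lane_s8() {
    let a: int8x16_t = vdupq_n_s8(0);                        // sixteen zero lanes
    let b: int8x8_t = vset_lane_s8::<4>(7, vdup_n_s8(1));    // lane 4 is 7
    // Copy lane 4 of the 64-bit `b` into lane 12 of the 128-bit `a`.
    let r: int8x16_t = vcopyq_lane_s8::<12, 4>(a, b);
    assert_eq!(vgetq_lane_s8::<12>(r), 7);
}
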
2985/// Insert vector element from another vector element
f2b60f7d
FG
2986///
2987/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)
17df50a5
XL
2988#[inline]
2989#[target_feature(enable = "neon")]
2990#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2991#[rustc_legacy_const_generics(1, 3)]
a2a8927a 2992#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 2993pub unsafe fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
353b0b11
FG
2994 static_assert_uimm_bits!(LANE1, 3);
2995 static_assert_uimm_bits!(LANE2, 2);
2996 let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);
17df50a5 2997 match LANE1 & 0b111 {
353b0b11
FG
2998 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
2999 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
3000 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
3001 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
3002 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
3003 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
3004 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
3005 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
17df50a5
XL
3006 _ => unreachable_unchecked(),
3007 }
3008}
3009
3010/// Insert vector element from another vector element
f2b60f7d
FG
3011///
3012/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)
17df50a5
XL
3013#[inline]
3014#[target_feature(enable = "neon")]
3015#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
3016#[rustc_legacy_const_generics(1, 3)]
a2a8927a 3017#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3018pub unsafe fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
353b0b11
FG
3019 static_assert_uimm_bits!(LANE1, 2);
3020 static_assert_uimm_bits!(LANE2, 1);
3021 let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]);
17df50a5 3022 match LANE1 & 0b11 {
353b0b11
FG
3023 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
3024 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
3025 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
3026 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
17df50a5
XL
3027 _ => unreachable_unchecked(),
3028 }
3029}
3030
3031/// Insert vector element from another vector element
f2b60f7d
FG
3032///
3033/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)
17df50a5
XL
3034#[inline]
3035#[target_feature(enable = "neon")]
3036#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
3037#[rustc_legacy_const_generics(1, 3)]
a2a8927a 3038#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3039pub unsafe fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
353b0b11
FG
3040 static_assert_uimm_bits!(LANE1, 4);
3041 static_assert_uimm_bits!(LANE2, 3);
3042 let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
17df50a5 3043 match LANE1 & 0b1111 {
353b0b11
FG
3044 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
3045 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
3046 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
3047 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
3048 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
3049 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
3050 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
3051 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
3052 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
3053 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
3054 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
3055 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
3056 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
3057 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
3058 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
3059 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
17df50a5
XL
3060 _ => unreachable_unchecked(),
3061 }
3062}
3063
3064/// Insert vector element from another vector element
f2b60f7d
FG
3065///
3066/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)
17df50a5
XL
3067#[inline]
3068#[target_feature(enable = "neon")]
3069#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
3070#[rustc_legacy_const_generics(1, 3)]
a2a8927a 3071#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3072pub unsafe fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t {
353b0b11
FG
3073 static_assert_uimm_bits!(LANE1, 3);
3074 static_assert_uimm_bits!(LANE2, 2);
3075 let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);
17df50a5 3076 match LANE1 & 0b111 {
353b0b11
FG
3077 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
3078 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
3079 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
3080 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
3081 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
3082 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
3083 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
3084 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
17df50a5
XL
3085 _ => unreachable_unchecked(),
3086 }
3087}
3088
3089/// Insert vector element from another vector element
f2b60f7d
FG
3090///
3091/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)
17df50a5
XL
3092#[inline]
3093#[target_feature(enable = "neon")]
3094#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
3095#[rustc_legacy_const_generics(1, 3)]
a2a8927a 3096#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3097pub unsafe fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t {
353b0b11
FG
3098 static_assert_uimm_bits!(LANE1, 2);
3099 static_assert_uimm_bits!(LANE2, 1);
3100 let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]);
17df50a5 3101 match LANE1 & 0b11 {
353b0b11
FG
3102 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
3103 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
3104 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
3105 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
17df50a5
XL
3106 _ => unreachable_unchecked(),
3107 }
3108}
3109
3110/// Insert vector element from another vector element
f2b60f7d
FG
3111///
3112/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)
17df50a5
XL
3113#[inline]
3114#[target_feature(enable = "neon")]
3115#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
3116#[rustc_legacy_const_generics(1, 3)]
a2a8927a 3117#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3118pub unsafe fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x16_t, b: poly8x8_t) -> poly8x16_t {
353b0b11
FG
3119 static_assert_uimm_bits!(LANE1, 4);
3120 static_assert_uimm_bits!(LANE2, 3);
3121 let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
17df50a5 3122 match LANE1 & 0b1111 {
353b0b11
FG
3123 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
3124 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
3125 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
3126 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
3127 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
3128 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
3129 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
3130 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
3131 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
3132 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
3133 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
3134 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
3135 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
3136 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
3137 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
3138 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
17df50a5
XL
3139 _ => unreachable_unchecked(),
3140 }
3141}
3142
3143/// Insert vector element from another vector element
f2b60f7d
FG
3144///
3145/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)
17df50a5
XL
3146#[inline]
3147#[target_feature(enable = "neon")]
3148#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
3149#[rustc_legacy_const_generics(1, 3)]
a2a8927a 3150#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3151pub unsafe fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(a: poly16x8_t, b: poly16x4_t) -> poly16x8_t {
353b0b11
FG
3152 static_assert_uimm_bits!(LANE1, 3);
3153 static_assert_uimm_bits!(LANE2, 2);
3154 let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);
17df50a5 3155 match LANE1 & 0b111 {
353b0b11
FG
3156 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
3157 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
3158 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
3159 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
3160 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
3161 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
3162 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
3163 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
17df50a5
XL
3164 _ => unreachable_unchecked(),
3165 }
3166}
3167
3168/// Insert vector element from another vector element
f2b60f7d
FG
3169///
3170/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)
17df50a5
XL
3171#[inline]
3172#[target_feature(enable = "neon")]
5e7ed085 3173#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
17df50a5 3174#[rustc_legacy_const_generics(1, 3)]
a2a8927a 3175#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3176pub unsafe fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(a: int64x2_t, b: int64x1_t) -> int64x2_t {
353b0b11
FG
3177 static_assert_uimm_bits!(LANE1, 1);
3178 static_assert!(LANE2 == 0);
3179 let b: int64x2_t = simd_shuffle!(b, b, [0, 1]);
17df50a5 3180 match LANE1 & 0b1 {
353b0b11
FG
3181 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
3182 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
17df50a5
XL
3183 _ => unreachable_unchecked(),
3184 }
3185}
3186
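// Illustrative sketch (not produced by stdarch-gen): for vcopyq_lane_s64 the
// source is a single-lane int64x1_t, so LANE2 must be 0; only the destination
// lane varies. The example function name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vcopyq_lane_s64() {
    let a: int64x2_t = vdupq_n_s64(0);                 // [0, 0]
    let b: int64x1_t = vdup_n_s64(-5);                 // [-5]
    let r: int64x2_t = vcopyq_lane_s64::<1, 0>(a, b);  // [0, -5]
    assert_eq!(vgetq_lane_s64::<1>(r), -5);
}
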
3187/// Insert vector element from another vector element
f2b60f7d
FG
3188///
3189/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)
17df50a5
XL
3190#[inline]
3191#[target_feature(enable = "neon")]
5e7ed085 3192#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
17df50a5 3193#[rustc_legacy_const_generics(1, 3)]
a2a8927a 3194#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3195pub unsafe fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(a: uint64x2_t, b: uint64x1_t) -> uint64x2_t {
353b0b11
FG
3196 static_assert_uimm_bits!(LANE1, 1);
3197 static_assert!(LANE2 == 0);
3198 let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]);
17df50a5 3199 match LANE1 & 0b1 {
353b0b11
FG
3200 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
3201 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
17df50a5
XL
3202 _ => unreachable_unchecked(),
3203 }
3204}
3205
3206/// Insert vector element from another vector element
f2b60f7d
FG
3207///
3208/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)
17df50a5
XL
3209#[inline]
3210#[target_feature(enable = "neon")]
5e7ed085 3211#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
17df50a5 3212#[rustc_legacy_const_generics(1, 3)]
a2a8927a 3213#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3214pub unsafe fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(a: poly64x2_t, b: poly64x1_t) -> poly64x2_t {
353b0b11
FG
3215 static_assert_uimm_bits!(LANE1, 1);
3216 static_assert!(LANE2 == 0);
3217 let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]);
17df50a5 3218 match LANE1 & 0b1 {
353b0b11
FG
3219 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
3220 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
17df50a5
XL
3221 _ => unreachable_unchecked(),
3222 }
3223}
3224
3225/// Insert vector element from another vector element
f2b60f7d
FG
3226///
3227/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)
17df50a5
XL
3228#[inline]
3229#[target_feature(enable = "neon")]
3230#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
3231#[rustc_legacy_const_generics(1, 3)]
a2a8927a 3232#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3233pub unsafe fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
353b0b11
FG
3234 static_assert_uimm_bits!(LANE1, 2);
3235 static_assert_uimm_bits!(LANE2, 1);
3236 let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]);
17df50a5 3237 match LANE1 & 0b11 {
353b0b11
FG
3238 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
3239 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
3240 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
3241 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
17df50a5
XL
3242 _ => unreachable_unchecked(),
3243 }
3244}
3245
3246/// Insert vector element from another vector element
f2b60f7d
FG
3247///
3248/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)
17df50a5
XL
3249#[inline]
3250#[target_feature(enable = "neon")]
5e7ed085 3251#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
17df50a5 3252#[rustc_legacy_const_generics(1, 3)]
a2a8927a 3253#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3254pub unsafe fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
353b0b11
FG
3255 static_assert_uimm_bits!(LANE1, 1);
3256 static_assert!(LANE2 == 0);
3257 let b: float64x2_t = simd_shuffle!(b, b, [0, 1]);
17df50a5 3258 match LANE1 & 0b1 {
353b0b11
FG
3259 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
3260 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
17df50a5
XL
3261 _ => unreachable_unchecked(),
3262 }
3263}
3264
/// Create a new vector from a 64-bit pattern
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcreate_f64(a: u64) -> float64x1_t {
    transmute(a)
}
3275
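// Illustrative sketch (not produced by stdarch-gen): vcreate_f64 reinterprets a
// 64-bit integer as a one-lane f64 vector, so passing the bit pattern of a
// double recovers that double. The example function name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vcreate_f64() {
    let bits: u64 = 1.5f64.to_bits();
    let v: float64x1_t = vcreate_f64(bits);
    assert_eq!(vget_lane_f64::<0>(v), 1.5);
}
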
3276/// Fixed-point convert to floating-point
f2b60f7d
FG
3277///
3278/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)
17df50a5
XL
3279#[inline]
3280#[target_feature(enable = "neon")]
3281#[cfg_attr(test, assert_instr(scvtf))]
a2a8927a 3282#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3283pub unsafe fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t {
3284 simd_cast(a)
3285}
3286
3287/// Fixed-point convert to floating-point
f2b60f7d
FG
3288///
3289/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)
17df50a5
XL
3290#[inline]
3291#[target_feature(enable = "neon")]
3292#[cfg_attr(test, assert_instr(scvtf))]
a2a8927a 3293#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3294pub unsafe fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t {
3295 simd_cast(a)
3296}
3297
3298/// Fixed-point convert to floating-point
f2b60f7d
FG
3299///
3300/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)
17df50a5
XL
3301#[inline]
3302#[target_feature(enable = "neon")]
3303#[cfg_attr(test, assert_instr(ucvtf))]
a2a8927a 3304#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3305pub unsafe fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t {
3306 simd_cast(a)
3307}
3308
3309/// Fixed-point convert to floating-point
f2b60f7d
FG
3310///
3311/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)
17df50a5
XL
3312#[inline]
3313#[target_feature(enable = "neon")]
3314#[cfg_attr(test, assert_instr(ucvtf))]
a2a8927a 3315#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3316pub unsafe fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t {
3317 simd_cast(a)
3318}
3319
/// Floating-point convert to higher precision long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t {
    simd_cast(a)
}

/// Floating-point convert to higher precision long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t {
    let b: float32x2_t = simd_shuffle!(a, a, [2, 3]);
    simd_cast(b)
}
3342
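// Illustrative sketch (not produced by stdarch-gen): vcvt_f64_f32 widens the low
// two f32 lanes, while vcvt_high_f64_f32 widens the high two lanes of the same
// 128-bit input. The example function name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vcvt_high_f64_f32() {
    let a: float32x4_t = vsetq_lane_f32::<3>(4.0, vdupq_n_f32(1.0)); // [1.0, 1.0, 1.0, 4.0]
    let lo: float64x2_t = vcvt_f64_f32(vget_low_f32(a));             // [1.0, 1.0]
    let hi: float64x2_t = vcvt_high_f64_f32(a);                      // [1.0, 4.0]
    assert_eq!(vgetq_lane_f64::<0>(lo), 1.0);
    assert_eq!(vgetq_lane_f64::<1>(hi), 4.0);
}
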
3343/// Floating-point convert to lower precision narrow
f2b60f7d
FG
3344///
3345/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)
17df50a5
XL
3346#[inline]
3347#[target_feature(enable = "neon")]
3348#[cfg_attr(test, assert_instr(fcvtn))]
a2a8927a 3349#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3350pub unsafe fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t {
3351 simd_cast(a)
3352}
3353
3354/// Floating-point convert to lower precision narrow
f2b60f7d
FG
3355///
3356/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)
17df50a5
XL
3357#[inline]
3358#[target_feature(enable = "neon")]
3359#[cfg_attr(test, assert_instr(fcvtn))]
a2a8927a 3360#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3361pub unsafe fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
353b0b11 3362 simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3])
17df50a5
XL
3363}
3364
/// Floating-point convert to lower precision narrow, rounding to odd
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64")]
        fn vcvtx_f32_f64_(a: float64x2_t) -> float32x2_t;
    }
    vcvtx_f32_f64_(a)
}

/// Floating-point convert to lower precision narrow, rounding to odd
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtxd_f32_f64(a: f64) -> f32 {
    simd_extract(vcvtx_f32_f64(vdupq_n_f64(a)), 0)
}
3391
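// Illustrative sketch (not produced by stdarch-gen): FCVTXN narrows f64 to f32
// with round-to-odd, which is useful as the first step of a two-step narrowing.
// For values exactly representable in f32 the result is unchanged. The example
// function name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vcvtx_f32_f64() {
    let a: float64x2_t = vdupq_n_f64(0.5);   // exactly representable as f32
    let r: float32x2_t = vcvtx_f32_f64(a);
    assert_eq!(vget_lane_f32::<0>(r), 0.5);
    assert_eq!(vcvtxd_f32_f64(0.5), 0.5);
}
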
17df50a5 3392/// Floating-point convert to lower precision narrow, rounding to odd
f2b60f7d
FG
3393///
3394/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)
17df50a5
XL
3395#[inline]
3396#[target_feature(enable = "neon")]
3397#[cfg_attr(test, assert_instr(fcvtxn))]
a2a8927a 3398#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3399pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
353b0b11 3400 simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3])
17df50a5
XL
3401}
3402
/// Fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
    static_assert!(N >= 1 && N <= 64);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64")]
        fn vcvt_n_f64_s64_(a: int64x1_t, n: i32) -> float64x1_t;
    }
    vcvt_n_f64_s64_(a, N)
}
3420
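// Illustrative sketch (not produced by stdarch-gen): the `_n_` converts treat the
// integer input as a fixed-point number with N fractional bits, i.e. the result
// is `a as f64 / 2^N`. The example function name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vcvt_n_f64_s64() {
    let a: int64x1_t = vdup_n_s64(12);            // 12 with 2 fractional bits = 3.0
    let r: float64x1_t = vcvt_n_f64_s64::<2>(a);
    assert_eq!(vget_lane_f64::<0>(r), 3.0);
}
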
3421/// Fixed-point convert to floating-point
f2b60f7d
FG
3422///
3423/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)
17df50a5
XL
3424#[inline]
3425#[target_feature(enable = "neon")]
3426#[cfg_attr(test, assert_instr(scvtf, N = 2))]
3427#[rustc_legacy_const_generics(1)]
a2a8927a 3428#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3429pub unsafe fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
353b0b11 3430 static_assert!(N >= 1 && N <= 64);
17df50a5 3431 #[allow(improper_ctypes)]
c295e0f8 3432 extern "unadjusted" {
17df50a5
XL
3433 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64")]
3434 fn vcvtq_n_f64_s64_(a: int64x2_t, n: i32) -> float64x2_t;
3435 }
3436 vcvtq_n_f64_s64_(a, N)
3437}
3438
3439/// Fixed-point convert to floating-point
f2b60f7d
FG
3440///
3441/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)
17df50a5
XL
3442#[inline]
3443#[target_feature(enable = "neon")]
3444#[cfg_attr(test, assert_instr(scvtf, N = 2))]
3445#[rustc_legacy_const_generics(1)]
a2a8927a 3446#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3447pub unsafe fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
353b0b11 3448 static_assert!(N >= 1 && N <= 32);
17df50a5 3449 #[allow(improper_ctypes)]
c295e0f8 3450 extern "unadjusted" {
17df50a5
XL
3451 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32")]
3452 fn vcvts_n_f32_s32_(a: i32, n: i32) -> f32;
3453 }
3454 vcvts_n_f32_s32_(a, N)
3455}
3456
3457/// Fixed-point convert to floating-point
f2b60f7d
FG
3458///
3459/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)
17df50a5
XL
3460#[inline]
3461#[target_feature(enable = "neon")]
3462#[cfg_attr(test, assert_instr(scvtf, N = 2))]
3463#[rustc_legacy_const_generics(1)]
a2a8927a 3464#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3465pub unsafe fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
353b0b11 3466 static_assert!(N >= 1 && N <= 64);
17df50a5 3467 #[allow(improper_ctypes)]
c295e0f8 3468 extern "unadjusted" {
17df50a5
XL
3469 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64")]
3470 fn vcvtd_n_f64_s64_(a: i64, n: i32) -> f64;
3471 }
3472 vcvtd_n_f64_s64_(a, N)
3473}
3474
3475/// Fixed-point convert to floating-point
f2b60f7d
FG
3476///
3477/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)
17df50a5
XL
3478#[inline]
3479#[target_feature(enable = "neon")]
3480#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
3481#[rustc_legacy_const_generics(1)]
a2a8927a 3482#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3483pub unsafe fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
353b0b11 3484 static_assert!(N >= 1 && N <= 64);
17df50a5 3485 #[allow(improper_ctypes)]
c295e0f8 3486 extern "unadjusted" {
17df50a5
XL
3487 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64")]
3488 fn vcvt_n_f64_u64_(a: uint64x1_t, n: i32) -> float64x1_t;
3489 }
3490 vcvt_n_f64_u64_(a, N)
3491}
3492
3493/// Fixed-point convert to floating-point
f2b60f7d
FG
3494///
3495/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)
17df50a5
XL
3496#[inline]
3497#[target_feature(enable = "neon")]
3498#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
3499#[rustc_legacy_const_generics(1)]
a2a8927a 3500#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3501pub unsafe fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
353b0b11 3502 static_assert!(N >= 1 && N <= 64);
17df50a5 3503 #[allow(improper_ctypes)]
c295e0f8 3504 extern "unadjusted" {
17df50a5
XL
3505 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64")]
3506 fn vcvtq_n_f64_u64_(a: uint64x2_t, n: i32) -> float64x2_t;
3507 }
3508 vcvtq_n_f64_u64_(a, N)
3509}
3510
3511/// Fixed-point convert to floating-point
f2b60f7d
FG
3512///
3513/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)
17df50a5
XL
3514#[inline]
3515#[target_feature(enable = "neon")]
3516#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
3517#[rustc_legacy_const_generics(1)]
a2a8927a 3518#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3519pub unsafe fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
353b0b11 3520 static_assert!(N >= 1 && N <= 32);
17df50a5 3521 #[allow(improper_ctypes)]
c295e0f8 3522 extern "unadjusted" {
17df50a5
XL
3523 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32")]
3524 fn vcvts_n_f32_u32_(a: u32, n: i32) -> f32;
3525 }
3526 vcvts_n_f32_u32_(a, N)
3527}
3528
3529/// Fixed-point convert to floating-point
f2b60f7d
FG
3530///
3531/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)
17df50a5
XL
3532#[inline]
3533#[target_feature(enable = "neon")]
3534#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
3535#[rustc_legacy_const_generics(1)]
a2a8927a 3536#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3537pub unsafe fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
353b0b11 3538 static_assert!(N >= 1 && N <= 64);
17df50a5 3539 #[allow(improper_ctypes)]
c295e0f8 3540 extern "unadjusted" {
17df50a5
XL
3541 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64")]
3542 fn vcvtd_n_f64_u64_(a: u64, n: i32) -> f64;
3543 }
3544 vcvtd_n_f64_u64_(a, N)
3545}
3546
/// Floating-point convert to fixed-point, rounding toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
    static_assert!(N >= 1 && N <= 64);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64")]
        fn vcvt_n_s64_f64_(a: float64x1_t, n: i32) -> int64x1_t;
    }
    vcvt_n_s64_f64_(a, N)
}
3564
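// Illustrative sketch (not produced by stdarch-gen): the opposite direction
// multiplies by 2^N and truncates toward zero (FCVTZS with fbits), so 1.75 with
// N = 2 becomes 7. The example function name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vcvt_n_s64_f64() {
    let a: float64x1_t = vdup_n_f64(1.75);
    let r: int64x1_t = vcvt_n_s64_f64::<2>(a);   // trunc(1.75 * 4) = 7
    assert_eq!(vget_lane_s64::<0>(r), 7);
}
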
3565/// Floating-point convert to fixed-point, rounding toward zero
f2b60f7d
FG
3566///
3567/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)
17df50a5
XL
3568#[inline]
3569#[target_feature(enable = "neon")]
3570#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
3571#[rustc_legacy_const_generics(1)]
a2a8927a 3572#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3573pub unsafe fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
353b0b11 3574 static_assert!(N >= 1 && N <= 64);
17df50a5 3575 #[allow(improper_ctypes)]
c295e0f8 3576 extern "unadjusted" {
17df50a5
XL
3577 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64")]
3578 fn vcvtq_n_s64_f64_(a: float64x2_t, n: i32) -> int64x2_t;
3579 }
3580 vcvtq_n_s64_f64_(a, N)
3581}
3582
3583/// Floating-point convert to fixed-point, rounding toward zero
f2b60f7d
FG
3584///
3585/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)
17df50a5
XL
3586#[inline]
3587#[target_feature(enable = "neon")]
3588#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
3589#[rustc_legacy_const_generics(1)]
a2a8927a 3590#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3591pub unsafe fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
353b0b11 3592 static_assert!(N >= 1 && N <= 32);
17df50a5 3593 #[allow(improper_ctypes)]
c295e0f8 3594 extern "unadjusted" {
17df50a5
XL
3595 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32")]
3596 fn vcvts_n_s32_f32_(a: f32, n: i32) -> i32;
3597 }
3598 vcvts_n_s32_f32_(a, N)
3599}
3600
3601/// Floating-point convert to fixed-point, rounding toward zero
f2b60f7d
FG
3602///
3603/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)
17df50a5
XL
3604#[inline]
3605#[target_feature(enable = "neon")]
3606#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
3607#[rustc_legacy_const_generics(1)]
a2a8927a 3608#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3609pub unsafe fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
353b0b11 3610 static_assert!(N >= 1 && N <= 64);
17df50a5 3611 #[allow(improper_ctypes)]
c295e0f8 3612 extern "unadjusted" {
17df50a5
XL
3613 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64")]
3614 fn vcvtd_n_s64_f64_(a: f64, n: i32) -> i64;
3615 }
3616 vcvtd_n_s64_f64_(a, N)
3617}
3618
3619/// Floating-point convert to fixed-point, rounding toward zero
f2b60f7d
FG
3620///
3621/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)
17df50a5
XL
3622#[inline]
3623#[target_feature(enable = "neon")]
3624#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
3625#[rustc_legacy_const_generics(1)]
a2a8927a 3626#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3627pub unsafe fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
353b0b11 3628 static_assert!(N >= 1 && N <= 64);
17df50a5 3629 #[allow(improper_ctypes)]
c295e0f8 3630 extern "unadjusted" {
17df50a5
XL
3631 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64")]
3632 fn vcvt_n_u64_f64_(a: float64x1_t, n: i32) -> uint64x1_t;
3633 }
3634 vcvt_n_u64_f64_(a, N)
3635}
3636
3637/// Floating-point convert to fixed-point, rounding toward zero
f2b60f7d
FG
3638///
3639/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)
17df50a5
XL
3640#[inline]
3641#[target_feature(enable = "neon")]
3642#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
3643#[rustc_legacy_const_generics(1)]
a2a8927a 3644#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3645pub unsafe fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
353b0b11 3646 static_assert!(N >= 1 && N <= 64);
17df50a5 3647 #[allow(improper_ctypes)]
c295e0f8 3648 extern "unadjusted" {
17df50a5
XL
3649 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64")]
3650 fn vcvtq_n_u64_f64_(a: float64x2_t, n: i32) -> uint64x2_t;
3651 }
3652 vcvtq_n_u64_f64_(a, N)
3653}
3654
3655/// Floating-point convert to fixed-point, rounding toward zero
f2b60f7d
FG
3656///
3657/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)
17df50a5
XL
3658#[inline]
3659#[target_feature(enable = "neon")]
3660#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
3661#[rustc_legacy_const_generics(1)]
a2a8927a 3662#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3663pub unsafe fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
353b0b11 3664 static_assert!(N >= 1 && N <= 32);
17df50a5 3665 #[allow(improper_ctypes)]
c295e0f8 3666 extern "unadjusted" {
17df50a5
XL
3667 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32")]
3668 fn vcvts_n_u32_f32_(a: f32, n: i32) -> u32;
3669 }
3670 vcvts_n_u32_f32_(a, N)
3671}
3672
3673/// Floating-point convert to fixed-point, rounding toward zero
f2b60f7d
FG
3674///
3675/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)
17df50a5
XL
3676#[inline]
3677#[target_feature(enable = "neon")]
3678#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
3679#[rustc_legacy_const_generics(1)]
a2a8927a 3680#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3681pub unsafe fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
353b0b11 3682 static_assert!(N >= 1 && N <= 64);
17df50a5 3683 #[allow(improper_ctypes)]
c295e0f8 3684 extern "unadjusted" {
17df50a5
XL
3685 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64")]
3686 fn vcvtd_n_u64_f64_(a: f64, n: i32) -> u64;
3687 }
3688 vcvtd_n_u64_f64_(a, N)
3689}
3690
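// Editorial sketch, not part of the generated file: illustrates how the const
// parameter N of the `_n_` conversions selects the number of fractional bits,
// i.e. the input is scaled by 2^N and then truncated toward zero. Assumes an
// aarch64 target with NEON available (the default for the standard targets).
#[cfg(all(test, target_arch = "aarch64"))]
mod vcvt_n_fixed_point_sketch {
    #[test]
    fn n_selects_fractional_bits() {
        use core::arch::aarch64::*;
        unsafe {
            // With N = 2 the value is multiplied by 2^2 = 4 and then
            // truncated toward zero: 1.75 * 4 = 7.0 -> 7 in every lane.
            let a = vdupq_n_f64(1.75);
            let r = vcvtq_n_s64_f64::<2>(a);
            assert_eq!(vgetq_lane_s64::<0>(r), 7);
            assert_eq!(vgetq_lane_s64::<1>(r), 7);

            // The scalar form behaves the same way: 0.625 * 2^4 = 10.0 -> 10.
            assert_eq!(vcvts_n_u32_f32::<4>(0.625), 10);
        }
    }
}
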
3691/// Fixed-point convert to floating-point
f2b60f7d
FG
3692///
3693/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)
17df50a5
XL
3694#[inline]
3695#[target_feature(enable = "neon")]
3696#[cfg_attr(test, assert_instr(scvtf))]
a2a8927a 3697#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3698pub unsafe fn vcvts_f32_s32(a: i32) -> f32 {
3699 a as f32
3700}
3701
3702/// Fixed-point convert to floating-point
f2b60f7d
FG
3703///
3704/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)
17df50a5
XL
3705#[inline]
3706#[target_feature(enable = "neon")]
3707#[cfg_attr(test, assert_instr(scvtf))]
a2a8927a 3708#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3709pub unsafe fn vcvtd_f64_s64(a: i64) -> f64 {
3710 a as f64
3711}
3712
3713/// Fixed-point convert to floating-point
f2b60f7d
FG
3714///
3715/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)
17df50a5
XL
3716#[inline]
3717#[target_feature(enable = "neon")]
3718#[cfg_attr(test, assert_instr(ucvtf))]
a2a8927a 3719#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3720pub unsafe fn vcvts_f32_u32(a: u32) -> f32 {
3721 a as f32
3722}
3723
3724/// Fixed-point convert to floating-point
f2b60f7d
FG
3725///
3726/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)
17df50a5
XL
3727#[inline]
3728#[target_feature(enable = "neon")]
3729#[cfg_attr(test, assert_instr(ucvtf))]
a2a8927a 3730#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3731pub unsafe fn vcvtd_f64_u64(a: u64) -> f64 {
3732 a as f64
3733}
3734
3735/// Floating-point convert to signed fixed-point, rounding toward zero
f2b60f7d
FG
3736///
3737/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)
17df50a5
XL
3738#[inline]
3739#[target_feature(enable = "neon")]
3740#[cfg_attr(test, assert_instr(fcvtzs))]
a2a8927a 3741#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3742pub unsafe fn vcvts_s32_f32(a: f32) -> i32 {
3743 a as i32
3744}
3745
3746/// Floating-point convert to signed fixed-point, rounding toward zero
f2b60f7d
FG
3747///
3748/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)
17df50a5
XL
3749#[inline]
3750#[target_feature(enable = "neon")]
3751#[cfg_attr(test, assert_instr(fcvtzs))]
a2a8927a 3752#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3753pub unsafe fn vcvtd_s64_f64(a: f64) -> i64 {
3754 a as i64
3755}
3756
3757/// Floating-point convert to unsigned fixed-point, rounding toward zero
f2b60f7d
FG
3758///
3759/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)
17df50a5
XL
3760#[inline]
3761#[target_feature(enable = "neon")]
3762#[cfg_attr(test, assert_instr(fcvtzu))]
a2a8927a 3763#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3764pub unsafe fn vcvts_u32_f32(a: f32) -> u32 {
3765 a as u32
3766}
3767
3768/// Floating-point convert to unsigned fixed-point, rounding toward zero
f2b60f7d
FG
3769///
3770/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)
17df50a5
XL
3771#[inline]
3772#[target_feature(enable = "neon")]
3773#[cfg_attr(test, assert_instr(fcvtzu))]
a2a8927a 3774#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3775pub unsafe fn vcvtd_u64_f64(a: f64) -> u64 {
3776 a as u64
3777}
3778
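// Editorial sketch, not part of the generated file: the scalar conversions
// above are written as plain Rust casts, and Rust's float-to-int casts
// truncate toward zero and saturate at the destination bounds, which matches
// the FCVTZS/FCVTZU behaviour listed in their assert_instr attributes.
// Assumes an aarch64 target.
#[cfg(all(test, target_arch = "aarch64"))]
mod scalar_convert_sketch {
    #[test]
    fn casts_truncate_and_saturate() {
        use core::arch::aarch64::*;
        unsafe {
            // Integer to floating-point is exact for small values.
            assert_eq!(vcvts_f32_s32(-3), -3.0_f32);
            // Floating-point to integer truncates toward zero...
            assert_eq!(vcvtd_s64_f64(-2.9), -2);
            // ...and saturates instead of wrapping for out-of-range inputs.
            assert_eq!(vcvts_u32_f32(-1.0), 0);
            assert_eq!(vcvts_s32_f32(1.0e10), i32::MAX);
        }
    }
}
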
3779/// Floating-point convert to signed fixed-point, rounding toward zero
f2b60f7d
FG
3780///
3781/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)
17df50a5
XL
3782#[inline]
3783#[target_feature(enable = "neon")]
3784#[cfg_attr(test, assert_instr(fcvtzs))]
a2a8927a 3785#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3786pub unsafe fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
94222f64 3787 #[allow(improper_ctypes)]
c295e0f8 3788 extern "unadjusted" {
94222f64
XL
3789 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fptosi.sat.v1i64.v1f64")]
3790 fn vcvt_s64_f64_(a: float64x1_t) -> int64x1_t;
3791 }
3792 vcvt_s64_f64_(a)
17df50a5
XL
3793}
3794
3795/// Floating-point convert to signed fixed-point, rounding toward zero
f2b60f7d
FG
3796///
3797/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)
17df50a5
XL
3798#[inline]
3799#[target_feature(enable = "neon")]
3800#[cfg_attr(test, assert_instr(fcvtzs))]
a2a8927a 3801#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3802pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
94222f64 3803 #[allow(improper_ctypes)]
c295e0f8 3804 extern "unadjusted" {
94222f64
XL
3805 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fptosi.sat.v2i64.v2f64")]
3806 fn vcvtq_s64_f64_(a: float64x2_t) -> int64x2_t;
3807 }
3808 vcvtq_s64_f64_(a)
17df50a5
XL
3809}
3810
3811/// Floating-point convert to unsigned fixed-point, rounding toward zero
f2b60f7d
FG
3812///
3813/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)
17df50a5
XL
3814#[inline]
3815#[target_feature(enable = "neon")]
3816#[cfg_attr(test, assert_instr(fcvtzu))]
a2a8927a 3817#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3818pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
94222f64 3819 #[allow(improper_ctypes)]
c295e0f8 3820 extern "unadjusted" {
94222f64
XL
3821 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fptoui.sat.v1i64.v1f64")]
3822 fn vcvt_u64_f64_(a: float64x1_t) -> uint64x1_t;
3823 }
3824 vcvt_u64_f64_(a)
17df50a5
XL
3825}
3826
3827/// Floating-point convert to unsigned fixed-point, rounding toward zero
f2b60f7d
FG
3828///
3829/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)
17df50a5
XL
3830#[inline]
3831#[target_feature(enable = "neon")]
3832#[cfg_attr(test, assert_instr(fcvtzu))]
a2a8927a 3833#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 3834pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
94222f64 3835 #[allow(improper_ctypes)]
c295e0f8 3836 extern "unadjusted" {
94222f64
XL
3837 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fptoui.sat.v2i64.v2f64")]
3838 fn vcvtq_u64_f64_(a: float64x2_t) -> uint64x2_t;
3839 }
3840 vcvtq_u64_f64_(a)
17df50a5
XL
3841}
3842
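// Editorial sketch, not part of the generated file: the vector conversions
// above lower to LLVM's saturating fptosi/fptoui, so out-of-range lanes clamp
// to the integer bounds and NaN lanes become 0, matching FCVTZS/FCVTZU.
// Assumes an aarch64 target with NEON available.
#[cfg(all(test, target_arch = "aarch64"))]
mod vcvt_saturating_sketch {
    #[test]
    fn lanes_saturate_at_integer_bounds() {
        use core::arch::aarch64::*;
        unsafe {
            let a = vsetq_lane_f64::<1>(f64::INFINITY, vdupq_n_f64(-1.5));
            let r = vcvtq_s64_f64(a);
            // Lane 0: -1.5 truncates toward zero to -1; lane 1: +inf clamps to i64::MAX.
            assert_eq!(vgetq_lane_s64::<0>(r), -1);
            assert_eq!(vgetq_lane_s64::<1>(r), i64::MAX);

            // The unsigned conversion clamps negative inputs to 0.
            let u = vcvt_u64_f64(vdup_n_f64(-3.0));
            assert_eq!(vget_lane_u64::<0>(u), 0);
        }
    }
}
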
3843/// Floating-point convert to signed integer, rounding to nearest with ties to away
f2b60f7d
FG
3844///
3845/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)
17df50a5
XL
3846#[inline]
3847#[target_feature(enable = "neon")]
3848#[cfg_attr(test, assert_instr(fcvtas))]
a2a8927a 3849#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3850pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
3851 #[allow(improper_ctypes)]
c295e0f8 3852 extern "unadjusted" {
17df50a5
XL
3853 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32")]
3854 fn vcvta_s32_f32_(a: float32x2_t) -> int32x2_t;
3855 }
3856 vcvta_s32_f32_(a)
3857}
3858
3859/// Floating-point convert to signed integer, rounding to nearest with ties to away
f2b60f7d
FG
3860///
3861/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)
17df50a5
XL
3862#[inline]
3863#[target_feature(enable = "neon")]
3864#[cfg_attr(test, assert_instr(fcvtas))]
a2a8927a 3865#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3866pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
3867 #[allow(improper_ctypes)]
c295e0f8 3868 extern "unadjusted" {
17df50a5
XL
3869 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32")]
3870 fn vcvtaq_s32_f32_(a: float32x4_t) -> int32x4_t;
3871 }
3872 vcvtaq_s32_f32_(a)
3873}
3874
3875/// Floating-point convert to signed integer, rounding to nearest with ties to away
f2b60f7d
FG
3876///
3877/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)
17df50a5
XL
3878#[inline]
3879#[target_feature(enable = "neon")]
3880#[cfg_attr(test, assert_instr(fcvtas))]
a2a8927a 3881#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3882pub unsafe fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
3883 #[allow(improper_ctypes)]
c295e0f8 3884 extern "unadjusted" {
17df50a5
XL
3885 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64")]
3886 fn vcvta_s64_f64_(a: float64x1_t) -> int64x1_t;
3887 }
3888 vcvta_s64_f64_(a)
3889}
3890
3891/// Floating-point convert to signed integer, rounding to nearest with ties to away
f2b60f7d
FG
3892///
3893/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)
17df50a5
XL
3894#[inline]
3895#[target_feature(enable = "neon")]
3896#[cfg_attr(test, assert_instr(fcvtas))]
a2a8927a 3897#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3898pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
3899 #[allow(improper_ctypes)]
c295e0f8 3900 extern "unadjusted" {
17df50a5
XL
3901 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64")]
3902 fn vcvtaq_s64_f64_(a: float64x2_t) -> int64x2_t;
3903 }
3904 vcvtaq_s64_f64_(a)
3905}
3906
3907/// Floating-point convert to signed integer, rounding to nearest with ties to away
f2b60f7d
FG
3908///
3909/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)
17df50a5
XL
3910#[inline]
3911#[target_feature(enable = "neon")]
3912#[cfg_attr(test, assert_instr(fcvtas))]
a2a8927a 3913#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3914pub unsafe fn vcvtas_s32_f32(a: f32) -> i32 {
3915 #[allow(improper_ctypes)]
c295e0f8 3916 extern "unadjusted" {
17df50a5
XL
3917 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtas.i32.f32")]
3918 fn vcvtas_s32_f32_(a: f32) -> i32;
3919 }
3920 vcvtas_s32_f32_(a)
3921}
3922
3923/// Floating-point convert to signed integer, rounding to nearest with ties to away
f2b60f7d
FG
3924///
3925/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)
17df50a5
XL
3926#[inline]
3927#[target_feature(enable = "neon")]
3928#[cfg_attr(test, assert_instr(fcvtas))]
a2a8927a 3929#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3930pub unsafe fn vcvtad_s64_f64(a: f64) -> i64 {
3931 #[allow(improper_ctypes)]
c295e0f8 3932 extern "unadjusted" {
17df50a5
XL
3933 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtas.i64.f64")]
3934 fn vcvtad_s64_f64_(a: f64) -> i64;
3935 }
3936 vcvtad_s64_f64_(a)
3937}
3938
3939/// Floating-point convert to unsigned integer, rounding to nearest with ties to away
f2b60f7d
FG
3940///
3941/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)
17df50a5
XL
3942#[inline]
3943#[target_feature(enable = "neon")]
3944#[cfg_attr(test, assert_instr(fcvtau))]
a2a8927a 3945#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3946pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 {
3947 #[allow(improper_ctypes)]
c295e0f8 3948 extern "unadjusted" {
17df50a5
XL
3949 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtau.i32.f32")]
3950 fn vcvtas_u32_f32_(a: f32) -> u32;
3951 }
3952 vcvtas_u32_f32_(a)
3953}
3954
3955/// Floating-point convert to unsigned integer, rounding to nearest with ties to away
f2b60f7d
FG
3956///
3957/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)
17df50a5
XL
3958#[inline]
3959#[target_feature(enable = "neon")]
3960#[cfg_attr(test, assert_instr(fcvtau))]
a2a8927a 3961#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3962pub unsafe fn vcvtad_u64_f64(a: f64) -> u64 {
3963 #[allow(improper_ctypes)]
c295e0f8 3964 extern "unadjusted" {
17df50a5
XL
3965 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtau.i64.f64")]
3966 fn vcvtad_u64_f64_(a: f64) -> u64;
3967 }
3968 vcvtad_u64_f64_(a)
3969}
3970
3971/// Floating-point convert to signed integer, rounding to nearest with ties to even
f2b60f7d
FG
3972///
3973/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)
17df50a5
XL
3974#[inline]
3975#[target_feature(enable = "neon")]
3976#[cfg_attr(test, assert_instr(fcvtns))]
a2a8927a 3977#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3978pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
3979 #[allow(improper_ctypes)]
c295e0f8 3980 extern "unadjusted" {
17df50a5
XL
3981 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32")]
3982 fn vcvtn_s32_f32_(a: float32x2_t) -> int32x2_t;
3983 }
3984 vcvtn_s32_f32_(a)
3985}
3986
3987/// Floating-point convert to signed integer, rounding to nearest with ties to even
f2b60f7d
FG
3988///
3989/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)
17df50a5
XL
3990#[inline]
3991#[target_feature(enable = "neon")]
3992#[cfg_attr(test, assert_instr(fcvtns))]
a2a8927a 3993#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
3994pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
3995 #[allow(improper_ctypes)]
c295e0f8 3996 extern "unadjusted" {
17df50a5
XL
3997 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32")]
3998 fn vcvtnq_s32_f32_(a: float32x4_t) -> int32x4_t;
3999 }
4000 vcvtnq_s32_f32_(a)
4001}
4002
4003/// Floating-point convert to signed integer, rounding to nearest with ties to even
f2b60f7d
FG
4004///
4005/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)
17df50a5
XL
4006#[inline]
4007#[target_feature(enable = "neon")]
4008#[cfg_attr(test, assert_instr(fcvtns))]
a2a8927a 4009#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4010pub unsafe fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
4011 #[allow(improper_ctypes)]
c295e0f8 4012 extern "unadjusted" {
17df50a5
XL
4013 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64")]
4014 fn vcvtn_s64_f64_(a: float64x1_t) -> int64x1_t;
4015 }
4016 vcvtn_s64_f64_(a)
4017}
4018
4019/// Floating-point convert to signed integer, rounding to nearest with ties to even
f2b60f7d
FG
4020///
4021/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)
17df50a5
XL
4022#[inline]
4023#[target_feature(enable = "neon")]
4024#[cfg_attr(test, assert_instr(fcvtns))]
a2a8927a 4025#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4026pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
4027 #[allow(improper_ctypes)]
c295e0f8 4028 extern "unadjusted" {
17df50a5
XL
4029 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64")]
4030 fn vcvtnq_s64_f64_(a: float64x2_t) -> int64x2_t;
4031 }
4032 vcvtnq_s64_f64_(a)
4033}
4034
4035/// Floating-point convert to signed integer, rounding to nearest with ties to even
f2b60f7d
FG
4036///
4037/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)
17df50a5
XL
4038#[inline]
4039#[target_feature(enable = "neon")]
4040#[cfg_attr(test, assert_instr(fcvtns))]
a2a8927a 4041#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4042pub unsafe fn vcvtns_s32_f32(a: f32) -> i32 {
4043 #[allow(improper_ctypes)]
c295e0f8 4044 extern "unadjusted" {
17df50a5
XL
4045 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtns.i32.f32")]
4046 fn vcvtns_s32_f32_(a: f32) -> i32;
4047 }
4048 vcvtns_s32_f32_(a)
4049}
4050
4051/// Floating-point convert to signed integer, rounding to nearest with ties to even
f2b60f7d
FG
4052///
4053/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)
17df50a5
XL
4054#[inline]
4055#[target_feature(enable = "neon")]
4056#[cfg_attr(test, assert_instr(fcvtns))]
a2a8927a 4057#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4058pub unsafe fn vcvtnd_s64_f64(a: f64) -> i64 {
4059 #[allow(improper_ctypes)]
c295e0f8 4060 extern "unadjusted" {
17df50a5
XL
4061 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtns.i64.f64")]
4062 fn vcvtnd_s64_f64_(a: f64) -> i64;
4063 }
4064 vcvtnd_s64_f64_(a)
4065}
4066
4067/// Floating-point convert to signed integer, rounding toward minus infinity
f2b60f7d
FG
4068///
4069/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)
17df50a5
XL
4070#[inline]
4071#[target_feature(enable = "neon")]
4072#[cfg_attr(test, assert_instr(fcvtms))]
a2a8927a 4073#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4074pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
4075 #[allow(improper_ctypes)]
c295e0f8 4076 extern "unadjusted" {
17df50a5
XL
4077 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32")]
4078 fn vcvtm_s32_f32_(a: float32x2_t) -> int32x2_t;
4079 }
4080 vcvtm_s32_f32_(a)
4081}
4082
4083/// Floating-point convert to signed integer, rounding toward minus infinity
f2b60f7d
FG
4084///
4085/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)
17df50a5
XL
4086#[inline]
4087#[target_feature(enable = "neon")]
4088#[cfg_attr(test, assert_instr(fcvtms))]
a2a8927a 4089#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4090pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
4091 #[allow(improper_ctypes)]
c295e0f8 4092 extern "unadjusted" {
17df50a5
XL
4093 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32")]
4094 fn vcvtmq_s32_f32_(a: float32x4_t) -> int32x4_t;
4095 }
4096 vcvtmq_s32_f32_(a)
4097}
4098
4099/// Floating-point convert to signed integer, rounding toward minus infinity
f2b60f7d
FG
4100///
4101/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)
17df50a5
XL
4102#[inline]
4103#[target_feature(enable = "neon")]
4104#[cfg_attr(test, assert_instr(fcvtms))]
a2a8927a 4105#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4106pub unsafe fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
4107 #[allow(improper_ctypes)]
c295e0f8 4108 extern "unadjusted" {
17df50a5
XL
4109 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64")]
4110 fn vcvtm_s64_f64_(a: float64x1_t) -> int64x1_t;
4111 }
4112 vcvtm_s64_f64_(a)
4113}
4114
4115/// Floating-point convert to signed integer, rounding toward minus infinity
f2b60f7d
FG
4116///
4117/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)
17df50a5
XL
4118#[inline]
4119#[target_feature(enable = "neon")]
4120#[cfg_attr(test, assert_instr(fcvtms))]
a2a8927a 4121#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4122pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
4123 #[allow(improper_ctypes)]
c295e0f8 4124 extern "unadjusted" {
17df50a5
XL
4125 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64")]
4126 fn vcvtmq_s64_f64_(a: float64x2_t) -> int64x2_t;
4127 }
4128 vcvtmq_s64_f64_(a)
4129}
4130
4131/// Floating-point convert to signed integer, rounding toward minus infinity
f2b60f7d
FG
4132///
4133/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)
17df50a5
XL
4134#[inline]
4135#[target_feature(enable = "neon")]
4136#[cfg_attr(test, assert_instr(fcvtms))]
a2a8927a 4137#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4138pub unsafe fn vcvtms_s32_f32(a: f32) -> i32 {
4139 #[allow(improper_ctypes)]
c295e0f8 4140 extern "unadjusted" {
17df50a5
XL
4141 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtms.i32.f32")]
4142 fn vcvtms_s32_f32_(a: f32) -> i32;
4143 }
4144 vcvtms_s32_f32_(a)
4145}
4146
4147/// Floating-point convert to signed integer, rounding toward minus infinity
f2b60f7d
FG
4148///
4149/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)
17df50a5
XL
4150#[inline]
4151#[target_feature(enable = "neon")]
4152#[cfg_attr(test, assert_instr(fcvtms))]
a2a8927a 4153#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4154pub unsafe fn vcvtmd_s64_f64(a: f64) -> i64 {
4155 #[allow(improper_ctypes)]
c295e0f8 4156 extern "unadjusted" {
17df50a5
XL
4157 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtms.i64.f64")]
4158 fn vcvtmd_s64_f64_(a: f64) -> i64;
4159 }
4160 vcvtmd_s64_f64_(a)
4161}
4162
4163/// Floating-point convert to signed integer, rounding toward plus infinity
f2b60f7d
FG
4164///
4165/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)
17df50a5
XL
4166#[inline]
4167#[target_feature(enable = "neon")]
4168#[cfg_attr(test, assert_instr(fcvtps))]
a2a8927a 4169#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4170pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
4171 #[allow(improper_ctypes)]
c295e0f8 4172 extern "unadjusted" {
17df50a5
XL
4173 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32")]
4174 fn vcvtp_s32_f32_(a: float32x2_t) -> int32x2_t;
4175 }
4176 vcvtp_s32_f32_(a)
4177}
4178
4179/// Floating-point convert to signed integer, rounding toward plus infinity
f2b60f7d
FG
4180///
4181/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)
17df50a5
XL
4182#[inline]
4183#[target_feature(enable = "neon")]
4184#[cfg_attr(test, assert_instr(fcvtps))]
a2a8927a 4185#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4186pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
4187 #[allow(improper_ctypes)]
c295e0f8 4188 extern "unadjusted" {
17df50a5
XL
4189 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32")]
4190 fn vcvtpq_s32_f32_(a: float32x4_t) -> int32x4_t;
4191 }
4192 vcvtpq_s32_f32_(a)
4193}
4194
4195/// Floating-point convert to signed integer, rounding toward plus infinity
f2b60f7d
FG
4196///
4197/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)
17df50a5
XL
4198#[inline]
4199#[target_feature(enable = "neon")]
4200#[cfg_attr(test, assert_instr(fcvtps))]
a2a8927a 4201#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4202pub unsafe fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
4203 #[allow(improper_ctypes)]
c295e0f8 4204 extern "unadjusted" {
17df50a5
XL
4205 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64")]
4206 fn vcvtp_s64_f64_(a: float64x1_t) -> int64x1_t;
4207 }
4208 vcvtp_s64_f64_(a)
4209}
4210
4211/// Floating-point convert to signed integer, rounding toward plus infinity
f2b60f7d
FG
4212///
4213/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)
17df50a5
XL
4214#[inline]
4215#[target_feature(enable = "neon")]
4216#[cfg_attr(test, assert_instr(fcvtps))]
a2a8927a 4217#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4218pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
4219 #[allow(improper_ctypes)]
c295e0f8 4220 extern "unadjusted" {
17df50a5
XL
4221 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64")]
4222 fn vcvtpq_s64_f64_(a: float64x2_t) -> int64x2_t;
4223 }
4224 vcvtpq_s64_f64_(a)
4225}
4226
4227/// Floating-point convert to signed integer, rounding toward plus infinity
f2b60f7d
FG
4228///
4229/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)
17df50a5
XL
4230#[inline]
4231#[target_feature(enable = "neon")]
4232#[cfg_attr(test, assert_instr(fcvtps))]
a2a8927a 4233#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4234pub unsafe fn vcvtps_s32_f32(a: f32) -> i32 {
4235 #[allow(improper_ctypes)]
c295e0f8 4236 extern "unadjusted" {
17df50a5
XL
4237 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtps.i32.f32")]
4238 fn vcvtps_s32_f32_(a: f32) -> i32;
4239 }
4240 vcvtps_s32_f32_(a)
4241}
4242
4243/// Floating-point convert to signed integer, rounding toward plus infinity
f2b60f7d
FG
4244///
4245/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)
17df50a5
XL
4246#[inline]
4247#[target_feature(enable = "neon")]
4248#[cfg_attr(test, assert_instr(fcvtps))]
a2a8927a 4249#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4250pub unsafe fn vcvtpd_s64_f64(a: f64) -> i64 {
4251 #[allow(improper_ctypes)]
c295e0f8 4252 extern "unadjusted" {
17df50a5
XL
4253 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtps.i64.f64")]
4254 fn vcvtpd_s64_f64_(a: f64) -> i64;
4255 }
4256 vcvtpd_s64_f64_(a)
4257}
4258
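// Editorial sketch, not part of the generated file: contrasts the four
// rounding modes of the scalar signed conversions on a tie value (2.5).
// Assumes an aarch64 target with NEON available.
#[cfg(all(test, target_arch = "aarch64"))]
mod rounding_mode_sketch {
    #[test]
    fn four_rounding_modes_on_a_tie() {
        use core::arch::aarch64::*;
        unsafe {
            assert_eq!(vcvtas_s32_f32(2.5), 3); // ties away from zero
            assert_eq!(vcvtns_s32_f32(2.5), 2); // ties to even
            assert_eq!(vcvtms_s32_f32(2.5), 2); // toward minus infinity
            assert_eq!(vcvtps_s32_f32(2.5), 3); // toward plus infinity

            assert_eq!(vcvtas_s32_f32(-2.5), -3);
            assert_eq!(vcvtns_s32_f32(-2.5), -2);
            assert_eq!(vcvtms_s32_f32(-2.5), -3);
            assert_eq!(vcvtps_s32_f32(-2.5), -2);
        }
    }
}
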
4259/// Floating-point convert to unsigned integer, rounding to nearest with ties to away
f2b60f7d
FG
4260///
4261/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)
17df50a5
XL
4262#[inline]
4263#[target_feature(enable = "neon")]
4264#[cfg_attr(test, assert_instr(fcvtau))]
a2a8927a 4265#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4266pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
4267 #[allow(improper_ctypes)]
c295e0f8 4268 extern "unadjusted" {
17df50a5
XL
4269 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32")]
4270 fn vcvta_u32_f32_(a: float32x2_t) -> uint32x2_t;
4271 }
4272 vcvta_u32_f32_(a)
4273}
4274
4275/// Floating-point convert to unsigned integer, rounding to nearest with ties to away
f2b60f7d
FG
4276///
4277/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)
17df50a5
XL
4278#[inline]
4279#[target_feature(enable = "neon")]
4280#[cfg_attr(test, assert_instr(fcvtau))]
a2a8927a 4281#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4282pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
4283 #[allow(improper_ctypes)]
c295e0f8 4284 extern "unadjusted" {
17df50a5
XL
4285 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32")]
4286 fn vcvtaq_u32_f32_(a: float32x4_t) -> uint32x4_t;
4287 }
4288 vcvtaq_u32_f32_(a)
4289}
4290
4291/// Floating-point convert to unsigned integer, rounding to nearest with ties to away
f2b60f7d
FG
4292///
4293/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)
17df50a5
XL
4294#[inline]
4295#[target_feature(enable = "neon")]
4296#[cfg_attr(test, assert_instr(fcvtau))]
a2a8927a 4297#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4298pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
4299 #[allow(improper_ctypes)]
c295e0f8 4300 extern "unadjusted" {
17df50a5
XL
4301 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64")]
4302 fn vcvta_u64_f64_(a: float64x1_t) -> uint64x1_t;
4303 }
4304 vcvta_u64_f64_(a)
4305}
4306
4307/// Floating-point convert to unsigned integer, rounding to nearest with ties to away
f2b60f7d
FG
4308///
4309/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)
17df50a5
XL
4310#[inline]
4311#[target_feature(enable = "neon")]
4312#[cfg_attr(test, assert_instr(fcvtau))]
a2a8927a 4313#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4314pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
4315 #[allow(improper_ctypes)]
c295e0f8 4316 extern "unadjusted" {
17df50a5
XL
4317 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64")]
4318 fn vcvtaq_u64_f64_(a: float64x2_t) -> uint64x2_t;
4319 }
4320 vcvtaq_u64_f64_(a)
4321}
4322
4323/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
f2b60f7d
FG
4324///
4325/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)
17df50a5
XL
4326#[inline]
4327#[target_feature(enable = "neon")]
4328#[cfg_attr(test, assert_instr(fcvtnu))]
a2a8927a 4329#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4330pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
4331 #[allow(improper_ctypes)]
c295e0f8 4332 extern "unadjusted" {
17df50a5
XL
4333 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32")]
4334 fn vcvtn_u32_f32_(a: float32x2_t) -> uint32x2_t;
4335 }
4336 vcvtn_u32_f32_(a)
4337}
4338
4339/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
f2b60f7d
FG
4340///
4341/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)
17df50a5
XL
4342#[inline]
4343#[target_feature(enable = "neon")]
4344#[cfg_attr(test, assert_instr(fcvtnu))]
a2a8927a 4345#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4346pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
4347 #[allow(improper_ctypes)]
c295e0f8 4348 extern "unadjusted" {
17df50a5
XL
4349 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32")]
4350 fn vcvtnq_u32_f32_(a: float32x4_t) -> uint32x4_t;
4351 }
4352 vcvtnq_u32_f32_(a)
4353}
4354
4355/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
f2b60f7d
FG
4356///
4357/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)
17df50a5
XL
4358#[inline]
4359#[target_feature(enable = "neon")]
4360#[cfg_attr(test, assert_instr(fcvtnu))]
a2a8927a 4361#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4362pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
4363 #[allow(improper_ctypes)]
c295e0f8 4364 extern "unadjusted" {
17df50a5
XL
4365 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64")]
4366 fn vcvtn_u64_f64_(a: float64x1_t) -> uint64x1_t;
4367 }
4368 vcvtn_u64_f64_(a)
4369}
4370
4371/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
f2b60f7d
FG
4372///
4373/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)
17df50a5
XL
4374#[inline]
4375#[target_feature(enable = "neon")]
4376#[cfg_attr(test, assert_instr(fcvtnu))]
a2a8927a 4377#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4378pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
4379 #[allow(improper_ctypes)]
c295e0f8 4380 extern "unadjusted" {
17df50a5
XL
4381 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64")]
4382 fn vcvtnq_u64_f64_(a: float64x2_t) -> uint64x2_t;
4383 }
4384 vcvtnq_u64_f64_(a)
4385}
4386
4387/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
f2b60f7d
FG
4388///
4389/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)
17df50a5
XL
4390#[inline]
4391#[target_feature(enable = "neon")]
4392#[cfg_attr(test, assert_instr(fcvtnu))]
a2a8927a 4393#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4394pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 {
4395 #[allow(improper_ctypes)]
c295e0f8 4396 extern "unadjusted" {
17df50a5
XL
4397 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtnu.i32.f32")]
4398 fn vcvtns_u32_f32_(a: f32) -> u32;
4399 }
4400 vcvtns_u32_f32_(a)
4401}
4402
4403/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
f2b60f7d
FG
4404///
4405/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)
17df50a5
XL
4406#[inline]
4407#[target_feature(enable = "neon")]
4408#[cfg_attr(test, assert_instr(fcvtnu))]
a2a8927a 4409#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4410pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 {
4411 #[allow(improper_ctypes)]
c295e0f8 4412 extern "unadjusted" {
17df50a5
XL
4413 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtnu.i64.f64")]
4414 fn vcvtnd_u64_f64_(a: f64) -> u64;
4415 }
4416 vcvtnd_u64_f64_(a)
4417}
4418
4419/// Floating-point convert to unsigned integer, rounding toward minus infinity
f2b60f7d
FG
4420///
4421/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)
17df50a5
XL
4422#[inline]
4423#[target_feature(enable = "neon")]
4424#[cfg_attr(test, assert_instr(fcvtmu))]
a2a8927a 4425#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4426pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
4427 #[allow(improper_ctypes)]
c295e0f8 4428 extern "unadjusted" {
17df50a5
XL
4429 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32")]
4430 fn vcvtm_u32_f32_(a: float32x2_t) -> uint32x2_t;
4431 }
4432 vcvtm_u32_f32_(a)
4433}
4434
4435/// Floating-point convert to unsigned integer, rounding toward minus infinity
f2b60f7d
FG
4436///
4437/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)
17df50a5
XL
4438#[inline]
4439#[target_feature(enable = "neon")]
4440#[cfg_attr(test, assert_instr(fcvtmu))]
a2a8927a 4441#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4442pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
4443 #[allow(improper_ctypes)]
c295e0f8 4444 extern "unadjusted" {
17df50a5
XL
4445 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32")]
4446 fn vcvtmq_u32_f32_(a: float32x4_t) -> uint32x4_t;
4447 }
4448 vcvtmq_u32_f32_(a)
4449}
4450
4451/// Floating-point convert to unsigned integer, rounding toward minus infinity
f2b60f7d
FG
4452///
4453/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)
17df50a5
XL
4454#[inline]
4455#[target_feature(enable = "neon")]
4456#[cfg_attr(test, assert_instr(fcvtmu))]
a2a8927a 4457#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4458pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
4459 #[allow(improper_ctypes)]
c295e0f8 4460 extern "unadjusted" {
17df50a5
XL
4461 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64")]
4462 fn vcvtm_u64_f64_(a: float64x1_t) -> uint64x1_t;
4463 }
4464 vcvtm_u64_f64_(a)
4465}
4466
4467/// Floating-point convert to unsigned integer, rounding toward minus infinity
f2b60f7d
FG
4468///
4469/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)
17df50a5
XL
4470#[inline]
4471#[target_feature(enable = "neon")]
4472#[cfg_attr(test, assert_instr(fcvtmu))]
a2a8927a 4473#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4474pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
4475 #[allow(improper_ctypes)]
c295e0f8 4476 extern "unadjusted" {
17df50a5
XL
4477 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64")]
4478 fn vcvtmq_u64_f64_(a: float64x2_t) -> uint64x2_t;
4479 }
4480 vcvtmq_u64_f64_(a)
4481}
4482
4483/// Floating-point convert to unsigned integer, rounding toward minus infinity
f2b60f7d
FG
4484///
4485/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)
17df50a5
XL
4486#[inline]
4487#[target_feature(enable = "neon")]
4488#[cfg_attr(test, assert_instr(fcvtmu))]
a2a8927a 4489#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4490pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 {
4491 #[allow(improper_ctypes)]
c295e0f8 4492 extern "unadjusted" {
17df50a5
XL
4493 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtmu.i32.f32")]
4494 fn vcvtms_u32_f32_(a: f32) -> u32;
4495 }
4496 vcvtms_u32_f32_(a)
4497}
4498
4499/// Floating-point convert to unsigned integer, rounding toward minus infinity
f2b60f7d
FG
4500///
4501/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)
17df50a5
XL
4502#[inline]
4503#[target_feature(enable = "neon")]
4504#[cfg_attr(test, assert_instr(fcvtmu))]
a2a8927a 4505#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4506pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 {
4507 #[allow(improper_ctypes)]
c295e0f8 4508 extern "unadjusted" {
17df50a5
XL
4509 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtmu.i64.f64")]
4510 fn vcvtmd_u64_f64_(a: f64) -> u64;
4511 }
4512 vcvtmd_u64_f64_(a)
4513}
4514
4515/// Floating-point convert to unsigned integer, rounding toward plus infinity
f2b60f7d
FG
4516///
4517/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)
17df50a5
XL
4518#[inline]
4519#[target_feature(enable = "neon")]
4520#[cfg_attr(test, assert_instr(fcvtpu))]
a2a8927a 4521#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4522pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
4523 #[allow(improper_ctypes)]
c295e0f8 4524 extern "unadjusted" {
17df50a5
XL
4525 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32")]
4526 fn vcvtp_u32_f32_(a: float32x2_t) -> uint32x2_t;
4527 }
4528 vcvtp_u32_f32_(a)
4529}
4530
4531/// Floating-point convert to unsigned integer, rounding toward plus infinity
f2b60f7d
FG
4532///
4533/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)
17df50a5
XL
4534#[inline]
4535#[target_feature(enable = "neon")]
4536#[cfg_attr(test, assert_instr(fcvtpu))]
a2a8927a 4537#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4538pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
4539 #[allow(improper_ctypes)]
c295e0f8 4540 extern "unadjusted" {
17df50a5
XL
4541 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32")]
4542 fn vcvtpq_u32_f32_(a: float32x4_t) -> uint32x4_t;
4543 }
4544 vcvtpq_u32_f32_(a)
4545}
4546
4547/// Floating-point convert to unsigned integer, rounding toward plus infinity
f2b60f7d
FG
4548///
4549/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)
17df50a5
XL
4550#[inline]
4551#[target_feature(enable = "neon")]
4552#[cfg_attr(test, assert_instr(fcvtpu))]
a2a8927a 4553#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4554pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
4555 #[allow(improper_ctypes)]
c295e0f8 4556 extern "unadjusted" {
17df50a5
XL
4557 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64")]
4558 fn vcvtp_u64_f64_(a: float64x1_t) -> uint64x1_t;
4559 }
4560 vcvtp_u64_f64_(a)
4561}
4562
4563/// Floating-point convert to unsigned integer, rounding toward plus infinity
f2b60f7d
FG
4564///
4565/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)
17df50a5
XL
4566#[inline]
4567#[target_feature(enable = "neon")]
4568#[cfg_attr(test, assert_instr(fcvtpu))]
a2a8927a 4569#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4570pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
4571 #[allow(improper_ctypes)]
c295e0f8 4572 extern "unadjusted" {
17df50a5
XL
4573 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64")]
4574 fn vcvtpq_u64_f64_(a: float64x2_t) -> uint64x2_t;
4575 }
4576 vcvtpq_u64_f64_(a)
4577}
4578
4579/// Floating-point convert to unsigned integer, rounding toward plus infinity
f2b60f7d
FG
4580///
4581/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)
17df50a5
XL
4582#[inline]
4583#[target_feature(enable = "neon")]
4584#[cfg_attr(test, assert_instr(fcvtpu))]
a2a8927a 4585#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4586pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 {
4587 #[allow(improper_ctypes)]
c295e0f8 4588 extern "unadjusted" {
17df50a5
XL
4589 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtpu.i32.f32")]
4590 fn vcvtps_u32_f32_(a: f32) -> u32;
4591 }
4592 vcvtps_u32_f32_(a)
4593}
4594
4595/// Floating-point convert to unsigned integer, rounding toward plus infinity
f2b60f7d
FG
4596///
4597/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)
17df50a5
XL
4598#[inline]
4599#[target_feature(enable = "neon")]
4600#[cfg_attr(test, assert_instr(fcvtpu))]
a2a8927a 4601#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5
XL
4602pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 {
4603 #[allow(improper_ctypes)]
c295e0f8 4604 extern "unadjusted" {
17df50a5
XL
4605 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtpu.i64.f64")]
4606 fn vcvtpd_u64_f64_(a: f64) -> u64;
4607 }
4608 vcvtpd_u64_f64_(a)
4609}
4610
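// Editorial sketch, not part of the generated file: the unsigned variants use
// the same rounding modes as the signed ones but saturate at 0 when the
// rounded result would be negative. Assumes an aarch64 target with NEON.
#[cfg(all(test, target_arch = "aarch64"))]
mod unsigned_rounding_sketch {
    #[test]
    fn negative_results_clamp_to_zero() {
        use core::arch::aarch64::*;
        unsafe {
            // 0.5 rounds differently per mode, but never below zero.
            assert_eq!(vcvtas_u32_f32(0.5), 1); // ties away from zero
            assert_eq!(vcvtns_u32_f32(0.5), 0); // ties to even
            assert_eq!(vcvtms_u32_f32(0.5), 0); // toward minus infinity
            assert_eq!(vcvtps_u32_f32(0.5), 1); // toward plus infinity
            // A negative input saturates to 0 in every unsigned mode.
            assert_eq!(vcvtmd_u64_f64(-7.25), 0);
        }
    }
}
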
4611/// Set all vector lanes to the same value
f2b60f7d
FG
4612///
4613/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)
17df50a5
XL
4614#[inline]
4615#[target_feature(enable = "neon")]
4616#[cfg_attr(test, assert_instr(dup, N = 1))]
4617#[rustc_legacy_const_generics(1)]
a2a8927a 4618#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 4619pub unsafe fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
353b0b11
FG
4620 static_assert_uimm_bits!(N, 1);
4621 simd_shuffle!(a, a, [N as u32, N as u32])
17df50a5
XL
4622}
4623
4624/// Set all vector lanes to the same value
f2b60f7d
FG
4625///
4626/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)
17df50a5
XL
4627#[inline]
4628#[target_feature(enable = "neon")]
4629#[cfg_attr(test, assert_instr(dup, N = 0))]
4630#[rustc_legacy_const_generics(1)]
a2a8927a 4631#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 4632pub unsafe fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
353b0b11
FG
4633 static_assert!(N == 0);
4634 simd_shuffle!(a, a, [N as u32, N as u32])
17df50a5
XL
4635}
4636
4637/// Set all vector lanes to the same value
f2b60f7d
FG
4638///
4639/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)
17df50a5
XL
4640#[inline]
4641#[target_feature(enable = "neon")]
4642#[cfg_attr(test, assert_instr(dup, N = 1))]
4643#[rustc_legacy_const_generics(1)]
a2a8927a 4644#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 4645pub unsafe fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
353b0b11
FG
4646 static_assert_uimm_bits!(N, 1);
4647 simd_shuffle!(a, a, [N as u32, N as u32])
17df50a5
XL
4648}
4649
4650/// Set all vector lanes to the same value
f2b60f7d
FG
4651///
4652/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)
17df50a5
XL
4653#[inline]
4654#[target_feature(enable = "neon")]
4655#[cfg_attr(test, assert_instr(dup, N = 0))]
4656#[rustc_legacy_const_generics(1)]
a2a8927a 4657#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 4658pub unsafe fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
353b0b11
FG
4659 static_assert!(N == 0);
4660 simd_shuffle!(a, a, [N as u32, N as u32])
17df50a5
XL
4661}
4662
4663/// Set all vector lanes to the same value
f2b60f7d
FG
4664///
4665/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)
17df50a5
XL
4666#[inline]
4667#[target_feature(enable = "neon")]
4668#[cfg_attr(test, assert_instr(nop, N = 0))]
4669#[rustc_legacy_const_generics(1)]
a2a8927a 4670#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 4671pub unsafe fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
353b0b11 4672 static_assert!(N == 0);
17df50a5
XL
4673 a
4674}
4675
4676/// Set all vector lanes to the same value
f2b60f7d
FG
4677///
4678/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)
17df50a5
XL
4679#[inline]
4680#[target_feature(enable = "neon")]
4681#[cfg_attr(test, assert_instr(nop, N = 0))]
4682#[rustc_legacy_const_generics(1)]
a2a8927a 4683#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 4684pub unsafe fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
353b0b11 4685 static_assert!(N == 0);
17df50a5
XL
4686 a
4687}
4688
4689/// Set all vector lanes to the same value
f2b60f7d
FG
4690///
4691/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)
17df50a5
XL
4692#[inline]
4693#[target_feature(enable = "neon")]
4694#[cfg_attr(test, assert_instr(nop, N = 1))]
4695#[rustc_legacy_const_generics(1)]
a2a8927a 4696#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 4697pub unsafe fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
353b0b11 4698 static_assert_uimm_bits!(N, 1);
17df50a5
XL
4699 transmute::<u64, _>(simd_extract(a, N as u32))
4700}
4701
4702/// Set all vector lanes to the same value
f2b60f7d
FG
4703///
4704/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)
17df50a5
XL
4705#[inline]
4706#[target_feature(enable = "neon")]
4707#[cfg_attr(test, assert_instr(nop, N = 1))]
4708#[rustc_legacy_const_generics(1)]
a2a8927a 4709#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 4710pub unsafe fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
353b0b11 4711 static_assert_uimm_bits!(N, 1);
17df50a5
XL
4712 transmute::<f64, _>(simd_extract(a, N as u32))
4713}
4714
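// Editorial sketch, not part of the generated file: the q-form dup intrinsics
// broadcast the selected lane into every lane of the result, while the
// one-element form (and the scalar b/h/s/d forms that follow) reduce to a
// single lane read. Assumes an aarch64 target with NEON available.
#[cfg(all(test, target_arch = "aarch64"))]
mod dup_lane_sketch {
    #[test]
    fn broadcast_and_extract_a_lane() {
        use core::arch::aarch64::*;
        unsafe {
            let v = vsetq_lane_f64::<1>(9.0, vdupq_n_f64(4.0)); // lanes: [4.0, 9.0]
            // Broadcast lane 1 into both lanes of the result.
            let b = vdupq_laneq_f64::<1>(v);
            assert_eq!(vgetq_lane_f64::<0>(b), 9.0);
            assert_eq!(vgetq_lane_f64::<1>(b), 9.0);
            // The one-element form simply carries the selected lane.
            let one = vdup_laneq_f64::<0>(v);
            assert_eq!(vget_lane_f64::<0>(one), 4.0);
        }
    }
}
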
4715/// Set all vector lanes to the same value
f2b60f7d
FG
4716///
4717/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)
17df50a5
XL
4718#[inline]
4719#[target_feature(enable = "neon")]
4720#[cfg_attr(test, assert_instr(nop, N = 4))]
4721#[rustc_legacy_const_generics(1)]
a2a8927a 4722#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 4723pub unsafe fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
353b0b11 4724 static_assert_uimm_bits!(N, 3);
17df50a5
XL
4725 simd_extract(a, N as u32)
4726}
4727
4728/// Set all vector lanes to the same value
f2b60f7d
FG
4729///
4730/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)
17df50a5
XL
4731#[inline]
4732#[target_feature(enable = "neon")]
4733#[cfg_attr(test, assert_instr(nop, N = 8))]
4734#[rustc_legacy_const_generics(1)]
a2a8927a 4735#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 4736pub unsafe fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
353b0b11 4737 static_assert_uimm_bits!(N, 4);
17df50a5
XL
4738 simd_extract(a, N as u32)
4739}
4740
4741/// Set all vector lanes to the same value
f2b60f7d
FG
4742///
4743/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)
17df50a5
XL
4744#[inline]
4745#[target_feature(enable = "neon")]
4746#[cfg_attr(test, assert_instr(nop, N = 2))]
4747#[rustc_legacy_const_generics(1)]
a2a8927a 4748#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 4749pub unsafe fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
353b0b11 4750 static_assert_uimm_bits!(N, 2);
17df50a5
XL
4751 simd_extract(a, N as u32)
4752}
4753
4754/// Set all vector lanes to the same value
f2b60f7d
FG
4755///
4756/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)
17df50a5
XL
4757#[inline]
4758#[target_feature(enable = "neon")]
4759#[cfg_attr(test, assert_instr(nop, N = 4))]
4760#[rustc_legacy_const_generics(1)]
a2a8927a 4761#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 4762pub unsafe fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
353b0b11 4763 static_assert_uimm_bits!(N, 3);
17df50a5
XL
4764 simd_extract(a, N as u32)
4765}
4766
4767/// Set all vector lanes to the same value
f2b60f7d
FG
4768///
4769/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)
17df50a5
XL
4770#[inline]
4771#[target_feature(enable = "neon")]
4772#[cfg_attr(test, assert_instr(nop, N = 1))]
4773#[rustc_legacy_const_generics(1)]
a2a8927a 4774#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 4775pub unsafe fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
353b0b11 4776 static_assert_uimm_bits!(N, 1);
17df50a5
XL
4777 simd_extract(a, N as u32)
4778}
4779
4780/// Set all vector lanes to the same value
f2b60f7d
FG
4781///
4782/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)
17df50a5
XL
4783#[inline]
4784#[target_feature(enable = "neon")]
4785#[cfg_attr(test, assert_instr(nop, N = 2))]
4786#[rustc_legacy_const_generics(1)]
a2a8927a 4787#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 4788pub unsafe fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
353b0b11 4789 static_assert_uimm_bits!(N, 2);
17df50a5
XL
4790 simd_extract(a, N as u32)
4791}
4792
4793/// Set all vector lanes to the same value
f2b60f7d
FG
4794///
4795/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)
17df50a5
XL
4796#[inline]
4797#[target_feature(enable = "neon")]
4798#[cfg_attr(test, assert_instr(nop, N = 0))]
4799#[rustc_legacy_const_generics(1)]
a2a8927a 4800#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 4801pub unsafe fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
353b0b11 4802 static_assert!(N == 0);
17df50a5
XL
4803 simd_extract(a, N as u32)
4804}
4805
4806/// Set all vector lanes to the same value
f2b60f7d
FG
4807///
4808/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)
17df50a5
XL
4809#[inline]
4810#[target_feature(enable = "neon")]
4811#[cfg_attr(test, assert_instr(nop, N = 1))]
4812#[rustc_legacy_const_generics(1)]
a2a8927a 4813#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17df50a5 4814pub unsafe fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
353b0b11 4815 static_assert_uimm_bits!(N, 1);
17df50a5
XL
4816 simd_extract(a, N as u32)
4817}
4818
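// Editorial sketch, not part of the generated file: the scalar dup forms
// extract one lane into a plain integer, with the const N checked at compile
// time by the static_assert above. Assumes an aarch64 target with NEON.
#[cfg(all(test, target_arch = "aarch64"))]
mod dup_scalar_extract_sketch {
    #[test]
    fn scalar_forms_read_one_lane() {
        use core::arch::aarch64::*;
        unsafe {
            // lanes: [1, 1, 1, 1, 8, 8, 8, 8]
            let v = vcombine_s16(vdup_n_s16(1), vdup_n_s16(8));
            assert_eq!(vduph_laneq_s16::<0>(v), 1);
            assert_eq!(vduph_laneq_s16::<7>(v), 8);
        }
    }
}
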
4819/// Set all vector lanes to the same value
f2b60f7d
FG
4820///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
    static_assert_uimm_bits!(N, 3);
    simd_extract(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
    static_assert_uimm_bits!(N, 4);
    simd_extract(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
    static_assert_uimm_bits!(N, 2);
    simd_extract(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
    static_assert_uimm_bits!(N, 3);
    simd_extract(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
    static_assert_uimm_bits!(N, 1);
    simd_extract(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
    static_assert_uimm_bits!(N, 2);
    simd_extract(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
    static_assert!(N == 0);
    simd_extract(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
    static_assert_uimm_bits!(N, 1);
    simd_extract(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
    static_assert_uimm_bits!(N, 3);
    simd_extract(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
    static_assert_uimm_bits!(N, 4);
    simd_extract(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
    static_assert_uimm_bits!(N, 2);
    simd_extract(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
    static_assert_uimm_bits!(N, 3);
    simd_extract(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
    static_assert_uimm_bits!(N, 1);
    simd_extract(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
    static_assert_uimm_bits!(N, 2);
    simd_extract(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
    static_assert!(N == 0);
    simd_extract(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
    static_assert_uimm_bits!(N, 1);
    simd_extract(a, N as u32)
}

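// Illustrative sketch (not part of the generated API): the scalar-duplicate
// intrinsics above read back a single lane selected by the const generic.
// The helper name `example_dup_lane` and its values are hypothetical;
// `vset_lane_f32` and `vdup_n_f32` are assumed to be in scope via `use super::*;`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_dup_lane() {
    let v = vset_lane_f32::<1>(2.0, vdup_n_f32(1.0)); // lanes: [1.0, 2.0]
    assert_eq!(vdups_lane_f32::<1>(v), 2.0); // lane index is checked at compile time
}
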
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert_uimm_bits!(N, 1);
    match N & 0b1 {
        0 => simd_shuffle!(a, b, [0, 1]),
        1 => simd_shuffle!(a, b, [1, 2]),
        _ => unreachable_unchecked(),
    }
}

/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(N, 1);
    match N & 0b1 {
        0 => simd_shuffle!(a, b, [0, 1]),
        1 => simd_shuffle!(a, b, [1, 2]),
        _ => unreachable_unchecked(),
    }
}

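// Illustrative sketch (not part of the generated API): with N = 1 the EXT
// pattern above yields [a[1], b[0]], i.e. a two-element window slid across the
// concatenation of `a` and `b`. The helper name `example_extq_f64` and the
// constants are hypothetical; `vdupq_n_f64`, `vsetq_lane_f64` and
// `vgetq_lane_f64` are assumed to be in scope via `use super::*;`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_extq_f64() {
    let a = vsetq_lane_f64::<1>(2.0, vdupq_n_f64(1.0)); // [1.0, 2.0]
    let b = vsetq_lane_f64::<1>(4.0, vdupq_n_f64(3.0)); // [3.0, 4.0]
    let r = vextq_f64::<1>(a, b);                       // [2.0, 3.0]
    assert_eq!(vgetq_lane_f64::<0>(r), 2.0);
    assert_eq!(vgetq_lane_f64::<1>(r), 3.0);
}
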
/// Floating-point multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    simd_add(a, simd_mul(b, c))
}

/// Floating-point multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    simd_add(a, simd_mul(b, c))
}

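// Illustrative sketch (not part of the generated API): vmla_f64 computes
// a + b * c as a separate multiply and add (hence the `fmul` in assert_instr
// above), in contrast to the fused multiply-add intrinsics. The helper name
// and values are hypothetical; `vdup_n_f64` and `vget_lane_f64` are assumed
// to be in scope via `use super::*;`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_mla_f64() {
    let r = vmla_f64(vdup_n_f64(1.0), vdup_n_f64(2.0), vdup_n_f64(3.0));
    assert_eq!(vget_lane_f64::<0>(r), 7.0); // 1.0 + 2.0 * 3.0
}
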
/// Signed multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
    vmlal_s8(a, b, c)
}

/// Signed multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
    vmlal_s16(a, b, c)
}

/// Signed multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
    let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
    vmlal_s32(a, b, c)
}

/// Unsigned multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
    vmlal_u8(a, b, c)
}

/// Unsigned multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
    vmlal_u16(a, b, c)
}

/// Unsigned multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
    let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
    vmlal_u32(a, b, c)
}

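// Illustrative sketch (not part of the generated API): the *_high variants
// widen only the upper half of the 128-bit sources, so vmlal_high_u8 computes
// a[i] + (b[8 + i] as u16) * (c[8 + i] as u16) for i in 0..8. The helper name
// and constants are hypothetical; `vdupq_n_u16`, `vdupq_n_u8` and
// `vgetq_lane_u16` are assumed to be in scope via `use super::*;`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_mlal_high_u8() {
    let r = vmlal_high_u8(vdupq_n_u16(1), vdupq_n_u8(2), vdupq_n_u8(3));
    assert_eq!(vgetq_lane_u16::<0>(r), 7); // every lane: 1 + 2 * 3
}
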
/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vmlal_high_s16(a, b, vdupq_n_s16(c))
}

/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vmlal_high_s32(a, b, vdupq_n_s32(c))
}

/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    vmlal_high_u16(a, b, vdupq_n_u16(c))
}

/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    vmlal_high_u32(a, b, vdupq_n_u32(c))
}

/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    vmlal_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    vmlal_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    vmlal_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    vmlal_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t, c: uint16x4_t) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    vmlal_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    vmlal_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t, c: uint32x2_t) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    vmlal_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    vmlal_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

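// Illustrative sketch (not part of the generated API): the lane forms
// broadcast lane LANE of `c` before the widening multiply-accumulate, so with
// a compile-time lane index they behave like the `_n` forms above. The helper
// name and constants are hypothetical; `vdupq_n_s32`, `vdupq_n_s16`,
// `vdup_n_s16` and `vgetq_lane_s32` are assumed to be in scope via `use super::*;`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_mlal_high_lane_s16() {
    let r = vmlal_high_lane_s16::<0>(vdupq_n_s32(0), vdupq_n_s16(5), vdup_n_s16(4));
    assert_eq!(vgetq_lane_s32::<0>(r), 20); // every lane: 0 + 5 * 4
}
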
/// Floating-point multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    simd_sub(a, simd_mul(b, c))
}

/// Floating-point multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    simd_sub(a, simd_mul(b, c))
}

/// Signed multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
    vmlsl_s8(a, b, c)
}

/// Signed multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
    vmlsl_s16(a, b, c)
}

/// Signed multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
    let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
    vmlsl_s32(a, b, c)
}

/// Unsigned multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
    vmlsl_u8(a, b, c)
}

/// Unsigned multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
    vmlsl_u16(a, b, c)
}

/// Unsigned multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
    let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
    vmlsl_u32(a, b, c)
}

/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vmlsl_high_s16(a, b, vdupq_n_s16(c))
}

/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vmlsl_high_s32(a, b, vdupq_n_s32(c))
}

/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    vmlsl_high_u16(a, b, vdupq_n_u16(c))
}

/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    vmlsl_high_u32(a, b, vdupq_n_u32(c))
}

/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    vmlsl_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    vmlsl_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    vmlsl_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    vmlsl_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t, c: uint16x4_t) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    vmlsl_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    vmlsl_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t, c: uint32x2_t) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    vmlsl_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    vmlsl_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

/// Extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    let c: int8x8_t = simd_cast(b);
    simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    let c: int16x4_t = simd_cast(b);
    simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    let c: int32x2_t = simd_cast(b);
    simd_shuffle!(a, c, [0, 1, 2, 3])
}

/// Extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    let c: uint8x8_t = simd_cast(b);
    simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    let c: uint16x4_t = simd_cast(b);
    simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    let c: uint32x2_t = simd_cast(b);
    simd_shuffle!(a, c, [0, 1, 2, 3])
}

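// Illustrative sketch (not part of the generated API): vmovn_high_* narrows
// `b` and writes it into the upper half of the result while keeping `a` as the
// lower half (the XTN/XTN2 pattern for narrowing two vectors into one 128-bit
// register). The helper name and constants are hypothetical; `vmovn_u16`,
// `vdupq_n_u16` and `vgetq_lane_u8` are assumed to be in scope via `use super::*;`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_movn_high_u16() {
    let low = vmovn_u16(vdupq_n_u16(1));         // first 8 narrowed lanes
    let r = vmovn_high_u16(low, vdupq_n_u16(2)); // 8 more lanes into the top
    assert_eq!(vgetq_lane_u8::<0>(r), 1);
    assert_eq!(vgetq_lane_u8::<15>(r), 2);
}
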
/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(neg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vneg_s64(a: int64x1_t) -> int64x1_t {
    simd_neg(a)
}

/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(neg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vnegq_s64(a: int64x2_t) -> int64x2_t {
    simd_neg(a)
}

/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(neg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vnegd_s64(a: i64) -> i64 {
    a.wrapping_neg()
}

/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fneg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vneg_f64(a: float64x1_t) -> float64x1_t {
    simd_neg(a)
}

/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fneg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vnegq_f64(a: float64x2_t) -> float64x2_t {
    simd_neg(a)
}

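// Illustrative sketch (not part of the generated API): vnegd_s64 is the scalar
// form and, as written above, wraps on overflow rather than panicking, so
// negating i64::MIN gives back i64::MIN. The helper name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_negd_s64() {
    assert_eq!(vnegd_s64(7), -7);
    assert_eq!(vnegd_s64(i64::MIN), i64::MIN); // wrapping_neg, no overflow panic
}
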
/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqneg_s64(a: int64x1_t) -> int64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqneg.v1i64")]
        fn vqneg_s64_(a: int64x1_t) -> int64x1_t;
    }
    vqneg_s64_(a)
}

/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqneg.v2i64")]
        fn vqnegq_s64_(a: int64x2_t) -> int64x2_t;
    }
    vqnegq_s64_(a)
}

/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqnegb_s8(a: i8) -> i8 {
    simd_extract(vqneg_s8(vdup_n_s8(a)), 0)
}

/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqnegh_s16(a: i16) -> i16 {
    simd_extract(vqneg_s16(vdup_n_s16(a)), 0)
}

/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqnegs_s32(a: i32) -> i32 {
    simd_extract(vqneg_s32(vdup_n_s32(a)), 0)
}

/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqnegd_s64(a: i64) -> i64 {
    simd_extract(vqneg_s64(vdup_n_s64(a)), 0)
}

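// Illustrative sketch (not part of the generated API): the saturating forms
// clamp instead of wrapping, so negating the most negative value saturates to
// the type's maximum. The helper name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_qneg() {
    assert_eq!(vqnegb_s8(i8::MIN), i8::MAX); // saturates instead of wrapping
    assert_eq!(vqnegd_s64(-5), 5);
}
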
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqsubb_s8(a: i8, b: i8) -> i8 {
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    simd_extract(vqsub_s8(a, b), 0)
}

/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqsubh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    simd_extract(vqsub_s16(a, b), 0)
}

/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqsubb_u8(a: u8, b: u8) -> u8 {
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    simd_extract(vqsub_u8(a, b), 0)
}

/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqsubh_u16(a: u16, b: u16) -> u16 {
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    simd_extract(vqsub_u16(a, b), 0)
}

/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.i32")]
        fn vqsubs_u32_(a: u32, b: u32) -> u32;
    }
    vqsubs_u32_(a, b)
}

/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.i64")]
        fn vqsubd_u64_(a: u64, b: u64) -> u64;
    }
    vqsubd_u64_(a, b)
}

/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqsubs_s32(a: i32, b: i32) -> i32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.i32")]
        fn vqsubs_s32_(a: i32, b: i32) -> i32;
    }
    vqsubs_s32_(a, b)
}

/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqsubd_s64(a: i64, b: i64) -> i64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.i64")]
        fn vqsubd_s64_(a: i64, b: i64) -> i64;
    }
    vqsubd_s64_(a, b)
}

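// Illustrative sketch (not part of the generated API): scalar saturating
// subtraction clamps to the type's range instead of wrapping. The helper name
// and constants are hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_qsub() {
    assert_eq!(vqsubb_u8(1, 2), 0);             // would underflow, clamps to 0
    assert_eq!(vqsubb_s8(i8::MIN, 1), i8::MIN); // clamps at the lower bound
}
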
/// Reverse bit order
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rbit.v8i8")]
        fn vrbit_s8_(a: int8x8_t) -> int8x8_t;
    }
    vrbit_s8_(a)
}

/// Reverse bit order
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rbit.v16i8")]
        fn vrbitq_s8_(a: int8x16_t) -> int8x16_t;
    }
    vrbitq_s8_(a)
}

/// Reverse bit order
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    transmute(vrbit_s8(transmute(a)))
}

/// Reverse bit order
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    transmute(vrbitq_s8(transmute(a)))
}

/// Reverse bit order
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    transmute(vrbit_s8(transmute(a)))
}

/// Reverse bit order
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    transmute(vrbitq_s8(transmute(a)))
}

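// Illustrative sketch (not part of the generated API): RBIT reverses the bit
// order within each byte lane independently, so 0b0000_0001 becomes
// 0b1000_0000. The helper name is hypothetical; `vdup_n_u8` and `vget_lane_u8`
// are assumed to be in scope via `use super::*;`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_rbit_u8() {
    let r = vrbit_u8(vdup_n_u8(0b0000_0001));
    assert_eq!(vget_lane_u8::<0>(r), 0b1000_0000);
}
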
/// Floating-point round to integral exact, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.rint.v2f32")]
        fn vrndx_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrndx_f32_(a)
}

/// Floating-point round to integral exact, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.rint.v4f32")]
        fn vrndxq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrndxq_f32_(a)
}

/// Floating-point round to integral exact, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndx_f64(a: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.rint.v1f64")]
        fn vrndx_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrndx_f64_(a)
}

/// Floating-point round to integral exact, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.rint.v2f64")]
        fn vrndxq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrndxq_f64_(a)
}

/// Floating-point round to integral, to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinta))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.round.v2f32")]
        fn vrnda_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrnda_f32_(a)
}

/// Floating-point round to integral, to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinta))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.round.v4f32")]
        fn vrndaq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrndaq_f32_(a)
}

/// Floating-point round to integral, to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinta))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrnda_f64(a: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.round.v1f64")]
        fn vrnda_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrnda_f64_(a)
}

/// Floating-point round to integral, to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinta))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.round.v2f64")]
        fn vrndaq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrndaq_f64_(a)
}

/// Floating-point round to integral, to nearest with ties to even
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndn_f64(a: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frintn.v1f64")]
        fn vrndn_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrndn_f64_(a)
}

/// Floating-point round to integral, to nearest with ties to even
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frintn.v2f64")]
        fn vrndnq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrndnq_f64_(a)
}

/// Floating-point round to integral, to nearest with ties to even
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndns_f32(a: f32) -> f32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.roundeven.f32")]
        fn vrndns_f32_(a: f32) -> f32;
    }
    vrndns_f32_(a)
}

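// Illustrative sketch (not part of the generated API): FRINTN rounds to
// nearest with ties to even, unlike the vrnda_* family above (ties away from
// zero), so 2.5 rounds down while 3.5 rounds up. The helper name is
// hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_rndns_f32() {
    assert_eq!(vrndns_f32(2.5), 2.0); // tie goes to the even neighbour
    assert_eq!(vrndns_f32(3.5), 4.0);
}
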
/// Floating-point round to integral, toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintm))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.floor.v2f32")]
        fn vrndm_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrndm_f32_(a)
}

/// Floating-point round to integral, toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintm))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.floor.v4f32")]
        fn vrndmq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrndmq_f32_(a)
}

/// Floating-point round to integral, toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintm))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndm_f64(a: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.floor.v1f64")]
        fn vrndm_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrndm_f64_(a)
}

/// Floating-point round to integral, toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintm))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.floor.v2f64")]
        fn vrndmq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrndmq_f64_(a)
}

/// Floating-point round to integral, toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.ceil.v2f32")]
        fn vrndp_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrndp_f32_(a)
}

/// Floating-point round to integral, toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.ceil.v4f32")]
        fn vrndpq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrndpq_f32_(a)
}

/// Floating-point round to integral, toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndp_f64(a: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.ceil.v1f64")]
        fn vrndp_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrndp_f64_(a)
}

/// Floating-point round to integral, toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.ceil.v2f64")]
        fn vrndpq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrndpq_f64_(a)
}

/// Floating-point round to integral, toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintz))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.trunc.v2f32")]
        fn vrnd_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrnd_f32_(a)
}

/// Floating-point round to integral, toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintz))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.trunc.v4f32")]
        fn vrndq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrndq_f32_(a)
}

/// Floating-point round to integral, toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintz))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrnd_f64(a: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.trunc.v1f64")]
        fn vrnd_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrnd_f64_(a)
}

/// Floating-point round to integral, toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintz))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.trunc.v2f64")]
        fn vrndq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrndq_f64_(a)
}

/// Floating-point round to integral, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinti))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.nearbyint.v2f32")]
        fn vrndi_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrndi_f32_(a)
}

/// Floating-point round to integral, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinti))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.nearbyint.v4f32")]
        fn vrndiq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrndiq_f32_(a)
}

/// Floating-point round to integral, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinti))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndi_f64(a: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.nearbyint.v1f64")]
        fn vrndi_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrndi_f64_(a)
}

/// Floating-point round to integral, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinti))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.nearbyint.v2f64")]
        fn vrndiq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrndiq_f64_(a)
}

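// Editorial example (not emitted by stdarch-gen): a minimal sketch showing how
// the rounding-mode intrinsics above differ on a tie value. Assumes an AArch64
// target with NEON available; uses only intrinsics defined in this crate.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn rounding_modes_example() {
    let v = vdupq_n_f64(2.5);
    assert_eq!(vgetq_lane_f64::<0>(vrndaq_f64(v)), 3.0); // frinta: ties away from zero
    assert_eq!(vgetq_lane_f64::<0>(vrndnq_f64(v)), 2.0); // frintn: ties to even
    assert_eq!(vgetq_lane_f64::<0>(vrndmq_f64(v)), 2.0); // frintm: toward minus infinity
    assert_eq!(vgetq_lane_f64::<0>(vrndpq_f64(v)), 3.0); // frintp: toward plus infinity
    assert_eq!(vgetq_lane_f64::<0>(vrndq_f64(v)), 2.0); // frintz: toward zero
    assert_eq!(vrndns_f32(2.5), 2.0); // scalar ties-to-even variant
}
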
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqaddb_s8(a: i8, b: i8) -> i8 {
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    simd_extract(vqadd_s8(a, b), 0)
}

/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqaddh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    simd_extract(vqadd_s16(a, b), 0)
}

/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqaddb_u8(a: u8, b: u8) -> u8 {
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    simd_extract(vqadd_u8(a, b), 0)
}

/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqaddh_u16(a: u16, b: u16) -> u16 {
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    simd_extract(vqadd_u16(a, b), 0)
}

/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.i32")]
        fn vqadds_u32_(a: u32, b: u32) -> u32;
    }
    vqadds_u32_(a, b)
}

/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqaddd_u64(a: u64, b: u64) -> u64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.i64")]
        fn vqaddd_u64_(a: u64, b: u64) -> u64;
    }
    vqaddd_u64_(a, b)
}

/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqadds_s32(a: i32, b: i32) -> i32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.i32")]
        fn vqadds_s32_(a: i32, b: i32) -> i32;
    }
    vqadds_s32_(a, b)
}

/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqaddd_s64(a: i64, b: i64) -> i64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.i64")]
        fn vqaddd_s64_(a: i64, b: i64) -> i64;
    }
    vqaddd_s64_(a, b)
}

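// Editorial example (not emitted by stdarch-gen): a sketch of the scalar
// saturating-add intrinsics above; saturation clamps at the type bounds
// instead of wrapping. Assumes an AArch64 target with NEON available.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn scalar_saturating_add_example() {
    assert_eq!(vqaddb_s8(i8::MAX, 1), i8::MAX); // sqadd clamps at the signed maximum
    assert_eq!(vqaddb_u8(200, 100), u8::MAX); // uqadd clamps at the unsigned maximum
    assert_eq!(vqaddd_s64(i64::MIN, -1), i64::MIN); // clamps at the signed minimum too
    assert_eq!(vqadds_u32(1, 2), 3); // non-saturating cases add normally
}
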
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0f64")]
        fn vld1_f64_x2_(a: *const f64) -> float64x1x2_t;
    }
    vld1_f64_x2_(a)
}

/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0f64")]
        fn vld1q_f64_x2_(a: *const f64) -> float64x2x2_t;
    }
    vld1q_f64_x2_(a)
}

/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0f64")]
        fn vld1_f64_x3_(a: *const f64) -> float64x1x3_t;
    }
    vld1_f64_x3_(a)
}

/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0f64")]
        fn vld1q_f64_x3_(a: *const f64) -> float64x2x3_t;
    }
    vld1q_f64_x3_(a)
}

/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0f64")]
        fn vld1_f64_x4_(a: *const f64) -> float64x1x4_t;
    }
    vld1_f64_x4_(a)
}

/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0f64")]
        fn vld1q_f64_x4_(a: *const f64) -> float64x2x4_t;
    }
    vld1q_f64_x4_(a)
}

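// Editorial example (not emitted by stdarch-gen): the vld1*_x2/_x3/_x4 forms
// load consecutive, non-interleaved vectors from memory. A minimal sketch for
// vld1q_f64_x2, assuming the pointer is valid for the whole load.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn ld1_multi_register_example() {
    let data: [f64; 4] = [1.0, 2.0, 3.0, 4.0];
    let r = vld1q_f64_x2(data.as_ptr());
    assert_eq!(vgetq_lane_f64::<1>(r.0), 2.0); // first register holds data[0..2]
    assert_eq!(vgetq_lane_f64::<0>(r.1), 3.0); // second register holds data[2..4]
}
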
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2.v2i64.p0v2i64")]
        fn vld2q_s64_(ptr: *const int64x2_t) -> int64x2x2_t;
    }
    vld2q_s64_(a as _)
}

/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    transmute(vld2q_s64(transmute(a)))
}

/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    transmute(vld2q_s64(transmute(a)))
}

/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2.v1f64.p0v1f64")]
        fn vld2_f64_(ptr: *const float64x1_t) -> float64x1x2_t;
    }
    vld2_f64_(a as _)
}

/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2.v2f64.p0v2f64")]
        fn vld2q_f64_(ptr: *const float64x2_t) -> float64x2x2_t;
    }
    vld2q_f64_(a as _)
}

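// Editorial example (not emitted by stdarch-gen): unlike vld1*_x2, the vld2
// family de-interleaves element pairs into the two destination registers.
// A minimal sketch, assuming a valid pointer to four f64 values.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn ld2_deinterleave_example() {
    let data: [f64; 4] = [0.0, 1.0, 2.0, 3.0];
    let r = vld2q_f64(data.as_ptr());
    assert_eq!(vgetq_lane_f64::<1>(r.0), 2.0); // even-indexed elements: [0.0, 2.0]
    assert_eq!(vgetq_lane_f64::<1>(r.1), 3.0); // odd-indexed elements: [1.0, 3.0]
}
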
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2r.v2i64.p0i64")]
        fn vld2q_dup_s64_(ptr: *const i64) -> int64x2x2_t;
    }
    vld2q_dup_s64_(a as _)
}

/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    transmute(vld2q_dup_s64(transmute(a)))
}

/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    transmute(vld2q_dup_s64(transmute(a)))
}

/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2r.v1f64.p0f64")]
        fn vld2_dup_f64_(ptr: *const f64) -> float64x1x2_t;
    }
    vld2_dup_f64_(a as _)
}

/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2r.v2f64.p0f64")]
        fn vld2q_dup_f64_(ptr: *const f64) -> float64x2x2_t;
    }
    vld2q_dup_f64_(a as _)
}

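// Editorial example (not emitted by stdarch-gen): the _dup (ld2r) variants read
// a single 2-element structure and broadcast it to every lane of both
// registers. A minimal sketch, assuming a valid pointer to two f64 values.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn ld2_dup_example() {
    let data: [f64; 2] = [1.0, 2.0];
    let r = vld2q_dup_f64(data.as_ptr());
    assert_eq!(vgetq_lane_f64::<0>(r.0), 1.0); // every lane of r.0 is data[0]
    assert_eq!(vgetq_lane_f64::<1>(r.0), 1.0);
    assert_eq!(vgetq_lane_f64::<1>(r.1), 2.0); // every lane of r.1 is data[1]
}
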
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> int8x16x2_t {
    static_assert_uimm_bits!(LANE, 4);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0i8")]
        fn vld2q_lane_s8_(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t;
    }
    vld2q_lane_s8_(b.0, b.1, LANE as i64, a as _)
}

/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> int64x1x2_t {
    static_assert!(LANE == 0);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0i8")]
        fn vld2_lane_s64_(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t;
    }
    vld2_lane_s64_(b.0, b.1, LANE as i64, a as _)
}

/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) -> int64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0i8")]
        fn vld2q_lane_s64_(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t;
    }
    vld2q_lane_s64_(b.0, b.1, LANE as i64, a as _)
}

/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t {
    static_assert!(LANE == 0);
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t {
    static_assert!(LANE == 0);
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> float64x1x2_t {
    static_assert!(LANE == 0);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0i8")]
        fn vld2_lane_f64_(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t;
    }
    vld2_lane_f64_(b.0, b.1, LANE as i64, a as _)
}

/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -> float64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0i8")]
        fn vld2q_lane_f64_(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8) -> float64x2x2_t;
    }
    vld2q_lane_f64_(b.0, b.1, LANE as i64, a as _)
}

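// Editorial example (not emitted by stdarch-gen): the _lane variants load one
// 2-element structure into lane LANE of the two source registers and return
// the updated registers, leaving the other lanes untouched. A minimal sketch
// with LANE = 0, assuming a valid pointer to two i8 values.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn ld2_lane_example() {
    let data: [i8; 2] = [9, 10];
    let zero = vdupq_n_s8(0);
    let r = vld2q_lane_s8::<0>(data.as_ptr(), int8x16x2_t(zero, zero));
    assert_eq!(vgetq_lane_s8::<0>(r.0), 9); // lane 0 of r.0 came from memory
    assert_eq!(vgetq_lane_s8::<0>(r.1), 10); // lane 0 of r.1 came from memory
    assert_eq!(vgetq_lane_s8::<1>(r.0), 0); // other lanes keep the input values
}
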
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3.v2i64.p0v2i64")]
        fn vld3q_s64_(ptr: *const int64x2_t) -> int64x2x3_t;
    }
    vld3q_s64_(a as _)
}

/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    transmute(vld3q_s64(transmute(a)))
}

/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    transmute(vld3q_s64(transmute(a)))
}

/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3.v1f64.p0v1f64")]
        fn vld3_f64_(ptr: *const float64x1_t) -> float64x1x3_t;
    }
    vld3_f64_(a as _)
}

/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3.v2f64.p0v2f64")]
        fn vld3q_f64_(ptr: *const float64x2_t) -> float64x2x3_t;
    }
    vld3q_f64_(a as _)
}

/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3r.v2i64.p0i64")]
        fn vld3q_dup_s64_(ptr: *const i64) -> int64x2x3_t;
    }
    vld3q_dup_s64_(a as _)
}

/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    transmute(vld3q_dup_s64(transmute(a)))
}

/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    transmute(vld3q_dup_s64(transmute(a)))
}

/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3r.v1f64.p0f64")]
        fn vld3_dup_f64_(ptr: *const f64) -> float64x1x3_t;
    }
    vld3_dup_f64_(a as _)
}

/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3r.v2f64.p0f64")]
        fn vld3q_dup_f64_(ptr: *const f64) -> float64x2x3_t;
    }
    vld3q_dup_f64_(a as _)
}

/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> int8x16x3_t {
    static_assert_uimm_bits!(LANE, 4);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0i8")]
        fn vld3q_lane_s8_(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *const i8) -> int8x16x3_t;
    }
    vld3q_lane_s8_(b.0, b.1, b.2, LANE as i64, a as _)
}

/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> int64x1x3_t {
    static_assert!(LANE == 0);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0i8")]
        fn vld3_lane_s64_(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *const i8) -> int64x1x3_t;
    }
    vld3_lane_s64_(b.0, b.1, b.2, LANE as i64, a as _)
}

/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> int64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0i8")]
        fn vld3q_lane_s64_(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *const i8) -> int64x2x3_t;
    }
    vld3q_lane_s64_(b.0, b.1, b.2, LANE as i64, a as _)
}

/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t {
    static_assert!(LANE == 0);
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t {
    static_assert!(LANE == 0);
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) -> float64x1x3_t {
    static_assert!(LANE == 0);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0i8")]
        fn vld3_lane_f64_(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *const i8) -> float64x1x3_t;
    }
    vld3_lane_f64_(b.0, b.1, b.2, LANE as i64, a as _)
}

/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x3_t) -> float64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0i8")]
        fn vld3q_lane_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *const i8) -> float64x2x3_t;
    }
    vld3q_lane_f64_(b.0, b.1, b.2, LANE as i64, a as _)
}

/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4.v2i64.p0v2i64")]
        fn vld4q_s64_(ptr: *const int64x2_t) -> int64x2x4_t;
    }
    vld4q_s64_(a as _)
}

/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    transmute(vld4q_s64(transmute(a)))
}

/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    transmute(vld4q_s64(transmute(a)))
}

/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4.v1f64.p0v1f64")]
        fn vld4_f64_(ptr: *const float64x1_t) -> float64x1x4_t;
    }
    vld4_f64_(a as _)
}

/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4.v2f64.p0v2f64")]
        fn vld4q_f64_(ptr: *const float64x2_t) -> float64x2x4_t;
    }
    vld4q_f64_(a as _)
}

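// Editorial example (not emitted by stdarch-gen): the vld4 family de-interleaves
// groups of four elements across four registers. A minimal sketch for
// vld4q_f64, assuming a valid pointer to eight f64 values.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn ld4_deinterleave_example() {
    let data: [f64; 8] = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0];
    let r = vld4q_f64(data.as_ptr());
    assert_eq!(vgetq_lane_f64::<1>(r.0), 4.0); // r.0 = [0.0, 4.0]
    assert_eq!(vgetq_lane_f64::<1>(r.3), 7.0); // r.3 = [3.0, 7.0]
}
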
3c0e092e 7282/// Load single 4-element structure and replicate to all lanes of four registers
f2b60f7d
FG
7283///
7284/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)
17df50a5
XL
7285#[inline]
7286#[target_feature(enable = "neon")]
3c0e092e 7287#[cfg_attr(test, assert_instr(ld4r))]
a2a8927a 7288#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7289pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t {
17df50a5 7290 #[allow(improper_ctypes)]
c295e0f8 7291 extern "unadjusted" {
3c0e092e
XL
7292 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4r.v2i64.p0i64")]
7293 fn vld4q_dup_s64_(ptr: *const i64) -> int64x2x4_t;
17df50a5 7294 }
a2a8927a 7295 vld4q_dup_s64_(a as _)
17df50a5
XL
7296}
7297
3c0e092e 7298/// Load single 4-element structure and replicate to all lanes of four registers
f2b60f7d
FG
7299///
7300/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)
17df50a5
XL
7301#[inline]
7302#[target_feature(enable = "neon")]
3c0e092e 7303#[cfg_attr(test, assert_instr(ld4r))]
a2a8927a 7304#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
7305pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
7306 transmute(vld4q_dup_s64(transmute(a)))
7307}
7308
7309/// Load single 4-element structure and replicate to all lanes of four registers
f2b60f7d
FG
7310///
7311/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)
3c0e092e
XL
7312#[inline]
7313#[target_feature(enable = "neon,aes")]
7314#[cfg_attr(test, assert_instr(ld4r))]
a2a8927a 7315#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
7316pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
7317 transmute(vld4q_dup_s64(transmute(a)))
7318}
7319
7320/// Load single 4-element structure and replicate to all lanes of four registers
f2b60f7d
FG
7321///
7322/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)
3c0e092e
XL
7323#[inline]
7324#[target_feature(enable = "neon")]
7325#[cfg_attr(test, assert_instr(ld4r))]
a2a8927a 7326#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7327pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t {
17df50a5 7328 #[allow(improper_ctypes)]
c295e0f8 7329 extern "unadjusted" {
3c0e092e
XL
7330 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4r.v1f64.p0f64")]
7331 fn vld4_dup_f64_(ptr: *const f64) -> float64x1x4_t;
17df50a5 7332 }
a2a8927a 7333 vld4_dup_f64_(a as _)
17df50a5
XL
7334}
7335
3c0e092e 7336/// Load single 4-element structure and replicate to all lanes of four registers
f2b60f7d
FG
7337///
7338/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)
17df50a5
XL
7339#[inline]
7340#[target_feature(enable = "neon")]
3c0e092e 7341#[cfg_attr(test, assert_instr(ld4r))]
a2a8927a 7342#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7343pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t {
17df50a5 7344 #[allow(improper_ctypes)]
c295e0f8 7345 extern "unadjusted" {
3c0e092e
XL
7346 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4r.v2f64.p0f64")]
7347 fn vld4q_dup_f64_(ptr: *const f64) -> float64x2x4_t;
17df50a5 7348 }
a2a8927a 7349 vld4q_dup_f64_(a as _)
17df50a5
XL
7350}
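
// Editorial usage sketch (not part of the generated spec): `vld4q_dup_f64`
// reads one 4-element structure and broadcasts each element to both lanes of
// its output register. The buffer below is an assumption for illustration.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vld4q_dup_f64() -> float64x2x4_t {
    let data: [f64; 4] = [1.0, 2.0, 3.0, 4.0];
    // Result: register 0 holds [1.0, 1.0], register 1 holds [2.0, 2.0], etc.
    vld4q_dup_f64(data.as_ptr())
}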

/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> int8x16x4_t {
    static_assert_uimm_bits!(LANE, 4);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0i8")]
        fn vld4q_lane_s8_(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, n: i64, ptr: *const i8) -> int8x16x4_t;
    }
    vld4q_lane_s8_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
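
// Editorial usage sketch (not part of the generated spec): a `vld4*_lane_*`
// intrinsic reloads only lane `LANE` of each of the four input registers from
// the four consecutive elements at the pointer; every other lane passes
// through unchanged. The buffer contents are assumptions for illustration.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vld4q_lane_s8(regs: int8x16x4_t) -> int8x16x4_t {
    let fresh: [i8; 4] = [10, 11, 12, 13];
    // Lane 0 of regs.0..regs.3 becomes 10, 11, 12, 13 respectively.
    vld4q_lane_s8::<0>(fresh.as_ptr(), regs)
}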

/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> int64x1x4_t {
    static_assert!(LANE == 0);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0i8")]
        fn vld4_lane_s64_(a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, n: i64, ptr: *const i8) -> int64x1x4_t;
    }
    vld4_lane_s64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}

/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) -> int64x2x4_t {
    static_assert_uimm_bits!(LANE, 1);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0i8")]
        fn vld4q_lane_s64_(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, n: i64, ptr: *const i8) -> int64x2x4_t;
    }
    vld4q_lane_s64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}

/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t {
    static_assert!(LANE == 0);
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t {
    static_assert!(LANE == 0);
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}

/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) -> float64x1x4_t {
    static_assert!(LANE == 0);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0i8")]
        fn vld4_lane_f64_(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, n: i64, ptr: *const i8) -> float64x1x4_t;
    }
    vld4_lane_f64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}

/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -> float64x2x4_t {
    static_assert_uimm_bits!(LANE, 1);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0i8")]
        fn vld4q_lane_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, n: i64, ptr: *const i8) -> float64x2x4_t;
    }
    vld4q_lane_f64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}

/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
    static_assert!(LANE == 0);
    *a = simd_extract(b, LANE as u32);
}

/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    *a = simd_extract(b, LANE as u32);
}
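
// Editorial usage sketch (not part of the generated spec): `vst1q_lane_f64`
// writes just lane `LANE` of the vector to the destination pointer.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vst1q_lane_f64(v: float64x2_t) -> f64 {
    let mut out = 0.0f64;
    // Stores lane 1 of `v` into `out`.
    vst1q_lane_f64::<1>(&mut out, v);
    out
}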

/// Store multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x2.v1f64.p0f64")]
        fn vst1_f64_x2_(a: float64x1_t, b: float64x1_t, ptr: *mut f64);
    }
    vst1_f64_x2_(b.0, b.1, a)
}

/// Store multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x2.v2f64.p0f64")]
        fn vst1q_f64_x2_(a: float64x2_t, b: float64x2_t, ptr: *mut f64);
    }
    vst1q_f64_x2_(b.0, b.1, a)
}

/// Store multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x3.v1f64.p0f64")]
        fn vst1_f64_x3_(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64);
    }
    vst1_f64_x3_(b.0, b.1, b.2, a)
}

/// Store multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x3.v2f64.p0f64")]
        fn vst1q_f64_x3_(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64);
    }
    vst1q_f64_x3_(b.0, b.1, b.2, a)
}

/// Store multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x4.v1f64.p0f64")]
        fn vst1_f64_x4_(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut f64);
    }
    vst1_f64_x4_(b.0, b.1, b.2, b.3, a)
}

/// Store multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x4.v2f64.p0f64")]
        fn vst1q_f64_x4_(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut f64);
    }
    vst1q_f64_x4_(b.0, b.1, b.2, b.3, a)
}
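
// Editorial usage sketch (not part of the generated spec): the `vst1*_x4`
// forms store four registers to consecutive, non-interleaved memory, so the
// destination must have room for all eight f64 values in this case.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vst1q_f64_x4(regs: float64x2x4_t) -> [f64; 8] {
    let mut out = [0.0f64; 8];
    // out[0..2] <- regs.0, out[2..4] <- regs.1, and so on.
    vst1q_f64_x4(out.as_mut_ptr(), regs);
    out
}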

/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2.v2i64.p0i8")]
        fn vst2q_s64_(a: int64x2_t, b: int64x2_t, ptr: *mut i8);
    }
    vst2q_s64_(b.0, b.1, a as _)
}

/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) {
    transmute(vst2q_s64(transmute(a), transmute(b)))
}

/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) {
    transmute(vst2q_s64(transmute(a), transmute(b)))
}

/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2.v1f64.p0i8")]
        fn vst2_f64_(a: float64x1_t, b: float64x1_t, ptr: *mut i8);
    }
    vst2_f64_(b.0, b.1, a as _)
}

/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2.v2f64.p0i8")]
        fn vst2q_f64_(a: float64x2_t, b: float64x2_t, ptr: *mut i8);
    }
    vst2q_f64_(b.0, b.1, a as _)
}
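
// Editorial usage sketch (not part of the generated spec): `vst2q_f64`
// interleaves the two registers on the way out, so memory receives
// pairs.0[0], pairs.1[0], pairs.0[1], pairs.1[1] in that order.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vst2q_f64(pairs: float64x2x2_t) -> [f64; 4] {
    let mut out = [0.0f64; 4];
    vst2q_f64(out.as_mut_ptr(), pairs);
    out
}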
7711
3c0e092e 7712/// Store multiple 2-element structures from two registers
f2b60f7d
FG
7713///
7714/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)
17df50a5
XL
7715#[inline]
7716#[target_feature(enable = "neon")]
3c0e092e
XL
7717#[cfg_attr(test, assert_instr(st2, LANE = 0))]
7718#[rustc_legacy_const_generics(2)]
a2a8927a 7719#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7720pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
353b0b11 7721 static_assert_uimm_bits!(LANE, 4);
3c0e092e
XL
7722 #[allow(improper_ctypes)]
7723 extern "unadjusted" {
7724 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v16i8.p0i8")]
7725 fn vst2q_lane_s8_(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8);
7726 }
a2a8927a 7727 vst2q_lane_s8_(b.0, b.1, LANE as i64, a as _)
3c0e092e
XL
7728}
7729
7730/// Store multiple 2-element structures from two registers
f2b60f7d
FG
7731///
7732/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)
3c0e092e
XL
7733#[inline]
7734#[target_feature(enable = "neon")]
7735#[cfg_attr(test, assert_instr(st2, LANE = 0))]
7736#[rustc_legacy_const_generics(2)]
a2a8927a 7737#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7738pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
353b0b11 7739 static_assert!(LANE == 0);
3c0e092e
XL
7740 #[allow(improper_ctypes)]
7741 extern "unadjusted" {
7742 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v1i64.p0i8")]
7743 fn vst2_lane_s64_(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8);
7744 }
a2a8927a 7745 vst2_lane_s64_(b.0, b.1, LANE as i64, a as _)
17df50a5
XL
7746}
7747
3c0e092e 7748/// Store multiple 2-element structures from two registers
f2b60f7d
FG
7749///
7750/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)
17df50a5
XL
7751#[inline]
7752#[target_feature(enable = "neon")]
3c0e092e
XL
7753#[cfg_attr(test, assert_instr(st2, LANE = 0))]
7754#[rustc_legacy_const_generics(2)]
a2a8927a 7755#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7756pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
353b0b11 7757 static_assert_uimm_bits!(LANE, 1);
3c0e092e
XL
7758 #[allow(improper_ctypes)]
7759 extern "unadjusted" {
7760 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v2i64.p0i8")]
7761 fn vst2q_lane_s64_(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8);
7762 }
a2a8927a 7763 vst2q_lane_s64_(b.0, b.1, LANE as i64, a as _)
17df50a5
XL
7764}
7765
3c0e092e 7766/// Store multiple 2-element structures from two registers
f2b60f7d
FG
7767///
7768/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)
17df50a5
XL
7769#[inline]
7770#[target_feature(enable = "neon")]
3c0e092e
XL
7771#[cfg_attr(test, assert_instr(st2, LANE = 0))]
7772#[rustc_legacy_const_generics(2)]
a2a8927a 7773#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7774pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
353b0b11 7775 static_assert_uimm_bits!(LANE, 4);
3c0e092e
XL
7776 transmute(vst2q_lane_s8::<LANE>(transmute(a), transmute(b)))
7777}
7778
7779/// Store multiple 2-element structures from two registers
f2b60f7d
FG
7780///
7781/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)
3c0e092e
XL
7782#[inline]
7783#[target_feature(enable = "neon")]
7784#[cfg_attr(test, assert_instr(st2, LANE = 0))]
7785#[rustc_legacy_const_generics(2)]
a2a8927a 7786#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7787pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
353b0b11 7788 static_assert!(LANE == 0);
3c0e092e 7789 transmute(vst2_lane_s64::<LANE>(transmute(a), transmute(b)))
17df50a5
XL
7790}
7791
3c0e092e 7792/// Store multiple 2-element structures from two registers
f2b60f7d
FG
7793///
7794/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)
17df50a5
XL
7795#[inline]
7796#[target_feature(enable = "neon")]
3c0e092e
XL
7797#[cfg_attr(test, assert_instr(st2, LANE = 0))]
7798#[rustc_legacy_const_generics(2)]
a2a8927a 7799#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7800pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
353b0b11 7801 static_assert_uimm_bits!(LANE, 1);
3c0e092e 7802 transmute(vst2q_lane_s64::<LANE>(transmute(a), transmute(b)))
17df50a5
XL
7803}
7804
3c0e092e 7805/// Store multiple 2-element structures from two registers
f2b60f7d
FG
7806///
7807/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)
17df50a5
XL
7808#[inline]
7809#[target_feature(enable = "neon")]
3c0e092e
XL
7810#[cfg_attr(test, assert_instr(st2, LANE = 0))]
7811#[rustc_legacy_const_generics(2)]
a2a8927a 7812#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7813pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
353b0b11 7814 static_assert_uimm_bits!(LANE, 4);
3c0e092e
XL
7815 transmute(vst2q_lane_s8::<LANE>(transmute(a), transmute(b)))
7816}
7817
7818/// Store multiple 2-element structures from two registers
f2b60f7d
FG
7819///
7820/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)
3c0e092e
XL
7821#[inline]
7822#[target_feature(enable = "neon,aes")]
7823#[cfg_attr(test, assert_instr(st2, LANE = 0))]
7824#[rustc_legacy_const_generics(2)]
a2a8927a 7825#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7826pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
353b0b11 7827 static_assert!(LANE == 0);
3c0e092e
XL
7828 transmute(vst2_lane_s64::<LANE>(transmute(a), transmute(b)))
7829}
7830
7831/// Store multiple 2-element structures from two registers
f2b60f7d
FG
7832///
7833/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)
3c0e092e
XL
7834#[inline]
7835#[target_feature(enable = "neon,aes")]
7836#[cfg_attr(test, assert_instr(st2, LANE = 0))]
7837#[rustc_legacy_const_generics(2)]
a2a8927a 7838#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7839pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
353b0b11 7840 static_assert_uimm_bits!(LANE, 1);
3c0e092e 7841 transmute(vst2q_lane_s64::<LANE>(transmute(a), transmute(b)))
17df50a5
XL
7842}
7843
3c0e092e 7844/// Store multiple 2-element structures from two registers
f2b60f7d
FG
7845///
7846/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)
17df50a5
XL
7847#[inline]
7848#[target_feature(enable = "neon")]
3c0e092e
XL
7849#[cfg_attr(test, assert_instr(st2, LANE = 0))]
7850#[rustc_legacy_const_generics(2)]
a2a8927a 7851#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7852pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
353b0b11 7853 static_assert!(LANE == 0);
17df50a5 7854 #[allow(improper_ctypes)]
c295e0f8 7855 extern "unadjusted" {
3c0e092e
XL
7856 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v1f64.p0i8")]
7857 fn vst2_lane_f64_(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8);
17df50a5 7858 }
a2a8927a 7859 vst2_lane_f64_(b.0, b.1, LANE as i64, a as _)
17df50a5
XL
7860}
7861
3c0e092e 7862/// Store multiple 2-element structures from two registers
f2b60f7d
FG
7863///
7864/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)
17df50a5
XL
7865#[inline]
7866#[target_feature(enable = "neon")]
3c0e092e
XL
7867#[cfg_attr(test, assert_instr(st2, LANE = 0))]
7868#[rustc_legacy_const_generics(2)]
a2a8927a 7869#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7870pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) {
353b0b11 7871 static_assert_uimm_bits!(LANE, 1);
17df50a5 7872 #[allow(improper_ctypes)]
c295e0f8 7873 extern "unadjusted" {
3c0e092e
XL
7874 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v2f64.p0i8")]
7875 fn vst2q_lane_f64_(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8);
17df50a5 7876 }
a2a8927a 7877 vst2q_lane_f64_(b.0, b.1, LANE as i64, a as _)
17df50a5
XL
7878}
7879
3c0e092e 7880/// Store multiple 3-element structures from three registers
f2b60f7d
FG
7881///
7882/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)
17df50a5
XL
7883#[inline]
7884#[target_feature(enable = "neon")]
3c0e092e 7885#[cfg_attr(test, assert_instr(st3))]
a2a8927a 7886#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7887pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) {
17df50a5 7888 #[allow(improper_ctypes)]
c295e0f8 7889 extern "unadjusted" {
3c0e092e
XL
7890 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3.v2i64.p0i8")]
7891 fn vst3q_s64_(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8);
17df50a5 7892 }
a2a8927a 7893 vst3q_s64_(b.0, b.1, b.2, a as _)
17df50a5
XL
7894}
7895
3c0e092e 7896/// Store multiple 3-element structures from three registers
f2b60f7d
FG
7897///
7898/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)
17df50a5
XL
7899#[inline]
7900#[target_feature(enable = "neon")]
3c0e092e 7901#[cfg_attr(test, assert_instr(st3))]
a2a8927a 7902#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
7903pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) {
7904 transmute(vst3q_s64(transmute(a), transmute(b)))
17df50a5
XL
7905}
7906
3c0e092e 7907/// Store multiple 3-element structures from three registers
f2b60f7d
FG
7908///
7909/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)
17df50a5 7910#[inline]
3c0e092e
XL
7911#[target_feature(enable = "neon,aes")]
7912#[cfg_attr(test, assert_instr(st3))]
a2a8927a 7913#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
7914pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) {
7915 transmute(vst3q_s64(transmute(a), transmute(b)))
17df50a5
XL
7916}
7917
3c0e092e 7918/// Store multiple 3-element structures from three registers
f2b60f7d
FG
7919///
7920/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)
17df50a5
XL
7921#[inline]
7922#[target_feature(enable = "neon")]
3c0e092e 7923#[cfg_attr(test, assert_instr(nop))]
a2a8927a 7924#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
7925pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) {
7926 #[allow(improper_ctypes)]
7927 extern "unadjusted" {
7928 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3.v1f64.p0i8")]
7929 fn vst3_f64_(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut i8);
7930 }
a2a8927a 7931 vst3_f64_(b.0, b.1, b.2, a as _)
17df50a5
XL
7932}
7933
3c0e092e 7934/// Store multiple 3-element structures from three registers
f2b60f7d
FG
7935///
7936/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)
17df50a5
XL
7937#[inline]
7938#[target_feature(enable = "neon")]
3c0e092e 7939#[cfg_attr(test, assert_instr(st3))]
a2a8927a 7940#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
7941pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) {
7942 #[allow(improper_ctypes)]
7943 extern "unadjusted" {
7944 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3.v2f64.p0i8")]
7945 fn vst3q_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8);
7946 }
a2a8927a 7947 vst3q_f64_(b.0, b.1, b.2, a as _)
17df50a5
XL
7948}
7949
3c0e092e 7950/// Store multiple 3-element structures from three registers
f2b60f7d
FG
7951///
7952/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)
17df50a5
XL
7953#[inline]
7954#[target_feature(enable = "neon")]
3c0e092e
XL
7955#[cfg_attr(test, assert_instr(st3, LANE = 0))]
7956#[rustc_legacy_const_generics(2)]
a2a8927a 7957#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7958pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
353b0b11 7959 static_assert_uimm_bits!(LANE, 4);
3c0e092e
XL
7960 #[allow(improper_ctypes)]
7961 extern "unadjusted" {
7962 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v16i8.p0i8")]
7963 fn vst3q_lane_s8_(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8);
7964 }
a2a8927a 7965 vst3q_lane_s8_(b.0, b.1, b.2, LANE as i64, a as _)
17df50a5
XL
7966}
7967
3c0e092e 7968/// Store multiple 3-element structures from three registers
f2b60f7d
FG
7969///
7970/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)
17df50a5
XL
7971#[inline]
7972#[target_feature(enable = "neon")]
3c0e092e
XL
7973#[cfg_attr(test, assert_instr(st3, LANE = 0))]
7974#[rustc_legacy_const_generics(2)]
a2a8927a 7975#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7976pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
353b0b11 7977 static_assert!(LANE == 0);
3c0e092e
XL
7978 #[allow(improper_ctypes)]
7979 extern "unadjusted" {
7980 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v1i64.p0i8")]
7981 fn vst3_lane_s64_(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8);
7982 }
a2a8927a 7983 vst3_lane_s64_(b.0, b.1, b.2, LANE as i64, a as _)
17df50a5
XL
7984}
7985
3c0e092e 7986/// Store multiple 3-element structures from three registers
f2b60f7d
FG
7987///
7988/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)
17df50a5
XL
7989#[inline]
7990#[target_feature(enable = "neon")]
3c0e092e
XL
7991#[cfg_attr(test, assert_instr(st3, LANE = 0))]
7992#[rustc_legacy_const_generics(2)]
a2a8927a 7993#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 7994pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
353b0b11 7995 static_assert_uimm_bits!(LANE, 1);
3c0e092e
XL
7996 #[allow(improper_ctypes)]
7997 extern "unadjusted" {
7998 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v2i64.p0i8")]
7999 fn vst3q_lane_s64_(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8);
8000 }
a2a8927a 8001 vst3q_lane_s64_(b.0, b.1, b.2, LANE as i64, a as _)
17df50a5
XL
8002}
8003
3c0e092e 8004/// Store multiple 3-element structures from three registers
f2b60f7d
FG
8005///
8006/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)
17df50a5
XL
8007#[inline]
8008#[target_feature(enable = "neon")]
3c0e092e
XL
8009#[cfg_attr(test, assert_instr(st3, LANE = 0))]
8010#[rustc_legacy_const_generics(2)]
a2a8927a 8011#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8012pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
353b0b11 8013 static_assert_uimm_bits!(LANE, 4);
3c0e092e 8014 transmute(vst3q_lane_s8::<LANE>(transmute(a), transmute(b)))
17df50a5
XL
8015}
8016
3c0e092e 8017/// Store multiple 3-element structures from three registers
f2b60f7d
FG
8018///
8019/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)
17df50a5
XL
8020#[inline]
8021#[target_feature(enable = "neon")]
3c0e092e
XL
8022#[cfg_attr(test, assert_instr(st3, LANE = 0))]
8023#[rustc_legacy_const_generics(2)]
a2a8927a 8024#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8025pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
353b0b11 8026 static_assert!(LANE == 0);
3c0e092e 8027 transmute(vst3_lane_s64::<LANE>(transmute(a), transmute(b)))
17df50a5
XL
8028}
8029
3c0e092e 8030/// Store multiple 3-element structures from three registers
f2b60f7d
FG
8031///
8032/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)
17df50a5
XL
8033#[inline]
8034#[target_feature(enable = "neon")]
3c0e092e
XL
8035#[cfg_attr(test, assert_instr(st3, LANE = 0))]
8036#[rustc_legacy_const_generics(2)]
a2a8927a 8037#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8038pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
353b0b11 8039 static_assert_uimm_bits!(LANE, 1);
3c0e092e 8040 transmute(vst3q_lane_s64::<LANE>(transmute(a), transmute(b)))
17df50a5
XL
8041}
8042
3c0e092e 8043/// Store multiple 3-element structures from three registers
f2b60f7d
FG
8044///
8045/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)
17df50a5
XL
8046#[inline]
8047#[target_feature(enable = "neon")]
3c0e092e
XL
8048#[cfg_attr(test, assert_instr(st3, LANE = 0))]
8049#[rustc_legacy_const_generics(2)]
a2a8927a 8050#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8051pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
353b0b11 8052 static_assert_uimm_bits!(LANE, 4);
3c0e092e 8053 transmute(vst3q_lane_s8::<LANE>(transmute(a), transmute(b)))
17df50a5
XL
8054}
8055
3c0e092e 8056/// Store multiple 3-element structures from three registers
f2b60f7d
FG
8057///
8058/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)
17df50a5 8059#[inline]
3c0e092e
XL
8060#[target_feature(enable = "neon,aes")]
8061#[cfg_attr(test, assert_instr(st3, LANE = 0))]
8062#[rustc_legacy_const_generics(2)]
a2a8927a 8063#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8064pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
353b0b11 8065 static_assert!(LANE == 0);
3c0e092e 8066 transmute(vst3_lane_s64::<LANE>(transmute(a), transmute(b)))
17df50a5
XL
8067}
8068
3c0e092e 8069/// Store multiple 3-element structures from three registers
f2b60f7d
FG
8070///
8071/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)
17df50a5 8072#[inline]
3c0e092e
XL
8073#[target_feature(enable = "neon,aes")]
8074#[cfg_attr(test, assert_instr(st3, LANE = 0))]
8075#[rustc_legacy_const_generics(2)]
a2a8927a 8076#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8077pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
353b0b11 8078 static_assert_uimm_bits!(LANE, 1);
3c0e092e 8079 transmute(vst3q_lane_s64::<LANE>(transmute(a), transmute(b)))
17df50a5
XL
8080}
8081
3c0e092e 8082/// Store multiple 3-element structures from three registers
f2b60f7d
FG
8083///
8084/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)
17df50a5
XL
8085#[inline]
8086#[target_feature(enable = "neon")]
3c0e092e
XL
8087#[cfg_attr(test, assert_instr(st3, LANE = 0))]
8088#[rustc_legacy_const_generics(2)]
a2a8927a 8089#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8090pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
353b0b11 8091 static_assert!(LANE == 0);
3c0e092e
XL
8092 #[allow(improper_ctypes)]
8093 extern "unadjusted" {
8094 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v1f64.p0i8")]
8095 fn vst3_lane_f64_(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8);
8096 }
a2a8927a 8097 vst3_lane_f64_(b.0, b.1, b.2, LANE as i64, a as _)
17df50a5
XL
8098}
8099
3c0e092e 8100/// Store multiple 3-element structures from three registers
f2b60f7d
FG
8101///
8102/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)
17df50a5
XL
8103#[inline]
8104#[target_feature(enable = "neon")]
3c0e092e
XL
8105#[cfg_attr(test, assert_instr(st3, LANE = 0))]
8106#[rustc_legacy_const_generics(2)]
a2a8927a 8107#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8108pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) {
353b0b11 8109 static_assert_uimm_bits!(LANE, 1);
3c0e092e
XL
8110 #[allow(improper_ctypes)]
8111 extern "unadjusted" {
8112 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v2f64.p0i8")]
8113 fn vst3q_lane_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8);
8114 }
a2a8927a 8115 vst3q_lane_f64_(b.0, b.1, b.2, LANE as i64, a as _)
17df50a5
XL
8116}
8117
3c0e092e 8118/// Store multiple 4-element structures from four registers
f2b60f7d
FG
8119///
8120/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)
17df50a5
XL
8121#[inline]
8122#[target_feature(enable = "neon")]
3c0e092e 8123#[cfg_attr(test, assert_instr(st4))]
a2a8927a 8124#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
8125pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) {
8126 #[allow(improper_ctypes)]
8127 extern "unadjusted" {
8128 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4.v2i64.p0i8")]
8129 fn vst4q_s64_(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8);
8130 }
a2a8927a 8131 vst4q_s64_(b.0, b.1, b.2, b.3, a as _)
17df50a5
XL
8132}
8133
3c0e092e 8134/// Store multiple 4-element structures from four registers
f2b60f7d
FG
8135///
8136/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)
17df50a5
XL
8137#[inline]
8138#[target_feature(enable = "neon")]
3c0e092e 8139#[cfg_attr(test, assert_instr(st4))]
a2a8927a 8140#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
8141pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) {
8142 transmute(vst4q_s64(transmute(a), transmute(b)))
17df50a5
XL
8143}
8144
3c0e092e 8145/// Store multiple 4-element structures from four registers
f2b60f7d
FG
8146///
8147/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)
17df50a5 8148#[inline]
3c0e092e
XL
8149#[target_feature(enable = "neon,aes")]
8150#[cfg_attr(test, assert_instr(st4))]
a2a8927a 8151#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
8152pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) {
8153 transmute(vst4q_s64(transmute(a), transmute(b)))
17df50a5
XL
8154}
8155
3c0e092e 8156/// Store multiple 4-element structures from four registers
f2b60f7d
FG
8157///
8158/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)
17df50a5
XL
8159#[inline]
8160#[target_feature(enable = "neon")]
3c0e092e 8161#[cfg_attr(test, assert_instr(nop))]
a2a8927a 8162#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
8163pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) {
8164 #[allow(improper_ctypes)]
8165 extern "unadjusted" {
8166 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4.v1f64.p0i8")]
8167 fn vst4_f64_(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut i8);
8168 }
a2a8927a 8169 vst4_f64_(b.0, b.1, b.2, b.3, a as _)
17df50a5
XL
8170}
8171
3c0e092e 8172/// Store multiple 4-element structures from four registers
f2b60f7d
FG
8173///
8174/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)
17df50a5
XL
8175#[inline]
8176#[target_feature(enable = "neon")]
3c0e092e 8177#[cfg_attr(test, assert_instr(st4))]
a2a8927a 8178#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
8179pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) {
8180 #[allow(improper_ctypes)]
8181 extern "unadjusted" {
8182 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4.v2f64.p0i8")]
8183 fn vst4q_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8);
8184 }
a2a8927a 8185 vst4q_f64_(b.0, b.1, b.2, b.3, a as _)
17df50a5
XL
8186}
8187
3c0e092e 8188/// Store multiple 4-element structures from four registers
f2b60f7d
FG
8189///
8190/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)
17df50a5
XL
8191#[inline]
8192#[target_feature(enable = "neon")]
3c0e092e
XL
8193#[cfg_attr(test, assert_instr(st4, LANE = 0))]
8194#[rustc_legacy_const_generics(2)]
a2a8927a 8195#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8196pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
353b0b11 8197 static_assert_uimm_bits!(LANE, 4);
3c0e092e
XL
8198 #[allow(improper_ctypes)]
8199 extern "unadjusted" {
8200 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v16i8.p0i8")]
8201 fn vst4q_lane_s8_(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, n: i64, ptr: *mut i8);
8202 }
a2a8927a 8203 vst4q_lane_s8_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
17df50a5
XL
8204}
8205
3c0e092e 8206/// Store multiple 4-element structures from four registers
f2b60f7d
FG
8207///
8208/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)
17df50a5
XL
8209#[inline]
8210#[target_feature(enable = "neon")]
3c0e092e
XL
8211#[cfg_attr(test, assert_instr(st4, LANE = 0))]
8212#[rustc_legacy_const_generics(2)]
a2a8927a 8213#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8214pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
353b0b11 8215 static_assert!(LANE == 0);
17df50a5 8216 #[allow(improper_ctypes)]
c295e0f8 8217 extern "unadjusted" {
3c0e092e
XL
8218 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v1i64.p0i8")]
8219 fn vst4_lane_s64_(a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, n: i64, ptr: *mut i8);
17df50a5 8220 }
a2a8927a 8221 vst4_lane_s64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
17df50a5
XL
8222}
8223
3c0e092e 8224/// Store multiple 4-element structures from four registers
f2b60f7d
FG
8225///
8226/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)
17df50a5
XL
8227#[inline]
8228#[target_feature(enable = "neon")]
3c0e092e
XL
8229#[cfg_attr(test, assert_instr(st4, LANE = 0))]
8230#[rustc_legacy_const_generics(2)]
a2a8927a 8231#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8232pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
353b0b11 8233 static_assert_uimm_bits!(LANE, 1);
17df50a5 8234 #[allow(improper_ctypes)]
c295e0f8 8235 extern "unadjusted" {
3c0e092e
XL
8236 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v2i64.p0i8")]
8237 fn vst4q_lane_s64_(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, n: i64, ptr: *mut i8);
17df50a5 8238 }
a2a8927a 8239 vst4q_lane_s64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
17df50a5
XL
8240}
8241
3c0e092e 8242/// Store multiple 4-element structures from four registers
f2b60f7d
FG
8243///
8244/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)
17df50a5
XL
8245#[inline]
8246#[target_feature(enable = "neon")]
3c0e092e
XL
8247#[cfg_attr(test, assert_instr(st4, LANE = 0))]
8248#[rustc_legacy_const_generics(2)]
a2a8927a 8249#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8250pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
353b0b11 8251 static_assert_uimm_bits!(LANE, 4);
3c0e092e 8252 transmute(vst4q_lane_s8::<LANE>(transmute(a), transmute(b)))
17df50a5
XL
8253}
8254
3c0e092e 8255/// Store multiple 4-element structures from four registers
f2b60f7d
FG
8256///
8257/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)
17df50a5
XL
8258#[inline]
8259#[target_feature(enable = "neon")]
3c0e092e
XL
8260#[cfg_attr(test, assert_instr(st4, LANE = 0))]
8261#[rustc_legacy_const_generics(2)]
a2a8927a 8262#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8263pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
353b0b11 8264 static_assert!(LANE == 0);
3c0e092e 8265 transmute(vst4_lane_s64::<LANE>(transmute(a), transmute(b)))
17df50a5
XL
8266}
8267
3c0e092e 8268/// Store multiple 4-element structures from four registers
f2b60f7d
FG
8269///
8270/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)
17df50a5
XL
8271#[inline]
8272#[target_feature(enable = "neon")]
3c0e092e
XL
8273#[cfg_attr(test, assert_instr(st4, LANE = 0))]
8274#[rustc_legacy_const_generics(2)]
a2a8927a 8275#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8276pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
353b0b11 8277 static_assert_uimm_bits!(LANE, 1);
3c0e092e 8278 transmute(vst4q_lane_s64::<LANE>(transmute(a), transmute(b)))
17df50a5
XL
8279}
8280
3c0e092e 8281/// Store multiple 4-element structures from four registers
f2b60f7d
FG
8282///
8283/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)
17df50a5
XL
8284#[inline]
8285#[target_feature(enable = "neon")]
3c0e092e
XL
8286#[cfg_attr(test, assert_instr(st4, LANE = 0))]
8287#[rustc_legacy_const_generics(2)]
a2a8927a 8288#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8289pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
353b0b11 8290 static_assert_uimm_bits!(LANE, 4);
3c0e092e 8291 transmute(vst4q_lane_s8::<LANE>(transmute(a), transmute(b)))
17df50a5
XL
8292}
8293
3c0e092e 8294/// Store multiple 4-element structures from four registers
f2b60f7d
FG
8295///
8296/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)
3c0e092e
XL
8297#[inline]
8298#[target_feature(enable = "neon,aes")]
8299#[cfg_attr(test, assert_instr(st4, LANE = 0))]
8300#[rustc_legacy_const_generics(2)]
a2a8927a 8301#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8302pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
353b0b11 8303 static_assert!(LANE == 0);
3c0e092e
XL
8304 transmute(vst4_lane_s64::<LANE>(transmute(a), transmute(b)))
8305}
8306
8307/// Store multiple 4-element structures from four registers
f2b60f7d
FG
8308///
8309/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)
3c0e092e
XL
8310#[inline]
8311#[target_feature(enable = "neon,aes")]
8312#[cfg_attr(test, assert_instr(st4, LANE = 0))]
8313#[rustc_legacy_const_generics(2)]
a2a8927a 8314#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8315pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
353b0b11 8316 static_assert_uimm_bits!(LANE, 1);
3c0e092e
XL
8317 transmute(vst4q_lane_s64::<LANE>(transmute(a), transmute(b)))
8318}
8319
8320/// Store multiple 4-element structures from four registers
f2b60f7d
FG
8321///
8322/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)
17df50a5
XL
8323#[inline]
8324#[target_feature(enable = "neon")]
3c0e092e
XL
8325#[cfg_attr(test, assert_instr(st4, LANE = 0))]
8326#[rustc_legacy_const_generics(2)]
a2a8927a 8327#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8328pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
353b0b11 8329 static_assert!(LANE == 0);
17df50a5 8330 #[allow(improper_ctypes)]
c295e0f8 8331 extern "unadjusted" {
3c0e092e
XL
8332 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v1f64.p0i8")]
8333 fn vst4_lane_f64_(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, n: i64, ptr: *mut i8);
17df50a5 8334 }
a2a8927a 8335 vst4_lane_f64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
17df50a5
XL
8336}
8337
3c0e092e 8338/// Store multiple 4-element structures from four registers
f2b60f7d
FG
8339///
8340/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)
17df50a5
XL
8341#[inline]
8342#[target_feature(enable = "neon")]
3c0e092e
XL
8343#[cfg_attr(test, assert_instr(st4, LANE = 0))]
8344#[rustc_legacy_const_generics(2)]
a2a8927a 8345#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8346pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
353b0b11 8347 static_assert_uimm_bits!(LANE, 1);
17df50a5 8348 #[allow(improper_ctypes)]
c295e0f8 8349 extern "unadjusted" {
3c0e092e
XL
8350 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v2f64.p0i8")]
8351 fn vst4q_lane_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, n: i64, ptr: *mut i8);
17df50a5 8352 }
a2a8927a 8353 vst4q_lane_f64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
17df50a5
XL
8354}
8355
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    simd_mul(a, b)
}

/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    simd_mul(a, b)
}

/// Vector multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t {
    simd_mul(a, vdup_n_f64(b))
}

/// Vector multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t {
    simd_mul(a, vdupq_n_f64(b))
}
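
// Editorial usage sketch (not part of the generated spec): the `_n` forms
// broadcast the scalar operand, so `vmulq_n_f64(a, 2.0)` scales both lanes of
// `a` by 2.0, exactly as `vmulq_f64(a, vdupq_n_f64(2.0))` would.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vmulq_n_f64(a: float64x2_t) -> float64x2_t {
    vmulq_n_f64(a, 2.0)
}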
8399
3c0e092e 8400/// Floating-point multiply
f2b60f7d
FG
8401///
8402/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)
17df50a5
XL
8403#[inline]
8404#[target_feature(enable = "neon")]
3c0e092e
XL
8405#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
8406#[rustc_legacy_const_generics(2)]
a2a8927a 8407#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8408pub unsafe fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
353b0b11 8409 static_assert!(LANE == 0);
3c0e092e 8410 simd_mul(a, transmute::<f64, _>(simd_extract(b, LANE as u32)))
17df50a5
XL
8411}
8412
3c0e092e 8413/// Floating-point multiply
f2b60f7d
FG
8414///
8415/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)
17df50a5
XL
8416#[inline]
8417#[target_feature(enable = "neon")]
3c0e092e
XL
8418#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
8419#[rustc_legacy_const_generics(2)]
a2a8927a 8420#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8421pub unsafe fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
353b0b11 8422 static_assert_uimm_bits!(LANE, 1);
3c0e092e 8423 simd_mul(a, transmute::<f64, _>(simd_extract(b, LANE as u32)))
17df50a5
XL
8424}
8425
3c0e092e 8426/// Floating-point multiply
f2b60f7d
FG
8427///
8428/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)
17df50a5
XL
8429#[inline]
8430#[target_feature(enable = "neon")]
3c0e092e
XL
8431#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
8432#[rustc_legacy_const_generics(2)]
a2a8927a 8433#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8434pub unsafe fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
353b0b11
FG
8435 static_assert!(LANE == 0);
8436 simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
17df50a5
XL
8437}
8438
3c0e092e 8439/// Floating-point multiply
f2b60f7d
FG
8440///
8441/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)
17df50a5
XL
8442#[inline]
8443#[target_feature(enable = "neon")]
3c0e092e
XL
8444#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
8445#[rustc_legacy_const_generics(2)]
a2a8927a 8446#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8447pub unsafe fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
353b0b11
FG
8448 static_assert_uimm_bits!(LANE, 1);
8449 simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
17df50a5
XL
8450}
8451
3c0e092e 8452/// Floating-point multiply
f2b60f7d
FG
8453///
8454/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)
17df50a5
XL
8455#[inline]
8456#[target_feature(enable = "neon")]
3c0e092e
XL
8457#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
8458#[rustc_legacy_const_generics(2)]
a2a8927a 8459#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8460pub unsafe fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
353b0b11 8461 static_assert_uimm_bits!(LANE, 1);
3c0e092e
XL
8462 let b: f32 = simd_extract(b, LANE as u32);
8463 a * b
17df50a5
XL
8464}
8465
3c0e092e 8466/// Floating-point multiply
f2b60f7d
FG
8467///
8468/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)
17df50a5
XL
8469#[inline]
8470#[target_feature(enable = "neon")]
3c0e092e
XL
8471#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
8472#[rustc_legacy_const_generics(2)]
a2a8927a 8473#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8474pub unsafe fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
353b0b11 8475 static_assert_uimm_bits!(LANE, 2);
3c0e092e
XL
8476 let b: f32 = simd_extract(b, LANE as u32);
8477 a * b
17df50a5
XL
8478}
8479
3c0e092e 8480/// Floating-point multiply
f2b60f7d
FG
8481///
8482/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)
17df50a5
XL
8483#[inline]
8484#[target_feature(enable = "neon")]
3c0e092e
XL
8485#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
8486#[rustc_legacy_const_generics(2)]
a2a8927a 8487#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8488pub unsafe fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
353b0b11 8489 static_assert!(LANE == 0);
3c0e092e
XL
8490 let b: f64 = simd_extract(b, LANE as u32);
8491 a * b
17df50a5
XL
8492}
8493
3c0e092e 8494/// Floating-point multiply
f2b60f7d
FG
8495///
8496/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)
17df50a5
XL
8497#[inline]
8498#[target_feature(enable = "neon")]
3c0e092e
XL
8499#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
8500#[rustc_legacy_const_generics(2)]
a2a8927a 8501#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8502pub unsafe fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
353b0b11 8503 static_assert_uimm_bits!(LANE, 1);
3c0e092e
XL
8504 let b: f64 = simd_extract(b, LANE as u32);
8505 a * b
17df50a5
XL
8506}
8507
/// Signed multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
    let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    vmull_s8(a, b)
}

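// Editorial usage sketch, not part of the generated output: only the upper eight i8 lanes are
// multiplied, and the products are widened to i16, so they cannot wrap. Assumes aarch64 + NEON;
// values are arbitrary.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn example_vmull_high_s8() {
    let a = vdupq_n_s8(100);
    let b = vdupq_n_s8(3);
    let wide: int16x8_t = vmull_high_s8(a, b); // every lane: 100 * 3 = 300
    assert_eq!(vgetq_lane_s16::<0>(wide), 300);
}
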
3c0e092e 8521/// Signed multiply long
f2b60f7d
FG
8522///
8523/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)
17df50a5
XL
8524#[inline]
8525#[target_feature(enable = "neon")]
3c0e092e 8526#[cfg_attr(test, assert_instr(smull2))]
a2a8927a 8527#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8528pub unsafe fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
353b0b11
FG
8529 let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
8530 let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
3c0e092e 8531 vmull_s16(a, b)
17df50a5
XL
8532}
8533
3c0e092e 8534/// Signed multiply long
f2b60f7d
FG
8535///
8536/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)
17df50a5
XL
8537#[inline]
8538#[target_feature(enable = "neon")]
3c0e092e 8539#[cfg_attr(test, assert_instr(smull2))]
a2a8927a 8540#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8541pub unsafe fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
353b0b11
FG
8542 let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
8543 let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
3c0e092e 8544 vmull_s32(a, b)
17df50a5
XL
8545}
8546
3c0e092e 8547/// Unsigned multiply long
f2b60f7d
FG
8548///
8549/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)
17df50a5
XL
8550#[inline]
8551#[target_feature(enable = "neon")]
3c0e092e 8552#[cfg_attr(test, assert_instr(umull2))]
a2a8927a 8553#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8554pub unsafe fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
353b0b11
FG
8555 let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
8556 let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
3c0e092e 8557 vmull_u8(a, b)
17df50a5
XL
8558}
8559
3c0e092e 8560/// Unsigned multiply long
f2b60f7d
FG
8561///
8562/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)
17df50a5
XL
8563#[inline]
8564#[target_feature(enable = "neon")]
3c0e092e 8565#[cfg_attr(test, assert_instr(umull2))]
a2a8927a 8566#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8567pub unsafe fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
353b0b11
FG
8568 let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
8569 let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
3c0e092e 8570 vmull_u16(a, b)
17df50a5
XL
8571}
8572
3c0e092e 8573/// Unsigned multiply long
f2b60f7d
FG
8574///
8575/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)
17df50a5
XL
8576#[inline]
8577#[target_feature(enable = "neon")]
3c0e092e 8578#[cfg_attr(test, assert_instr(umull2))]
a2a8927a 8579#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8580pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
353b0b11
FG
8581 let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
8582 let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
3c0e092e 8583 vmull_u32(a, b)
17df50a5
XL
8584}
8585
/// Polynomial multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(pmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_p64(a: p64, b: p64) -> p128 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.pmull64")]
        fn vmull_p64_(a: p64, b: p64) -> int8x16_t;
    }
    transmute(vmull_p64_(a, b))
}

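// Editorial usage sketch, not part of the generated output: PMULL is a carry-less (polynomial,
// GF(2)) multiply, so 0b11 * 0b11 yields 0b101 rather than 9. Requires the `aes` target feature
// in addition to `neon` on aarch64.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn example_vmull_p64() {
    let r: p128 = vmull_p64(0b11, 0b11); // XOR of shifted partial products
    assert_eq!(r, 0b101);
}
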
3c0e092e 8602/// Polynomial multiply long
f2b60f7d
FG
8603///
8604/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)
17df50a5
XL
8605#[inline]
8606#[target_feature(enable = "neon")]
3c0e092e 8607#[cfg_attr(test, assert_instr(pmull))]
a2a8927a 8608#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8609pub unsafe fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
353b0b11
FG
8610 let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
8611 let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
3c0e092e 8612 vmull_p8(a, b)
17df50a5
XL
8613}
8614
3c0e092e 8615/// Polynomial multiply long
f2b60f7d
FG
8616///
8617/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)
17df50a5 8618#[inline]
3c0e092e
XL
8619#[target_feature(enable = "neon,aes")]
8620#[cfg_attr(test, assert_instr(pmull))]
a2a8927a 8621#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
8622pub unsafe fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 {
8623 vmull_p64(simd_extract(a, 1), simd_extract(b, 1))
17df50a5
XL
8624}
8625
/// Multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    vmull_high_s16(a, vdupq_n_s16(b))
}

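// Editorial usage sketch, not part of the generated output: the scalar is broadcast to all lanes
// and only the upper four i16 lanes are widened and multiplied. Assumes aarch64 + NEON.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn example_vmull_high_n_s16() {
    let a = vdupq_n_s16(1000);
    let wide: int32x4_t = vmull_high_n_s16(a, 1000); // 1_000_000 overflows i16 but fits in i32
    assert_eq!(vgetq_lane_s32::<0>(wide), 1_000_000);
}
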
3c0e092e 8637/// Multiply long
f2b60f7d
FG
8638///
8639/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)
17df50a5
XL
8640#[inline]
8641#[target_feature(enable = "neon")]
3c0e092e 8642#[cfg_attr(test, assert_instr(smull2))]
a2a8927a 8643#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
8644pub unsafe fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
8645 vmull_high_s32(a, vdupq_n_s32(b))
17df50a5
XL
8646}
8647
3c0e092e 8648/// Multiply long
f2b60f7d
FG
8649///
8650/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)
17df50a5
XL
8651#[inline]
8652#[target_feature(enable = "neon")]
3c0e092e 8653#[cfg_attr(test, assert_instr(umull2))]
a2a8927a 8654#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
8655pub unsafe fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t {
8656 vmull_high_u16(a, vdupq_n_u16(b))
17df50a5
XL
8657}
8658
3c0e092e 8659/// Multiply long
f2b60f7d
FG
8660///
8661/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)
17df50a5
XL
8662#[inline]
8663#[target_feature(enable = "neon")]
3c0e092e 8664#[cfg_attr(test, assert_instr(umull2))]
a2a8927a 8665#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
8666pub unsafe fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t {
8667 vmull_high_u32(a, vdupq_n_u32(b))
17df50a5
XL
8668}
8669
/// Multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    vmull_high_s16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}

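// Editorial usage sketch, not part of the generated output: one lane of the 4-element vector is
// broadcast, then multiplied against the upper half of `a` with widening. Assumes aarch64 + NEON;
// values are arbitrary.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn example_vmull_high_lane_s16() {
    let a = vdupq_n_s16(7);
    let b: int16x4_t = transmute([1i16, 2, 3, 4]);
    let wide: int32x4_t = vmull_high_lane_s16::<3>(a, b); // 7 * b[3] = 28 in every lane
    assert_eq!(vgetq_lane_s32::<0>(wide), 28);
}
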
3c0e092e 8683/// Multiply long
f2b60f7d
FG
8684///
8685/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)
17df50a5
XL
8686#[inline]
8687#[target_feature(enable = "neon")]
3c0e092e
XL
8688#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
8689#[rustc_legacy_const_generics(2)]
a2a8927a 8690#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8691pub unsafe fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
353b0b11
FG
8692 static_assert_uimm_bits!(LANE, 3);
8693 vmull_high_s16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
17df50a5
XL
8694}
8695
3c0e092e 8696/// Multiply long
f2b60f7d
FG
8697///
8698/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)
17df50a5
XL
8699#[inline]
8700#[target_feature(enable = "neon")]
3c0e092e
XL
8701#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
8702#[rustc_legacy_const_generics(2)]
a2a8927a 8703#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8704pub unsafe fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
353b0b11
FG
8705 static_assert_uimm_bits!(LANE, 1);
8706 vmull_high_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
17df50a5
XL
8707}
8708
3c0e092e 8709/// Multiply long
f2b60f7d
FG
8710///
8711/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)
17df50a5
XL
8712#[inline]
8713#[target_feature(enable = "neon")]
3c0e092e
XL
8714#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
8715#[rustc_legacy_const_generics(2)]
a2a8927a 8716#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8717pub unsafe fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
353b0b11
FG
8718 static_assert_uimm_bits!(LANE, 2);
8719 vmull_high_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
17df50a5
XL
8720}
8721
3c0e092e 8722/// Multiply long
f2b60f7d
FG
8723///
8724/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)
17df50a5
XL
8725#[inline]
8726#[target_feature(enable = "neon")]
3c0e092e
XL
8727#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
8728#[rustc_legacy_const_generics(2)]
a2a8927a 8729#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8730pub unsafe fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t {
353b0b11
FG
8731 static_assert_uimm_bits!(LANE, 2);
8732 vmull_high_u16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
17df50a5
XL
8733}
8734
3c0e092e 8735/// Multiply long
f2b60f7d
FG
8736///
8737/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)
17df50a5
XL
8738#[inline]
8739#[target_feature(enable = "neon")]
3c0e092e
XL
8740#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
8741#[rustc_legacy_const_generics(2)]
a2a8927a 8742#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8743pub unsafe fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
353b0b11
FG
8744 static_assert_uimm_bits!(LANE, 3);
8745 vmull_high_u16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
17df50a5
XL
8746}
8747
3c0e092e 8748/// Multiply long
f2b60f7d
FG
8749///
8750/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)
17df50a5
XL
8751#[inline]
8752#[target_feature(enable = "neon")]
3c0e092e
XL
8753#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
8754#[rustc_legacy_const_generics(2)]
a2a8927a 8755#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8756pub unsafe fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t {
353b0b11
FG
8757 static_assert_uimm_bits!(LANE, 1);
8758 vmull_high_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
17df50a5
XL
8759}
8760
3c0e092e 8761/// Multiply long
f2b60f7d
FG
8762///
8763/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)
17df50a5
XL
8764#[inline]
8765#[target_feature(enable = "neon")]
3c0e092e 8766#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
17df50a5 8767#[rustc_legacy_const_generics(2)]
a2a8927a 8768#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8769pub unsafe fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
353b0b11
FG
8770 static_assert_uimm_bits!(LANE, 2);
8771 vmull_high_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
17df50a5
XL
8772}
8773
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmulx.v2f32")]
        fn vmulx_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    vmulx_f32_(a, b)
}

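// Editorial usage sketch, not part of the generated output: FMULX behaves like an ordinary
// multiply except that 0.0 times infinity returns +/-2.0 instead of NaN, which is what the
// assertion below relies on. Assumes aarch64 + NEON.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn example_vmulx_f32() {
    let a = vdup_n_f32(0.0);
    let b = vdup_n_f32(f32::INFINITY);
    let r = vmulx_f32(a, b); // a plain fmul would give NaN here
    assert_eq!(vget_lane_f32::<0>(r), 2.0);
}
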
3c0e092e 8790/// Floating-point multiply extended
f2b60f7d
FG
8791///
8792/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)
17df50a5
XL
8793#[inline]
8794#[target_feature(enable = "neon")]
3c0e092e 8795#[cfg_attr(test, assert_instr(fmulx))]
a2a8927a 8796#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
8797pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
8798 #[allow(improper_ctypes)]
8799 extern "unadjusted" {
8800 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmulx.v4f32")]
8801 fn vmulxq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
8802 }
8803 vmulxq_f32_(a, b)
17df50a5
XL
8804}
8805
3c0e092e 8806/// Floating-point multiply extended
f2b60f7d
FG
8807///
8808/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)
17df50a5
XL
8809#[inline]
8810#[target_feature(enable = "neon")]
3c0e092e 8811#[cfg_attr(test, assert_instr(fmulx))]
a2a8927a 8812#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
8813pub unsafe fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
8814 #[allow(improper_ctypes)]
8815 extern "unadjusted" {
8816 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmulx.v1f64")]
8817 fn vmulx_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
8818 }
8819 vmulx_f64_(a, b)
17df50a5
XL
8820}
8821
3c0e092e 8822/// Floating-point multiply extended
f2b60f7d
FG
8823///
8824/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)
17df50a5
XL
8825#[inline]
8826#[target_feature(enable = "neon")]
3c0e092e 8827#[cfg_attr(test, assert_instr(fmulx))]
a2a8927a 8828#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
8829pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
8830 #[allow(improper_ctypes)]
8831 extern "unadjusted" {
8832 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmulx.v2f64")]
8833 fn vmulxq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
8834 }
8835 vmulxq_f64_(a, b)
17df50a5
XL
8836}
8837
3c0e092e 8838/// Floating-point multiply extended
f2b60f7d
FG
8839///
8840/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)
17df50a5
XL
8841#[inline]
8842#[target_feature(enable = "neon")]
3c0e092e 8843#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
17df50a5 8844#[rustc_legacy_const_generics(2)]
a2a8927a 8845#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8846pub unsafe fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
353b0b11 8847 static_assert!(LANE == 0);
3c0e092e 8848 vmulx_f64(a, transmute::<f64, _>(simd_extract(b, LANE as u32)))
17df50a5
XL
8849}
8850
3c0e092e 8851/// Floating-point multiply extended
f2b60f7d
FG
8852///
8853/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)
17df50a5
XL
8854#[inline]
8855#[target_feature(enable = "neon")]
3c0e092e 8856#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
17df50a5 8857#[rustc_legacy_const_generics(2)]
a2a8927a 8858#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8859pub unsafe fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
353b0b11 8860 static_assert_uimm_bits!(LANE, 1);
3c0e092e 8861 vmulx_f64(a, transmute::<f64, _>(simd_extract(b, LANE as u32)))
17df50a5
XL
8862}
8863
3c0e092e 8864/// Floating-point multiply extended
f2b60f7d
FG
8865///
8866/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)
17df50a5
XL
8867#[inline]
8868#[target_feature(enable = "neon")]
3c0e092e 8869#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
17df50a5 8870#[rustc_legacy_const_generics(2)]
a2a8927a 8871#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8872pub unsafe fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
353b0b11
FG
8873 static_assert_uimm_bits!(LANE, 1);
8874 vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
17df50a5
XL
8875}
8876
3c0e092e 8877/// Floating-point multiply extended
f2b60f7d
FG
8878///
8879/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)
17df50a5
XL
8880#[inline]
8881#[target_feature(enable = "neon")]
3c0e092e 8882#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
17df50a5 8883#[rustc_legacy_const_generics(2)]
a2a8927a 8884#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8885pub unsafe fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
353b0b11
FG
8886 static_assert_uimm_bits!(LANE, 2);
8887 vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
17df50a5
XL
8888}
8889
3c0e092e 8890/// Floating-point multiply extended
f2b60f7d
FG
8891///
8892/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)
17df50a5
XL
8893#[inline]
8894#[target_feature(enable = "neon")]
3c0e092e 8895#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
17df50a5 8896#[rustc_legacy_const_generics(2)]
a2a8927a 8897#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8898pub unsafe fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
353b0b11
FG
8899 static_assert_uimm_bits!(LANE, 1);
8900 vmulxq_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
17df50a5
XL
8901}
8902
3c0e092e 8903/// Floating-point multiply extended
f2b60f7d
FG
8904///
8905/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)
17df50a5
XL
8906#[inline]
8907#[target_feature(enable = "neon")]
3c0e092e
XL
8908#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
8909#[rustc_legacy_const_generics(2)]
a2a8927a 8910#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8911pub unsafe fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
353b0b11
FG
8912 static_assert_uimm_bits!(LANE, 2);
8913 vmulxq_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
17df50a5
XL
8914}
8915
3c0e092e 8916/// Floating-point multiply extended
f2b60f7d
FG
8917///
8918/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)
17df50a5
XL
8919#[inline]
8920#[target_feature(enable = "neon")]
3c0e092e
XL
8921#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
8922#[rustc_legacy_const_generics(2)]
a2a8927a 8923#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8924pub unsafe fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
353b0b11
FG
8925 static_assert!(LANE == 0);
8926 vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
17df50a5
XL
8927}
8928
3c0e092e 8929/// Floating-point multiply extended
f2b60f7d
FG
8930///
8931/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)
17df50a5
XL
8932#[inline]
8933#[target_feature(enable = "neon")]
3c0e092e
XL
8934#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
8935#[rustc_legacy_const_generics(2)]
a2a8927a 8936#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8937pub unsafe fn vmulxq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
353b0b11
FG
8938 static_assert_uimm_bits!(LANE, 1);
8939 vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
17df50a5
XL
8940}
8941
3c0e092e 8942/// Floating-point multiply extended
f2b60f7d
FG
8943///
8944/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)
17df50a5
XL
8945#[inline]
8946#[target_feature(enable = "neon")]
3c0e092e 8947#[cfg_attr(test, assert_instr(fmulx))]
a2a8927a 8948#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
8949pub unsafe fn vmulxs_f32(a: f32, b: f32) -> f32 {
8950 #[allow(improper_ctypes)]
8951 extern "unadjusted" {
8952 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmulx.f32")]
8953 fn vmulxs_f32_(a: f32, b: f32) -> f32;
8954 }
8955 vmulxs_f32_(a, b)
17df50a5
XL
8956}
8957
3c0e092e 8958/// Floating-point multiply extended
f2b60f7d
FG
8959///
8960/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)
17df50a5
XL
8961#[inline]
8962#[target_feature(enable = "neon")]
3c0e092e 8963#[cfg_attr(test, assert_instr(fmulx))]
a2a8927a 8964#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
8965pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 {
8966 #[allow(improper_ctypes)]
8967 extern "unadjusted" {
8968 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmulx.f64")]
8969 fn vmulxd_f64_(a: f64, b: f64) -> f64;
8970 }
8971 vmulxd_f64_(a, b)
17df50a5
XL
8972}
8973
3c0e092e 8974/// Floating-point multiply extended
f2b60f7d
FG
8975///
8976/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)
17df50a5
XL
8977#[inline]
8978#[target_feature(enable = "neon")]
3c0e092e
XL
8979#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
8980#[rustc_legacy_const_generics(2)]
a2a8927a 8981#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8982pub unsafe fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
353b0b11 8983 static_assert_uimm_bits!(LANE, 1);
3c0e092e 8984 vmulxs_f32(a, simd_extract(b, LANE as u32))
17df50a5
XL
8985}
8986
3c0e092e 8987/// Floating-point multiply extended
f2b60f7d
FG
8988///
8989/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)
17df50a5
XL
8990#[inline]
8991#[target_feature(enable = "neon")]
3c0e092e
XL
8992#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
8993#[rustc_legacy_const_generics(2)]
a2a8927a 8994#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 8995pub unsafe fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
353b0b11 8996 static_assert_uimm_bits!(LANE, 2);
3c0e092e 8997 vmulxs_f32(a, simd_extract(b, LANE as u32))
17df50a5
XL
8998}
8999
3c0e092e 9000/// Floating-point multiply extended
f2b60f7d
FG
9001///
9002/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)
17df50a5
XL
9003#[inline]
9004#[target_feature(enable = "neon")]
3c0e092e
XL
9005#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
9006#[rustc_legacy_const_generics(2)]
a2a8927a 9007#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9008pub unsafe fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
353b0b11 9009 static_assert!(LANE == 0);
3c0e092e 9010 vmulxd_f64(a, simd_extract(b, LANE as u32))
17df50a5
XL
9011}
9012
3c0e092e 9013/// Floating-point multiply extended
f2b60f7d
FG
9014///
9015/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)
17df50a5
XL
9016#[inline]
9017#[target_feature(enable = "neon")]
3c0e092e
XL
9018#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
9019#[rustc_legacy_const_generics(2)]
a2a8927a 9020#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9021pub unsafe fn vmulxd_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
353b0b11 9022 static_assert_uimm_bits!(LANE, 1);
3c0e092e 9023 vmulxd_f64(a, simd_extract(b, LANE as u32))
17df50a5
XL
9024}
9025
/// Floating-point fused Multiply-Add to accumulator (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v1f64")]
        fn vfma_f64_(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t;
    }
    vfma_f64_(b, c, a)
}

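// Editorial usage sketch, not part of the generated output: the result is a + (b * c), computed
// with a single rounding (fused). Assumes aarch64 + NEON; values are arbitrary.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn example_vfma_f64() {
    let a = vdup_n_f64(1.0);
    let b = vdup_n_f64(2.0);
    let c = vdup_n_f64(3.0);
    let r = vfma_f64(a, b, c); // 1.0 + 2.0 * 3.0 = 7.0
    assert_eq!(vget_lane_f64::<0>(r), 7.0);
}
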
3c0e092e 9042/// Floating-point fused Multiply-Add to accumulator(vector)
f2b60f7d
FG
9043///
9044/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)
17df50a5
XL
9045#[inline]
9046#[target_feature(enable = "neon")]
3c0e092e 9047#[cfg_attr(test, assert_instr(fmla))]
a2a8927a 9048#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9049pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
9050 #[allow(improper_ctypes)]
9051 extern "unadjusted" {
9052 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v2f64")]
9053 fn vfmaq_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
9054 }
9055 vfmaq_f64_(b, c, a)
17df50a5
XL
9056}
9057
3c0e092e 9058/// Floating-point fused Multiply-Add to accumulator(vector)
f2b60f7d
FG
9059///
9060/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)
17df50a5
XL
9061#[inline]
9062#[target_feature(enable = "neon")]
3c0e092e 9063#[cfg_attr(test, assert_instr(fmadd))]
a2a8927a 9064#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9065pub unsafe fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
9066 vfma_f64(a, b, vdup_n_f64(c))
17df50a5
XL
9067}
9068
3c0e092e 9069/// Floating-point fused Multiply-Add to accumulator(vector)
f2b60f7d
FG
9070///
9071/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)
17df50a5
XL
9072#[inline]
9073#[target_feature(enable = "neon")]
3c0e092e 9074#[cfg_attr(test, assert_instr(fmla))]
a2a8927a 9075#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9076pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
9077 vfmaq_f64(a, b, vdupq_n_f64(c))
17df50a5
XL
9078}
9079
/// Floating-point fused multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfma_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    vfma_f32(a, b, vdup_n_f32(simd_extract(c, LANE as u32)))
}

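// Editorial usage sketch, not part of the generated output: lane LANE of `c` is broadcast and
// fused-multiplied with `b`, then added to `a`. Assumes aarch64 + NEON; values are arbitrary.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn example_vfma_lane_f32() {
    let a = vdup_n_f32(1.0);
    let b = vdup_n_f32(2.0);
    let c: float32x2_t = transmute([10.0f32, 20.0]);
    let r = vfma_lane_f32::<1>(a, b, c); // 1.0 + 2.0 * c[1] = 41.0
    assert_eq!(vget_lane_f32::<0>(r), 41.0);
}
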
3c0e092e 9093/// Floating-point fused multiply-add to accumulator
f2b60f7d
FG
9094///
9095/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)
17df50a5
XL
9096#[inline]
9097#[target_feature(enable = "neon")]
3c0e092e 9098#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
17df50a5 9099#[rustc_legacy_const_generics(3)]
a2a8927a 9100#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9101pub unsafe fn vfma_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
353b0b11 9102 static_assert_uimm_bits!(LANE, 2);
3c0e092e 9103 vfma_f32(a, b, vdup_n_f32(simd_extract(c, LANE as u32)))
17df50a5
XL
9104}
9105
3c0e092e 9106/// Floating-point fused multiply-add to accumulator
f2b60f7d
FG
9107///
9108/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)
17df50a5
XL
9109#[inline]
9110#[target_feature(enable = "neon")]
3c0e092e 9111#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
17df50a5 9112#[rustc_legacy_const_generics(3)]
a2a8927a 9113#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9114pub unsafe fn vfmaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
353b0b11 9115 static_assert_uimm_bits!(LANE, 1);
3c0e092e 9116 vfmaq_f32(a, b, vdupq_n_f32(simd_extract(c, LANE as u32)))
17df50a5
XL
9117}
9118
3c0e092e 9119/// Floating-point fused multiply-add to accumulator
f2b60f7d
FG
9120///
9121/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)
17df50a5
XL
9122#[inline]
9123#[target_feature(enable = "neon")]
3c0e092e 9124#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
17df50a5 9125#[rustc_legacy_const_generics(3)]
a2a8927a 9126#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9127pub unsafe fn vfmaq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
353b0b11 9128 static_assert_uimm_bits!(LANE, 2);
3c0e092e 9129 vfmaq_f32(a, b, vdupq_n_f32(simd_extract(c, LANE as u32)))
17df50a5
XL
9130}
9131
3c0e092e 9132/// Floating-point fused multiply-add to accumulator
f2b60f7d
FG
9133///
9134/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)
17df50a5
XL
9135#[inline]
9136#[target_feature(enable = "neon")]
3c0e092e 9137#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
17df50a5 9138#[rustc_legacy_const_generics(3)]
a2a8927a 9139#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9140pub unsafe fn vfma_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
353b0b11 9141 static_assert!(LANE == 0);
3c0e092e 9142 vfma_f64(a, b, vdup_n_f64(simd_extract(c, LANE as u32)))
17df50a5
XL
9143}
9144
3c0e092e 9145/// Floating-point fused multiply-add to accumulator
f2b60f7d
FG
9146///
9147/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)
17df50a5
XL
9148#[inline]
9149#[target_feature(enable = "neon")]
3c0e092e 9150#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
17df50a5 9151#[rustc_legacy_const_generics(3)]
a2a8927a 9152#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9153pub unsafe fn vfma_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c: float64x2_t) -> float64x1_t {
353b0b11 9154 static_assert_uimm_bits!(LANE, 1);
3c0e092e 9155 vfma_f64(a, b, vdup_n_f64(simd_extract(c, LANE as u32)))
17df50a5
XL
9156}
9157
3c0e092e 9158/// Floating-point fused multiply-add to accumulator
f2b60f7d
FG
9159///
9160/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)
17df50a5
XL
9161#[inline]
9162#[target_feature(enable = "neon")]
3c0e092e 9163#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
17df50a5 9164#[rustc_legacy_const_generics(3)]
a2a8927a 9165#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9166pub unsafe fn vfmaq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c: float64x1_t) -> float64x2_t {
353b0b11 9167 static_assert!(LANE == 0);
3c0e092e 9168 vfmaq_f64(a, b, vdupq_n_f64(simd_extract(c, LANE as u32)))
17df50a5
XL
9169}
9170
3c0e092e 9171/// Floating-point fused multiply-add to accumulator
f2b60f7d
FG
9172///
9173/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)
17df50a5
XL
9174#[inline]
9175#[target_feature(enable = "neon")]
3c0e092e
XL
9176#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
9177#[rustc_legacy_const_generics(3)]
a2a8927a 9178#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9179pub unsafe fn vfmaq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
353b0b11 9180 static_assert_uimm_bits!(LANE, 1);
3c0e092e 9181 vfmaq_f64(a, b, vdupq_n_f64(simd_extract(c, LANE as u32)))
17df50a5
XL
9182}
9183
3c0e092e 9184/// Floating-point fused multiply-add to accumulator
f2b60f7d
FG
9185///
9186/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)
17df50a5
XL
9187#[inline]
9188#[target_feature(enable = "neon")]
3c0e092e
XL
9189#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
9190#[rustc_legacy_const_generics(3)]
a2a8927a 9191#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9192pub unsafe fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
9193 #[allow(improper_ctypes)]
9194 extern "unadjusted" {
9195 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.f32")]
9196 fn vfmas_lane_f32_(a: f32, b: f32, c: f32) -> f32;
9197 }
353b0b11 9198 static_assert_uimm_bits!(LANE, 1);
3c0e092e
XL
9199 let c: f32 = simd_extract(c, LANE as u32);
9200 vfmas_lane_f32_(b, c, a)
17df50a5
XL
9201}
9202
3c0e092e 9203/// Floating-point fused multiply-add to accumulator
f2b60f7d
FG
9204///
9205/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)
17df50a5
XL
9206#[inline]
9207#[target_feature(enable = "neon")]
3c0e092e
XL
9208#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
9209#[rustc_legacy_const_generics(3)]
a2a8927a 9210#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9211pub unsafe fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
9212 #[allow(improper_ctypes)]
9213 extern "unadjusted" {
9214 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.f32")]
9215 fn vfmas_laneq_f32_(a: f32, b: f32, c: f32) -> f32;
9216 }
353b0b11 9217 static_assert_uimm_bits!(LANE, 2);
3c0e092e
XL
9218 let c: f32 = simd_extract(c, LANE as u32);
9219 vfmas_laneq_f32_(b, c, a)
17df50a5
XL
9220}
9221
3c0e092e 9222/// Floating-point fused multiply-add to accumulator
f2b60f7d
FG
9223///
9224/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)
17df50a5
XL
9225#[inline]
9226#[target_feature(enable = "neon")]
3c0e092e
XL
9227#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
9228#[rustc_legacy_const_generics(3)]
a2a8927a 9229#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9230pub unsafe fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
9231 #[allow(improper_ctypes)]
9232 extern "unadjusted" {
9233 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.f64")]
9234 fn vfmad_lane_f64_(a: f64, b: f64, c: f64) -> f64;
9235 }
353b0b11 9236 static_assert!(LANE == 0);
3c0e092e
XL
9237 let c: f64 = simd_extract(c, LANE as u32);
9238 vfmad_lane_f64_(b, c, a)
17df50a5
XL
9239}
9240
3c0e092e 9241/// Floating-point fused multiply-add to accumulator
f2b60f7d
FG
9242///
9243/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)
17df50a5
XL
9244#[inline]
9245#[target_feature(enable = "neon")]
3c0e092e
XL
9246#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
9247#[rustc_legacy_const_generics(3)]
a2a8927a 9248#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9249pub unsafe fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
9250 #[allow(improper_ctypes)]
9251 extern "unadjusted" {
9252 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.f64")]
9253 fn vfmad_laneq_f64_(a: f64, b: f64, c: f64) -> f64;
9254 }
353b0b11 9255 static_assert_uimm_bits!(LANE, 1);
3c0e092e
XL
9256 let c: f64 = simd_extract(c, LANE as u32);
9257 vfmad_laneq_f64_(b, c, a)
17df50a5
XL
9258}
9259
/// Floating-point fused multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    let b: float64x1_t = simd_neg(b);
    vfma_f64(a, b, c)
}

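// Editorial usage sketch, not part of the generated output: the result is a - (b * c), i.e. the
// product is negated before the fused add. Assumes aarch64 + NEON; values are arbitrary.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn example_vfms_f64() {
    let a = vdup_n_f64(10.0);
    let b = vdup_n_f64(2.0);
    let c = vdup_n_f64(3.0);
    let r = vfms_f64(a, b, c); // 10.0 - 2.0 * 3.0 = 4.0
    assert_eq!(vget_lane_f64::<0>(r), 4.0);
}
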
3c0e092e 9272/// Floating-point fused multiply-subtract from accumulator
f2b60f7d
FG
9273///
9274/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)
17df50a5
XL
9275#[inline]
9276#[target_feature(enable = "neon")]
3c0e092e 9277#[cfg_attr(test, assert_instr(fmls))]
a2a8927a 9278#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9279pub unsafe fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
9280 let b: float64x2_t = simd_neg(b);
9281 vfmaq_f64(a, b, c)
17df50a5
XL
9282}
9283
3c0e092e 9284/// Floating-point fused Multiply-subtract to accumulator(vector)
f2b60f7d
FG
9285///
9286/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)
17df50a5
XL
9287#[inline]
9288#[target_feature(enable = "neon")]
3c0e092e 9289#[cfg_attr(test, assert_instr(fmsub))]
a2a8927a 9290#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9291pub unsafe fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
9292 vfms_f64(a, b, vdup_n_f64(c))
17df50a5
XL
9293}
9294
3c0e092e 9295/// Floating-point fused Multiply-subtract to accumulator(vector)
f2b60f7d
FG
9296///
9297/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)
17df50a5
XL
9298#[inline]
9299#[target_feature(enable = "neon")]
3c0e092e 9300#[cfg_attr(test, assert_instr(fmls))]
a2a8927a 9301#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9302pub unsafe fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
9303 vfmsq_f64(a, b, vdupq_n_f64(c))
17df50a5
XL
9304}
9305
3c0e092e 9306/// Floating-point fused multiply-subtract to accumulator
f2b60f7d
FG
9307///
9308/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)
17df50a5
XL
9309#[inline]
9310#[target_feature(enable = "neon")]
3c0e092e
XL
9311#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
9312#[rustc_legacy_const_generics(3)]
a2a8927a 9313#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9314pub unsafe fn vfms_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
353b0b11 9315 static_assert_uimm_bits!(LANE, 1);
3c0e092e 9316 vfms_f32(a, b, vdup_n_f32(simd_extract(c, LANE as u32)))
17df50a5
XL
9317}
9318
3c0e092e 9319/// Floating-point fused multiply-subtract to accumulator
f2b60f7d
FG
9320///
9321/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)
17df50a5
XL
9322#[inline]
9323#[target_feature(enable = "neon")]
3c0e092e
XL
9324#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
9325#[rustc_legacy_const_generics(3)]
a2a8927a 9326#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9327pub unsafe fn vfms_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
353b0b11 9328 static_assert_uimm_bits!(LANE, 2);
3c0e092e 9329 vfms_f32(a, b, vdup_n_f32(simd_extract(c, LANE as u32)))
17df50a5
XL
9330}
9331
3c0e092e 9332/// Floating-point fused multiply-subtract to accumulator
f2b60f7d
FG
9333///
9334/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)
17df50a5
XL
9335#[inline]
9336#[target_feature(enable = "neon")]
3c0e092e
XL
9337#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
9338#[rustc_legacy_const_generics(3)]
a2a8927a 9339#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9340pub unsafe fn vfmsq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
353b0b11 9341 static_assert_uimm_bits!(LANE, 1);
3c0e092e 9342 vfmsq_f32(a, b, vdupq_n_f32(simd_extract(c, LANE as u32)))
17df50a5
XL
9343}
9344
3c0e092e 9345/// Floating-point fused multiply-subtract to accumulator
f2b60f7d
FG
9346///
9347/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)
17df50a5
XL
9348#[inline]
9349#[target_feature(enable = "neon")]
3c0e092e
XL
9350#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
9351#[rustc_legacy_const_generics(3)]
a2a8927a 9352#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9353pub unsafe fn vfmsq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
353b0b11 9354 static_assert_uimm_bits!(LANE, 2);
3c0e092e 9355 vfmsq_f32(a, b, vdupq_n_f32(simd_extract(c, LANE as u32)))
17df50a5
XL
9356}
9357
3c0e092e 9358/// Floating-point fused multiply-subtract to accumulator
f2b60f7d
FG
9359///
9360/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)
17df50a5
XL
9361#[inline]
9362#[target_feature(enable = "neon")]
3c0e092e
XL
9363#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
9364#[rustc_legacy_const_generics(3)]
a2a8927a 9365#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9366pub unsafe fn vfms_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
353b0b11 9367 static_assert!(LANE == 0);
3c0e092e 9368 vfms_f64(a, b, vdup_n_f64(simd_extract(c, LANE as u32)))
17df50a5
XL
9369}
9370
3c0e092e 9371/// Floating-point fused multiply-subtract to accumulator
f2b60f7d
FG
9372///
9373/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)
17df50a5
XL
9374#[inline]
9375#[target_feature(enable = "neon")]
3c0e092e
XL
9376#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
9377#[rustc_legacy_const_generics(3)]
a2a8927a 9378#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9379pub unsafe fn vfms_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c: float64x2_t) -> float64x1_t {
353b0b11 9380 static_assert_uimm_bits!(LANE, 1);
3c0e092e 9381 vfms_f64(a, b, vdup_n_f64(simd_extract(c, LANE as u32)))
17df50a5
XL
9382}
9383
3c0e092e 9384/// Floating-point fused multiply-subtract to accumulator
f2b60f7d
FG
9385///
9386/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)
17df50a5
XL
9387#[inline]
9388#[target_feature(enable = "neon")]
3c0e092e
XL
9389#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
9390#[rustc_legacy_const_generics(3)]
a2a8927a 9391#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9392pub unsafe fn vfmsq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c: float64x1_t) -> float64x2_t {
353b0b11 9393 static_assert!(LANE == 0);
3c0e092e 9394 vfmsq_f64(a, b, vdupq_n_f64(simd_extract(c, LANE as u32)))
17df50a5
XL
9395}
9396
3c0e092e 9397/// Floating-point fused multiply-subtract to accumulator
f2b60f7d
FG
9398///
9399/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)
17df50a5
XL
9400#[inline]
9401#[target_feature(enable = "neon")]
3c0e092e
XL
9402#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
9403#[rustc_legacy_const_generics(3)]
a2a8927a 9404#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9405pub unsafe fn vfmsq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
353b0b11 9406 static_assert_uimm_bits!(LANE, 1);
3c0e092e 9407 vfmsq_f64(a, b, vdupq_n_f64(simd_extract(c, LANE as u32)))
17df50a5
XL
9408}
9409
3c0e092e 9410/// Floating-point fused multiply-subtract to accumulator
f2b60f7d
FG
9411///
9412/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)
17df50a5
XL
9413#[inline]
9414#[target_feature(enable = "neon")]
3c0e092e
XL
9415#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
9416#[rustc_legacy_const_generics(3)]
a2a8927a 9417#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9418pub unsafe fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
9419 vfmas_lane_f32::<LANE>(a, -b, c)
17df50a5
XL
9420}
9421
3c0e092e 9422/// Floating-point fused multiply-subtract to accumulator
f2b60f7d
FG
9423///
9424/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)
17df50a5
XL
9425#[inline]
9426#[target_feature(enable = "neon")]
3c0e092e
XL
9427#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
9428#[rustc_legacy_const_generics(3)]
a2a8927a 9429#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9430pub unsafe fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
9431 vfmas_laneq_f32::<LANE>(a, -b, c)
17df50a5
XL
9432}
9433
3c0e092e 9434/// Floating-point fused multiply-subtract to accumulator
f2b60f7d
FG
9435///
9436/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)
17df50a5
XL
9437#[inline]
9438#[target_feature(enable = "neon")]
3c0e092e
XL
9439#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
9440#[rustc_legacy_const_generics(3)]
a2a8927a 9441#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9442pub unsafe fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
9443 vfmad_lane_f64::<LANE>(a, -b, c)
17df50a5
XL
9444}
9445
3c0e092e 9446/// Floating-point fused multiply-subtract to accumulator
f2b60f7d
FG
9447///
9448/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)
17df50a5
XL
9449#[inline]
9450#[target_feature(enable = "neon")]
3c0e092e
XL
9451#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
9452#[rustc_legacy_const_generics(3)]
a2a8927a 9453#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9454pub unsafe fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
9455 vfmad_laneq_f64::<LANE>(a, -b, c)
17df50a5
XL
9456}
9457
/// Divide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fdiv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    simd_div(a, b)
}

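// Editorial usage sketch, not part of the generated output: a true lane-wise IEEE division,
// unlike 32-bit Arm NEON, which only offers reciprocal estimates. Assumes aarch64 + NEON.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn example_vdiv_f32() {
    let a = vdup_n_f32(1.0);
    let b = vdup_n_f32(8.0);
    let r = vdiv_f32(a, b); // [0.125, 0.125]
    assert_eq!(vget_lane_f32::<0>(r), 0.125);
}
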
3c0e092e 9469/// Divide
f2b60f7d
FG
9470///
9471/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)
17df50a5
XL
9472#[inline]
9473#[target_feature(enable = "neon")]
3c0e092e 9474#[cfg_attr(test, assert_instr(fdiv))]
a2a8927a 9475#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9476pub unsafe fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
9477 simd_div(a, b)
17df50a5
XL
9478}
9479
3c0e092e 9480/// Divide
f2b60f7d
FG
9481///
9482/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)
17df50a5
XL
9483#[inline]
9484#[target_feature(enable = "neon")]
3c0e092e 9485#[cfg_attr(test, assert_instr(fdiv))]
a2a8927a 9486#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9487pub unsafe fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
9488 simd_div(a, b)
17df50a5
XL
9489}
9490
3c0e092e 9491/// Divide
f2b60f7d
FG
9492///
9493/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)
17df50a5
XL
9494#[inline]
9495#[target_feature(enable = "neon")]
3c0e092e 9496#[cfg_attr(test, assert_instr(fdiv))]
a2a8927a 9497#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9498pub unsafe fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
9499 simd_div(a, b)
17df50a5
XL
9500}
9501
3c0e092e 9502/// Subtract
f2b60f7d
FG
9503///
9504/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)
17df50a5
XL
9505#[inline]
9506#[target_feature(enable = "neon")]
3c0e092e 9507#[cfg_attr(test, assert_instr(fsub))]
a2a8927a 9508#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9509pub unsafe fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
9510 simd_sub(a, b)
17df50a5
XL
9511}
9512
3c0e092e 9513/// Subtract
f2b60f7d
FG
9514///
9515/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)
17df50a5
XL
9516#[inline]
9517#[target_feature(enable = "neon")]
3c0e092e 9518#[cfg_attr(test, assert_instr(fsub))]
a2a8927a 9519#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9520pub unsafe fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
9521 simd_sub(a, b)
17df50a5
XL
9522}
9523
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubd_s64(a: i64, b: i64) -> i64 {
    a.wrapping_sub(b)
}

/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubd_u64(a: u64, b: u64) -> u64 {
    a.wrapping_sub(b)
}

/// Add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddd_s64(a: i64, b: i64) -> i64 {
    a.wrapping_add(b)
}

/// Add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddd_u64(a: u64, b: u64) -> u64 {
    a.wrapping_add(b)
}

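// Editorial usage sketch, not part of the generated output: these scalar forms wrap on overflow
// (two's complement), matching what plain register arithmetic on the hardware would do.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn example_vaddd_vsubd() {
    assert_eq!(vaddd_s64(i64::MAX, 1), i64::MIN); // wraps instead of panicking
    assert_eq!(vsubd_u64(0, 1), u64::MAX);
}
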
/// Floating-point add across vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(faddp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.faddv.f32.v2f32")]
        fn vaddv_f32_(a: float32x2_t) -> f32;
    }
    vaddv_f32_(a)
}

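// Editorial usage sketch, not part of the generated output: a horizontal add that reduces the
// two f32 lanes to one scalar. Assumes aarch64 + NEON; values are arbitrary.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn example_vaddv_f32() {
    let a: float32x2_t = transmute([1.25f32, 2.75]);
    assert_eq!(vaddv_f32(a), 4.0); // 1.25 + 2.75
}
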
3c0e092e 9584/// Floating-point add across vector
f2b60f7d
FG
9585///
9586/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)
17df50a5
XL
9587#[inline]
9588#[target_feature(enable = "neon")]
3c0e092e 9589#[cfg_attr(test, assert_instr(faddp))]
a2a8927a 9590#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9591pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 {
9592 #[allow(improper_ctypes)]
9593 extern "unadjusted" {
9594 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.faddv.f32.v4f32")]
9595 fn vaddvq_f32_(a: float32x4_t) -> f32;
9596 }
9597 vaddvq_f32_(a)
17df50a5
XL
9598}
9599
3c0e092e 9600/// Floating-point add across vector
f2b60f7d
FG
9601///
9602/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)
17df50a5
XL
9603#[inline]
9604#[target_feature(enable = "neon")]
3c0e092e 9605#[cfg_attr(test, assert_instr(faddp))]
a2a8927a 9606#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
9607pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 {
9608 #[allow(improper_ctypes)]
9609 extern "unadjusted" {
9610 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.faddv.f64.v2f64")]
9611 fn vaddvq_f64_(a: float64x2_t) -> f64;
9612 }
9613 vaddvq_f64_(a)
17df50a5
XL
9614}
9615
3c0e092e 9616/// Signed Add Long across Vector
f2b60f7d
FG
9617///
9618/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)
9619#[inline]
9620#[target_feature(enable = "neon")]
3c0e092e 9621#[cfg_attr(test, assert_instr(saddlv))]
a2a8927a 9622#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9623pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 {
9624 #[allow(improper_ctypes)]
9625 extern "unadjusted" {
9626 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.saddlv.i32.v4i16")]
9627 fn vaddlv_s16_(a: int16x4_t) -> i32;
9628 }
9629 vaddlv_s16_(a)
9630}
9631
3c0e092e 9632/// Signed Add Long across Vector
9633///
9634/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)
9635#[inline]
9636#[target_feature(enable = "neon")]
3c0e092e 9637#[cfg_attr(test, assert_instr(saddlv))]
a2a8927a 9638#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9639pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 {
9640 #[allow(improper_ctypes)]
9641 extern "unadjusted" {
9642 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.saddlv.i32.v8i16")]
9643 fn vaddlvq_s16_(a: int16x8_t) -> i32;
9644 }
9645 vaddlvq_s16_(a)
9646}
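
// A minimal sketch of the widening "add long across vector" form above: vaddlvq_s16 sums
// all eight i16 lanes into a single i32, so the total cannot overflow the element type.
// The helper name is illustrative only; the transmute assumes the usual [i16; 8] layout.
#[cfg(test)]
#[allow(dead_code)]
fn vaddlvq_s16_usage_sketch() {
    unsafe {
        let v: int16x8_t = transmute([i16::MAX; 8]);
        // 8 * 32767 = 262136, outside i16 range but exact in the widened i32 result.
        assert_eq!(vaddlvq_s16(v), 262136i32);
    }
}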
9647
3c0e092e 9648/// Signed Add Long across Vector
9649///
9650/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)
9651#[inline]
9652#[target_feature(enable = "neon")]
3c0e092e 9653#[cfg_attr(test, assert_instr(saddlp))]
a2a8927a 9654#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9655pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 {
9656 #[allow(improper_ctypes)]
9657 extern "unadjusted" {
9658 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.saddlv.i64.v2i32")]
9659 fn vaddlv_s32_(a: int32x2_t) -> i64;
9660 }
9661 vaddlv_s32_(a)
9662}
9663
3c0e092e 9664/// Signed Add Long across Vector
9665///
9666/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)
9667#[inline]
9668#[target_feature(enable = "neon")]
3c0e092e 9669#[cfg_attr(test, assert_instr(saddlv))]
a2a8927a 9670#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9671pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 {
9672 #[allow(improper_ctypes)]
9673 extern "unadjusted" {
9674 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.saddlv.i64.v4i32")]
9675 fn vaddlvq_s32_(a: int32x4_t) -> i64;
9676 }
9677 vaddlvq_s32_(a)
9678}
9679
3c0e092e 9680/// Unsigned Add Long across Vector
9681///
9682/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)
9683#[inline]
9684#[target_feature(enable = "neon")]
3c0e092e 9685#[cfg_attr(test, assert_instr(uaddlv))]
a2a8927a 9686#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9687pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 {
9688 #[allow(improper_ctypes)]
9689 extern "unadjusted" {
9690 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16")]
9691 fn vaddlv_u16_(a: uint16x4_t) -> u32;
9692 }
9693 vaddlv_u16_(a)
9694}
9695
3c0e092e 9696/// Unsigned Add Long across Vector
9697///
9698/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)
9699#[inline]
9700#[target_feature(enable = "neon")]
3c0e092e 9701#[cfg_attr(test, assert_instr(uaddlv))]
a2a8927a 9702#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9703pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 {
9704 #[allow(improper_ctypes)]
9705 extern "unadjusted" {
9706 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16")]
9707 fn vaddlvq_u16_(a: uint16x8_t) -> u32;
9708 }
9709 vaddlvq_u16_(a)
9710}
9711
3c0e092e 9712/// Unsigned Add Long across Vector
9713///
9714/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)
9715#[inline]
9716#[target_feature(enable = "neon")]
3c0e092e 9717#[cfg_attr(test, assert_instr(uaddlp))]
a2a8927a 9718#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9719pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 {
17df50a5 9720 #[allow(improper_ctypes)]
c295e0f8 9721 extern "unadjusted" {
9722 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32")]
9723 fn vaddlv_u32_(a: uint32x2_t) -> u64;
17df50a5 9724 }
3c0e092e 9725 vaddlv_u32_(a)
9726}
9727
3c0e092e 9728/// Unsigned Add Long across Vector
9729///
9730/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)
9731#[inline]
9732#[target_feature(enable = "neon")]
3c0e092e 9733#[cfg_attr(test, assert_instr(uaddlv))]
a2a8927a 9734#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9735pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 {
17df50a5 9736 #[allow(improper_ctypes)]
c295e0f8 9737 extern "unadjusted" {
9738 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32")]
9739 fn vaddlvq_u32_(a: uint32x4_t) -> u64;
17df50a5 9740 }
3c0e092e 9741 vaddlvq_u32_(a)
9742}
9743
3c0e092e 9744/// Signed Subtract Wide
9745///
9746/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)
9747#[inline]
9748#[target_feature(enable = "neon")]
3c0e092e 9749#[cfg_attr(test, assert_instr(ssubw))]
a2a8927a 9750#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9751pub unsafe fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
353b0b11 9752 let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
3c0e092e 9753 simd_sub(a, simd_cast(c))
9754}
9755
3c0e092e 9756/// Signed Subtract Wide
9757///
9758/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)
9759#[inline]
9760#[target_feature(enable = "neon")]
3c0e092e 9761#[cfg_attr(test, assert_instr(ssubw))]
a2a8927a 9762#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9763pub unsafe fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
353b0b11 9764 let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
3c0e092e 9765 simd_sub(a, simd_cast(c))
9766}
9767
3c0e092e 9768/// Signed Subtract Wide
9769///
9770/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)
9771#[inline]
9772#[target_feature(enable = "neon")]
3c0e092e 9773#[cfg_attr(test, assert_instr(ssubw))]
a2a8927a 9774#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9775pub unsafe fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
353b0b11 9776 let c: int32x2_t = simd_shuffle!(b, b, [2, 3]);
3c0e092e 9777 simd_sub(a, simd_cast(c))
9778}
9779
3c0e092e 9780/// Unsigned Subtract Wide
9781///
9782/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)
9783#[inline]
9784#[target_feature(enable = "neon")]
3c0e092e 9785#[cfg_attr(test, assert_instr(usubw))]
a2a8927a 9786#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9787pub unsafe fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
353b0b11 9788 let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
3c0e092e 9789 simd_sub(a, simd_cast(c))
9790}
9791
3c0e092e 9792/// Unsigned Subtract Wide
9793///
9794/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)
9795#[inline]
9796#[target_feature(enable = "neon")]
3c0e092e 9797#[cfg_attr(test, assert_instr(usubw))]
a2a8927a 9798#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9799pub unsafe fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
353b0b11 9800 let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
3c0e092e 9801 simd_sub(a, simd_cast(c))
9802}
9803
3c0e092e 9804/// Unsigned Subtract Wide
9805///
9806/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)
9807#[inline]
9808#[target_feature(enable = "neon")]
3c0e092e 9809#[cfg_attr(test, assert_instr(usubw))]
a2a8927a 9810#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9811pub unsafe fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
353b0b11 9812 let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
3c0e092e 9813 simd_sub(a, simd_cast(c))
9814}
9815
3c0e092e 9816/// Signed Subtract Long
9817///
9818/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)
9819#[inline]
9820#[target_feature(enable = "neon")]
3c0e092e 9821#[cfg_attr(test, assert_instr(ssubl))]
a2a8927a 9822#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9823pub unsafe fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
353b0b11 9824 let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
3c0e092e 9825 let d: int16x8_t = simd_cast(c);
353b0b11 9826 let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
9827 let f: int16x8_t = simd_cast(e);
9828 simd_sub(d, f)
9829}
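
// A minimal sketch of the `_high` convention used by the widening subtracts above: only
// lanes 8..15 of each 16-lane input take part, and they are sign-extended to i16 before
// the subtraction, mirroring the simd_shuffle!/simd_cast sequence in the body. The helper
// name and the transmute-based constructors are illustrative only.
#[cfg(test)]
#[allow(dead_code)]
fn vsubl_high_s8_usage_sketch() {
    unsafe {
        let a: int8x16_t = transmute([0i8, 0, 0, 0, 0, 0, 0, 0, 100, 100, 100, 100, 100, 100, 100, 100]);
        let b: int8x16_t = transmute([0i8, 0, 0, 0, 0, 0, 0, 0, -100, -100, -100, -100, -100, -100, -100, -100]);
        // 100 - (-100) = 200 in every lane; representable only after widening to i16.
        let d: int16x8_t = vsubl_high_s8(a, b);
        assert_eq!(transmute::<int16x8_t, [i16; 8]>(d), [200i16; 8]);
    }
}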
9830
3c0e092e 9831/// Signed Subtract Long
9832///
9833/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)
9834#[inline]
9835#[target_feature(enable = "neon")]
3c0e092e 9836#[cfg_attr(test, assert_instr(ssubl))]
a2a8927a 9837#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9838pub unsafe fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
353b0b11 9839 let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
3c0e092e 9840 let d: int32x4_t = simd_cast(c);
353b0b11 9841 let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
9842 let f: int32x4_t = simd_cast(e);
9843 simd_sub(d, f)
9844}
9845
3c0e092e 9846/// Signed Subtract Long
9847///
9848/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)
9849#[inline]
9850#[target_feature(enable = "neon")]
3c0e092e 9851#[cfg_attr(test, assert_instr(ssubl))]
a2a8927a 9852#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9853pub unsafe fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
353b0b11 9854 let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
3c0e092e 9855 let d: int64x2_t = simd_cast(c);
353b0b11 9856 let e: int32x2_t = simd_shuffle!(b, b, [2, 3]);
9857 let f: int64x2_t = simd_cast(e);
9858 simd_sub(d, f)
9859}
9860
3c0e092e 9861/// Unsigned Subtract Long
9862///
9863/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)
9864#[inline]
9865#[target_feature(enable = "neon")]
3c0e092e 9866#[cfg_attr(test, assert_instr(usubl))]
a2a8927a 9867#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9868pub unsafe fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
353b0b11 9869 let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
3c0e092e 9870 let d: uint16x8_t = simd_cast(c);
353b0b11 9871 let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
9872 let f: uint16x8_t = simd_cast(e);
9873 simd_sub(d, f)
9874}
9875
3c0e092e 9876/// Unsigned Subtract Long
9877///
9878/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)
9879#[inline]
9880#[target_feature(enable = "neon")]
3c0e092e 9881#[cfg_attr(test, assert_instr(usubl))]
a2a8927a 9882#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9883pub unsafe fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
353b0b11 9884 let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
3c0e092e 9885 let d: uint32x4_t = simd_cast(c);
353b0b11 9886 let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
9887 let f: uint32x4_t = simd_cast(e);
9888 simd_sub(d, f)
9889}
9890
3c0e092e 9891/// Unsigned Subtract Long
9892///
9893/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)
9894#[inline]
9895#[target_feature(enable = "neon")]
3c0e092e 9896#[cfg_attr(test, assert_instr(usubl))]
a2a8927a 9897#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 9898pub unsafe fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
353b0b11 9899 let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
3c0e092e 9900 let d: uint64x2_t = simd_cast(c);
353b0b11 9901 let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
9902 let f: uint64x2_t = simd_cast(e);
9903 simd_sub(d, f)
9904}
9905
3c0e092e 9906/// Bit clear and exclusive OR
9907///
9908/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)
17df50a5 9909#[inline]
9910#[target_feature(enable = "neon,sha3")]
9911#[cfg_attr(test, assert_instr(bcax))]
9912pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
9913 #[allow(improper_ctypes)]
9914 extern "unadjusted" {
9915 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.bcaxs.v16i8")]
9916 fn vbcaxq_s8_(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
9917 }
9918 vbcaxq_s8_(a, b, c)
9919}
9920
3c0e092e 9921/// Bit clear and exclusive OR
9922///
9923/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)
17df50a5 9924#[inline]
9925#[target_feature(enable = "neon,sha3")]
9926#[cfg_attr(test, assert_instr(bcax))]
9927pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
9928 #[allow(improper_ctypes)]
9929 extern "unadjusted" {
9930 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.bcaxs.v8i16")]
9931 fn vbcaxq_s16_(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
9932 }
9933 vbcaxq_s16_(a, b, c)
9934}
9935
3c0e092e 9936/// Bit clear and exclusive OR
9937///
9938/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)
17df50a5 9939#[inline]
9940#[target_feature(enable = "neon,sha3")]
9941#[cfg_attr(test, assert_instr(bcax))]
9942pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
9943 #[allow(improper_ctypes)]
9944 extern "unadjusted" {
9945 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.bcaxs.v4i32")]
9946 fn vbcaxq_s32_(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
9947 }
9948 vbcaxq_s32_(a, b, c)
9949}
9950
3c0e092e 9951/// Bit clear and exclusive OR
9952///
9953/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)
17df50a5 9954#[inline]
9955#[target_feature(enable = "neon,sha3")]
9956#[cfg_attr(test, assert_instr(bcax))]
9957pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
9958 #[allow(improper_ctypes)]
9959 extern "unadjusted" {
9960 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.bcaxs.v2i64")]
9961 fn vbcaxq_s64_(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
9962 }
9963 vbcaxq_s64_(a, b, c)
9964}
9965
3c0e092e 9966/// Bit clear and exclusive OR
9967///
9968/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)
17df50a5 9969#[inline]
9970#[target_feature(enable = "neon,sha3")]
9971#[cfg_attr(test, assert_instr(bcax))]
9972pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
9973 #[allow(improper_ctypes)]
9974 extern "unadjusted" {
9975 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.bcaxu.v16i8")]
9976 fn vbcaxq_u8_(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
9977 }
9978 vbcaxq_u8_(a, b, c)
9979}
9980
3c0e092e 9981/// Bit clear and exclusive OR
9982///
9983/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)
17df50a5 9984#[inline]
9985#[target_feature(enable = "neon,sha3")]
9986#[cfg_attr(test, assert_instr(bcax))]
9987pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
9988 #[allow(improper_ctypes)]
9989 extern "unadjusted" {
9990 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.bcaxu.v8i16")]
9991 fn vbcaxq_u16_(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
9992 }
9993 vbcaxq_u16_(a, b, c)
9994}
9995
3c0e092e 9996/// Bit clear and exclusive OR
9997///
9998/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)
17df50a5 9999#[inline]
10000#[target_feature(enable = "neon,sha3")]
10001#[cfg_attr(test, assert_instr(bcax))]
10002pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
10003 #[allow(improper_ctypes)]
10004 extern "unadjusted" {
10005 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.bcaxu.v4i32")]
10006 fn vbcaxq_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
10007 }
10008 vbcaxq_u32_(a, b, c)
10009}
10010
3c0e092e 10011/// Bit clear and exclusive OR
10012///
10013/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)
17df50a5 10014#[inline]
10015#[target_feature(enable = "neon,sha3")]
10016#[cfg_attr(test, assert_instr(bcax))]
10017pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
10018 #[allow(improper_ctypes)]
10019 extern "unadjusted" {
10020 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.bcaxu.v2i64")]
10021 fn vbcaxq_u64_(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
10022 }
10023 vbcaxq_u64_(a, b, c)
10024}
10025
3c0e092e 10026/// Floating-point complex add
10027///
10028/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)
17df50a5 10029#[inline]
10030#[target_feature(enable = "neon,fcma")]
10031#[cfg_attr(test, assert_instr(fcadd))]
10032pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
10033 #[allow(improper_ctypes)]
10034 extern "unadjusted" {
10035 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32")]
10036 fn vcadd_rot270_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
10037 }
10038 vcadd_rot270_f32_(a, b)
10039}
10040
3c0e092e 10041/// Floating-point complex add
10042///
10043/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)
17df50a5 10044#[inline]
10045#[target_feature(enable = "neon,fcma")]
10046#[cfg_attr(test, assert_instr(fcadd))]
10047pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
10048 #[allow(improper_ctypes)]
10049 extern "unadjusted" {
10050 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32")]
10051 fn vcaddq_rot270_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
10052 }
10053 vcaddq_rot270_f32_(a, b)
10054}
10055
3c0e092e 10056/// Floating-point complex add
10057///
10058/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)
17df50a5 10059#[inline]
10060#[target_feature(enable = "neon,fcma")]
10061#[cfg_attr(test, assert_instr(fcadd))]
10062pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
10063 #[allow(improper_ctypes)]
10064 extern "unadjusted" {
10065 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64")]
10066 fn vcaddq_rot270_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
10067 }
10068 vcaddq_rot270_f64_(a, b)
10069}
10070
3c0e092e 10071/// Floating-point complex add
10072///
10073/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)
17df50a5 10074#[inline]
10075#[target_feature(enable = "neon,fcma")]
10076#[cfg_attr(test, assert_instr(fcadd))]
10077pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
10078 #[allow(improper_ctypes)]
10079 extern "unadjusted" {
10080 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32")]
10081 fn vcadd_rot90_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
10082 }
10083 vcadd_rot90_f32_(a, b)
10084}
10085
3c0e092e 10086/// Floating-point complex add
10087///
10088/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)
17df50a5 10089#[inline]
10090#[target_feature(enable = "neon,fcma")]
10091#[cfg_attr(test, assert_instr(fcadd))]
10092pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
17df50a5 10093 #[allow(improper_ctypes)]
c295e0f8 10094 extern "unadjusted" {
10095 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32")]
10096 fn vcaddq_rot90_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
17df50a5 10097 }
3c0e092e 10098 vcaddq_rot90_f32_(a, b)
10099}
10100
3c0e092e 10101/// Floating-point complex add
10102///
10103/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)
17df50a5 10104#[inline]
10105#[target_feature(enable = "neon,fcma")]
10106#[cfg_attr(test, assert_instr(fcadd))]
10107pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
10108 #[allow(improper_ctypes)]
10109 extern "unadjusted" {
10110 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64")]
10111 fn vcaddq_rot90_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
10112 }
10113 vcaddq_rot90_f64_(a, b)
10114}
10115
3c0e092e 10116/// Floating-point complex multiply accumulate
10117///
10118/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)
17df50a5 10119#[inline]
10120#[target_feature(enable = "neon,fcma")]
10121#[cfg_attr(test, assert_instr(fcmla))]
10122pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
10123 #[allow(improper_ctypes)]
10124 extern "unadjusted" {
10125 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32")]
10126 fn vcmla_f32_(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
10127 }
10128 vcmla_f32_(a, b, c)
10129}
10130
3c0e092e 10131/// Floating-point complex multiply accumulate
10132///
10133/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)
17df50a5 10134#[inline]
10135#[target_feature(enable = "neon,fcma")]
10136#[cfg_attr(test, assert_instr(fcmla))]
10137pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
10138 #[allow(improper_ctypes)]
10139 extern "unadjusted" {
10140 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32")]
10141 fn vcmlaq_f32_(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
10142 }
10143 vcmlaq_f32_(a, b, c)
10144}
10145
3c0e092e 10146/// Floating-point complex multiply accumulate
10147///
10148/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)
17df50a5 10149#[inline]
10150#[target_feature(enable = "neon,fcma")]
10151#[cfg_attr(test, assert_instr(fcmla))]
10152pub unsafe fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
17df50a5 10153 #[allow(improper_ctypes)]
c295e0f8 10154 extern "unadjusted" {
10155 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64")]
10156 fn vcmlaq_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
17df50a5 10157 }
3c0e092e 10158 vcmlaq_f64_(a, b, c)
10159}
10160
3c0e092e 10161/// Floating-point complex multiply accumulate
10162///
10163/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)
17df50a5 10164#[inline]
10165#[target_feature(enable = "neon,fcma")]
10166#[cfg_attr(test, assert_instr(fcmla))]
10167pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
10168 #[allow(improper_ctypes)]
10169 extern "unadjusted" {
10170 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32")]
10171 fn vcmla_rot90_f32_(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
10172 }
10173 vcmla_rot90_f32_(a, b, c)
10174}
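
// A minimal sketch of the usual pairing of the rot0 and rot90 forms above: with lanes
// holding interleaved (re, im) pairs, applying vcmla_f32 and then vcmla_rot90_f32
// accumulates the full complex product b * c into the accumulator. The helper name, the
// transmute-based constructors, and the interleaved-layout assumption are illustrative
// only; (1 + 2i) * (3 + 4i) = -5 + 10i.
#[cfg(test)]
#[allow(dead_code)]
fn vcmla_rot_pairing_sketch() {
    unsafe {
        let acc: float32x2_t = transmute([0.0f32, 0.0]);
        let b: float32x2_t = transmute([1.0f32, 2.0]); // 1 + 2i
        let c: float32x2_t = transmute([3.0f32, 4.0]); // 3 + 4i
        let r = vcmla_rot90_f32(vcmla_f32(acc, b, c), b, c);
        assert_eq!(transmute::<float32x2_t, [f32; 2]>(r), [-5.0f32, 10.0]);
    }
}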
10175
3c0e092e 10176/// Floating-point complex multiply accumulate
10177///
10178/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)
17df50a5 10179#[inline]
10180#[target_feature(enable = "neon,fcma")]
10181#[cfg_attr(test, assert_instr(fcmla))]
10182pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
10183 #[allow(improper_ctypes)]
10184 extern "unadjusted" {
10185 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32")]
10186 fn vcmlaq_rot90_f32_(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
10187 }
10188 vcmlaq_rot90_f32_(a, b, c)
10189}
10190
3c0e092e 10191/// Floating-point complex multiply accumulate
10192///
10193/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)
17df50a5 10194#[inline]
10195#[target_feature(enable = "neon,fcma")]
10196#[cfg_attr(test, assert_instr(fcmla))]
10197pub unsafe fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
10198 #[allow(improper_ctypes)]
10199 extern "unadjusted" {
10200 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64")]
10201 fn vcmlaq_rot90_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
10202 }
10203 vcmlaq_rot90_f64_(a, b, c)
10204}
10205
3c0e092e 10206/// Floating-point complex multiply accumulate
10207///
10208/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)
17df50a5 10209#[inline]
10210#[target_feature(enable = "neon,fcma")]
10211#[cfg_attr(test, assert_instr(fcmla))]
10212pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
10213 #[allow(improper_ctypes)]
10214 extern "unadjusted" {
10215 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32")]
10216 fn vcmla_rot180_f32_(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
10217 }
10218 vcmla_rot180_f32_(a, b, c)
10219}
10220
3c0e092e 10221/// Floating-point complex multiply accumulate
10222///
10223/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)
17df50a5 10224#[inline]
10225#[target_feature(enable = "neon,fcma")]
10226#[cfg_attr(test, assert_instr(fcmla))]
10227pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
10228 #[allow(improper_ctypes)]
10229 extern "unadjusted" {
10230 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32")]
10231 fn vcmlaq_rot180_f32_(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
10232 }
10233 vcmlaq_rot180_f32_(a, b, c)
10234}
10235
3c0e092e 10236/// Floating-point complex multiply accumulate
10237///
10238/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)
17df50a5 10239#[inline]
10240#[target_feature(enable = "neon,fcma")]
10241#[cfg_attr(test, assert_instr(fcmla))]
10242pub unsafe fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
10243 #[allow(improper_ctypes)]
10244 extern "unadjusted" {
10245 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64")]
10246 fn vcmlaq_rot180_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
10247 }
10248 vcmlaq_rot180_f64_(a, b, c)
10249}
10250
3c0e092e 10251/// Floating-point complex multiply accumulate
10252///
10253/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)
17df50a5 10254#[inline]
10255#[target_feature(enable = "neon,fcma")]
10256#[cfg_attr(test, assert_instr(fcmla))]
10257pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
10258 #[allow(improper_ctypes)]
10259 extern "unadjusted" {
10260 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32")]
10261 fn vcmla_rot270_f32_(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
10262 }
10263 vcmla_rot270_f32_(a, b, c)
10264}
10265
3c0e092e 10266/// Floating-point complex multiply accumulate
10267///
10268/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)
17df50a5 10269#[inline]
10270#[target_feature(enable = "neon,fcma")]
10271#[cfg_attr(test, assert_instr(fcmla))]
10272pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
10273 #[allow(improper_ctypes)]
10274 extern "unadjusted" {
10275 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32")]
10276 fn vcmlaq_rot270_f32_(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
10277 }
10278 vcmlaq_rot270_f32_(a, b, c)
10279}
10280
3c0e092e 10281/// Floating-point complex multiply accumulate
10282///
10283/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)
17df50a5 10284#[inline]
10285#[target_feature(enable = "neon,fcma")]
10286#[cfg_attr(test, assert_instr(fcmla))]
10287pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
10288 #[allow(improper_ctypes)]
10289 extern "unadjusted" {
10290 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64")]
10291 fn vcmlaq_rot270_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
10292 }
10293 vcmlaq_rot270_f64_(a, b, c)
10294}
10295
3c0e092e 10296/// Floating-point complex multiply accumulate
10297///
10298/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)
17df50a5 10299#[inline]
10300#[target_feature(enable = "neon,fcma")]
10301#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
10302#[rustc_legacy_const_generics(3)]
10303pub unsafe fn vcmla_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
10304 static_assert!(LANE == 0);
10305 let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3c0e092e 10306 vcmla_f32(a, b, c)
10307}
10308
3c0e092e 10309/// Floating-point complex multiply accumulate
10310///
10311/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)
17df50a5 10312#[inline]
10313#[target_feature(enable = "neon,fcma")]
10314#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
10315#[rustc_legacy_const_generics(3)]
10316pub unsafe fn vcmla_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
10317 static_assert_uimm_bits!(LANE, 1);
10318 let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3c0e092e 10319 vcmla_f32(a, b, c)
10320}
10321
3c0e092e 10322/// Floating-point complex multiply accumulate
10323///
10324/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)
17df50a5 10325#[inline]
10326#[target_feature(enable = "neon,fcma")]
10327#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
10328#[rustc_legacy_const_generics(3)]
10329pub unsafe fn vcmlaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
10330 static_assert!(LANE == 0);
10331 let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
3c0e092e 10332 vcmlaq_f32(a, b, c)
10333}
10334
3c0e092e 10335/// Floating-point complex multiply accumulate
10336///
10337/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)
17df50a5 10338#[inline]
10339#[target_feature(enable = "neon,fcma")]
10340#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
10341#[rustc_legacy_const_generics(3)]
10342pub unsafe fn vcmlaq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
10343 static_assert_uimm_bits!(LANE, 1);
10344 let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
3c0e092e 10345 vcmlaq_f32(a, b, c)
10346}
10347
3c0e092e 10348/// Floating-point complex multiply accumulate
10349///
10350/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)
17df50a5 10351#[inline]
10352#[target_feature(enable = "neon,fcma")]
10353#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
10354#[rustc_legacy_const_generics(3)]
10355pub unsafe fn vcmla_rot90_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
10356 static_assert!(LANE == 0);
10357 let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3c0e092e 10358 vcmla_rot90_f32(a, b, c)
10359}
10360
3c0e092e 10361/// Floating-point complex multiply accumulate
10362///
10363/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)
17df50a5 10364#[inline]
10365#[target_feature(enable = "neon,fcma")]
10366#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
10367#[rustc_legacy_const_generics(3)]
10368pub unsafe fn vcmla_rot90_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
10369 static_assert_uimm_bits!(LANE, 1);
10370 let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3c0e092e 10371 vcmla_rot90_f32(a, b, c)
10372}
10373
3c0e092e 10374/// Floating-point complex multiply accumulate
10375///
10376/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)
17df50a5 10377#[inline]
10378#[target_feature(enable = "neon,fcma")]
10379#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
10380#[rustc_legacy_const_generics(3)]
10381pub unsafe fn vcmlaq_rot90_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
10382 static_assert!(LANE == 0);
10383 let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
3c0e092e 10384 vcmlaq_rot90_f32(a, b, c)
10385}
10386
3c0e092e 10387/// Floating-point complex multiply accumulate
10388///
10389/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)
17df50a5 10390#[inline]
10391#[target_feature(enable = "neon,fcma")]
10392#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
10393#[rustc_legacy_const_generics(3)]
10394pub unsafe fn vcmlaq_rot90_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
10395 static_assert_uimm_bits!(LANE, 1);
10396 let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
3c0e092e 10397 vcmlaq_rot90_f32(a, b, c)
10398}
10399
3c0e092e 10400/// Floating-point complex multiply accumulate
10401///
10402/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)
17df50a5 10403#[inline]
10404#[target_feature(enable = "neon,fcma")]
10405#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
10406#[rustc_legacy_const_generics(3)]
10407pub unsafe fn vcmla_rot180_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
10408 static_assert!(LANE == 0);
10409 let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3c0e092e 10410 vcmla_rot180_f32(a, b, c)
10411}
10412
3c0e092e 10413/// Floating-point complex multiply accumulate
10414///
10415/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)
17df50a5 10416#[inline]
10417#[target_feature(enable = "neon,fcma")]
10418#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
10419#[rustc_legacy_const_generics(3)]
10420pub unsafe fn vcmla_rot180_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
10421 static_assert_uimm_bits!(LANE, 1);
10422 let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3c0e092e 10423 vcmla_rot180_f32(a, b, c)
10424}
10425
3c0e092e 10426/// Floating-point complex multiply accumulate
10427///
10428/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)
17df50a5 10429#[inline]
10430#[target_feature(enable = "neon,fcma")]
10431#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
10432#[rustc_legacy_const_generics(3)]
10433pub unsafe fn vcmlaq_rot180_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
10434 static_assert!(LANE == 0);
10435 let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
3c0e092e 10436 vcmlaq_rot180_f32(a, b, c)
10437}
10438
3c0e092e 10439/// Floating-point complex multiply accumulate
10440///
10441/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)
17df50a5 10442#[inline]
10443#[target_feature(enable = "neon,fcma")]
10444#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
10445#[rustc_legacy_const_generics(3)]
10446pub unsafe fn vcmlaq_rot180_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
10447 static_assert_uimm_bits!(LANE, 1);
10448 let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
3c0e092e 10449 vcmlaq_rot180_f32(a, b, c)
10450}
10451
3c0e092e 10452/// Floating-point complex multiply accumulate
10453///
10454/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)
17df50a5 10455#[inline]
10456#[target_feature(enable = "neon,fcma")]
10457#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
10458#[rustc_legacy_const_generics(3)]
10459pub unsafe fn vcmla_rot270_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
10460 static_assert!(LANE == 0);
10461 let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3c0e092e 10462 vcmla_rot270_f32(a, b, c)
10463}
10464
3c0e092e 10465/// Floating-point complex multiply accumulate
10466///
10467/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)
17df50a5 10468#[inline]
10469#[target_feature(enable = "neon,fcma")]
10470#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
10471#[rustc_legacy_const_generics(3)]
10472pub unsafe fn vcmla_rot270_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
10473 static_assert_uimm_bits!(LANE, 1);
10474 let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3c0e092e 10475 vcmla_rot270_f32(a, b, c)
10476}
10477
3c0e092e 10478/// Floating-point complex multiply accumulate
10479///
10480/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)
17df50a5 10481#[inline]
10482#[target_feature(enable = "neon,fcma")]
10483#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
10484#[rustc_legacy_const_generics(3)]
10485pub unsafe fn vcmlaq_rot270_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
10486 static_assert!(LANE == 0);
10487 let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
3c0e092e 10488 vcmlaq_rot270_f32(a, b, c)
10489}
10490
3c0e092e 10491/// Floating-point complex multiply accumulate
10492///
10493/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)
17df50a5 10494#[inline]
10495#[target_feature(enable = "neon,fcma")]
10496#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
10497#[rustc_legacy_const_generics(3)]
10498pub unsafe fn vcmlaq_rot270_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
10499 static_assert_uimm_bits!(LANE, 1);
10500 let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
3c0e092e 10501 vcmlaq_rot270_f32(a, b, c)
10502}
10503
3c0e092e 10504/// Dot product arithmetic
10505///
10506/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)
17df50a5 10507#[inline]
10508#[target_feature(enable = "neon,dotprod")]
10509#[cfg_attr(test, assert_instr(sdot))]
10510pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t {
10511 #[allow(improper_ctypes)]
10512 extern "unadjusted" {
10513 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sdot.v2i32.v8i8")]
10514 fn vdot_s32_(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t;
10515 }
10516 vdot_s32_(a, b, c)
10517}
10518
3c0e092e 10519/// Dot product arithmetic
10520///
10521/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)
17df50a5 10522#[inline]
10523#[target_feature(enable = "neon,dotprod")]
10524#[cfg_attr(test, assert_instr(sdot))]
10525pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t {
10526 #[allow(improper_ctypes)]
10527 extern "unadjusted" {
10528 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sdot.v4i32.v16i8")]
10529 fn vdotq_s32_(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t;
10530 }
10531 vdotq_s32_(a, b, c)
10532}
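
// A minimal sketch of the dot-product semantics above: each i32 lane of the accumulator
// gains the dot product of the corresponding group of four i8 lanes from `b` and `c`.
// The helper name and transmute-based constructors are illustrative only.
#[cfg(test)]
#[allow(dead_code)]
fn vdotq_s32_usage_sketch() {
    unsafe {
        let a: int32x4_t = transmute([0i32, 0, 0, 0]);
        let b: int8x16_t = transmute([1i8; 16]);
        let c: int8x16_t = transmute([2i8; 16]);
        // Every lane becomes 1*2 + 1*2 + 1*2 + 1*2 = 8.
        assert_eq!(transmute::<int32x4_t, [i32; 4]>(vdotq_s32(a, b, c)), [8i32; 4]);
    }
}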
10533
3c0e092e 10534/// Dot product arithmetic
10535///
10536/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)
17df50a5 10537#[inline]
10538#[target_feature(enable = "neon,dotprod")]
10539#[cfg_attr(test, assert_instr(udot))]
10540pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t {
10541 #[allow(improper_ctypes)]
10542 extern "unadjusted" {
10543 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.udot.v2i32.v8i8")]
10544 fn vdot_u32_(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t;
10545 }
10546 vdot_u32_(a, b, c)
10547}
10548
3c0e092e 10549/// Dot product arithmetic
10550///
10551/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)
17df50a5 10552#[inline]
10553#[target_feature(enable = "neon,dotprod")]
10554#[cfg_attr(test, assert_instr(udot))]
10555pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t {
10556 #[allow(improper_ctypes)]
10557 extern "unadjusted" {
10558 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.udot.v4i32.v16i8")]
10559 fn vdotq_u32_(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t;
10560 }
10561 vdotq_u32_(a, b, c)
10562}
10563
3c0e092e 10564/// Dot product arithmetic
10565///
10566/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)
17df50a5 10567#[inline]
10568#[target_feature(enable = "neon,dotprod")]
10569#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
10570#[rustc_legacy_const_generics(3)]
10571pub unsafe fn vdot_lane_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t {
10572 static_assert_uimm_bits!(LANE, 1);
10573 let c: int8x8_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
3c0e092e 10574 vdot_s32(a, b, c)
10575}
10576
3c0e092e 10577/// Dot product arithmetic
10578///
10579/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)
17df50a5 10580#[inline]
10581#[target_feature(enable = "neon,dotprod")]
10582#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
10583#[rustc_legacy_const_generics(3)]
10584pub unsafe fn vdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x16_t) -> int32x2_t {
10585 static_assert_uimm_bits!(LANE, 2);
10586 let c: int8x8_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
3c0e092e 10587 vdot_s32(a, b, c)
10588}
10589
3c0e092e 10590/// Dot product arithmetic
10591///
10592/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_s32)
17df50a5 10593#[inline]
10594#[target_feature(enable = "neon,dotprod")]
10595#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
10596#[rustc_legacy_const_generics(3)]
10597pub unsafe fn vdotq_lane_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int8x8_t) -> int32x4_t {
10598 static_assert_uimm_bits!(LANE, 1);
10599 let c: int8x16_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
3c0e092e 10600 vdotq_s32(a, b, c)
10601}
10602
3c0e092e 10603/// Dot product arithmetic
10604///
10605/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)
17df50a5 10606#[inline]
10607#[target_feature(enable = "neon,dotprod")]
10608#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
10609#[rustc_legacy_const_generics(3)]
10610pub unsafe fn vdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t {
10611 static_assert_uimm_bits!(LANE, 2);
10612 let c: int8x16_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
3c0e092e 10613 vdotq_s32(a, b, c)
10614}
10615
3c0e092e 10616/// Dot product arithmetic
10617///
10618/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_u32)
10619#[inline]
10620#[target_feature(enable = "neon,dotprod")]
10621#[cfg_attr(test, assert_instr(udot, LANE = 0))]
10622#[rustc_legacy_const_generics(3)]
10623pub unsafe fn vdot_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t {
10624 static_assert_uimm_bits!(LANE, 1);
10625 let c: uint8x8_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
10626 vdot_u32(a, b, c)
10627}
10628
10629/// Dot product arithmetic
10630///
10631/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)
10632#[inline]
10633#[target_feature(enable = "neon,dotprod")]
10634#[cfg_attr(test, assert_instr(udot, LANE = 0))]
10635#[rustc_legacy_const_generics(3)]
10636pub unsafe fn vdot_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uint8x16_t) -> uint32x2_t {
10637 static_assert_uimm_bits!(LANE, 2);
10638 let c: uint8x8_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
10639 vdot_u32(a, b, c)
10640}
10641
10642/// Dot product arithmetic
10643///
10644/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_u32)
10645#[inline]
10646#[target_feature(enable = "neon,dotprod")]
10647#[cfg_attr(test, assert_instr(udot, LANE = 0))]
10648#[rustc_legacy_const_generics(3)]
10649pub unsafe fn vdotq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: uint8x8_t) -> uint32x4_t {
10650 static_assert_uimm_bits!(LANE, 1);
10651 let c: uint8x16_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
10652 vdotq_u32(a, b, c)
10653}
10654
10655/// Dot product arithmetic
10656///
10657/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)
10658#[inline]
10659#[target_feature(enable = "neon,dotprod")]
10660#[cfg_attr(test, assert_instr(udot, LANE = 0))]
10661#[rustc_legacy_const_generics(3)]
10662pub unsafe fn vdotq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t {
10663 static_assert_uimm_bits!(LANE, 2);
10664 let c: uint8x16_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
10665 vdotq_u32(a, b, c)
10666}
10667
10668/// Maximum (vector)
10669///
10670/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)
10671#[inline]
10672#[target_feature(enable = "neon")]
3c0e092e 10673#[cfg_attr(test, assert_instr(fmax))]
a2a8927a 10674#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 10675pub unsafe fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
17df50a5 10676 #[allow(improper_ctypes)]
c295e0f8 10677 extern "unadjusted" {
10678 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmax.v1f64")]
10679 fn vmax_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
17df50a5 10680 }
3c0e092e 10681 vmax_f64_(a, b)
10682}
10683
3c0e092e 10684/// Maximum (vector)
10685///
10686/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)
10687#[inline]
10688#[target_feature(enable = "neon")]
3c0e092e 10689#[cfg_attr(test, assert_instr(fmax))]
a2a8927a 10690#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 10691pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
17df50a5 10692 #[allow(improper_ctypes)]
c295e0f8 10693 extern "unadjusted" {
10694 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmax.v2f64")]
10695 fn vmaxq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
17df50a5 10696 }
3c0e092e 10697 vmaxq_f64_(a, b)
10698}
10699
5e7ed085 10700/// Floating-point Maximum Number (vector)
10701///
10702/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)
10703#[inline]
10704#[target_feature(enable = "neon")]
3c0e092e 10705#[cfg_attr(test, assert_instr(fmaxnm))]
a2a8927a 10706#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 10707pub unsafe fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
17df50a5 10708 #[allow(improper_ctypes)]
c295e0f8 10709 extern "unadjusted" {
10710 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnm.v1f64")]
10711 fn vmaxnm_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
17df50a5 10712 }
3c0e092e 10713 vmaxnm_f64_(a, b)
10714}
10715
5e7ed085 10716/// Floating-point Maximum Number (vector)
10717///
10718/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)
10719#[inline]
10720#[target_feature(enable = "neon")]
3c0e092e 10721#[cfg_attr(test, assert_instr(fmaxnm))]
a2a8927a 10722#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 10723pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
17df50a5 10724 #[allow(improper_ctypes)]
c295e0f8 10725 extern "unadjusted" {
10726 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnm.v2f64")]
10727 fn vmaxnmq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
17df50a5 10728 }
3c0e092e 10729 vmaxnmq_f64_(a, b)
10730}
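
// A minimal sketch of how the "maximum number" forms above differ from vmaxq_f64: fmaxnm
// follows IEEE 754 maxNum semantics, so a quiet NaN in one operand is ignored in favour of
// the numeric operand, whereas fmax propagates the NaN. The helper name and transmute-based
// constructors are illustrative only.
#[cfg(test)]
#[allow(dead_code)]
fn vmaxnmq_f64_usage_sketch() {
    unsafe {
        let a: float64x2_t = transmute([f64::NAN, 1.0]);
        let b: float64x2_t = transmute([2.0f64, f64::NAN]);
        let r = transmute::<float64x2_t, [f64; 2]>(vmaxnmq_f64(a, b));
        assert_eq!(r, [2.0f64, 1.0]);
    }
}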
10731
3c0e092e 10732/// Floating-point maximum number across vector
10733///
10734/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)
10735#[inline]
10736#[target_feature(enable = "neon")]
3c0e092e 10737#[cfg_attr(test, assert_instr(fmaxnmp))]
a2a8927a 10738#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10739pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 {
10740 #[allow(improper_ctypes)]
10741 extern "unadjusted" {
10742 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32")]
10743 fn vmaxnmv_f32_(a: float32x2_t) -> f32;
10744 }
10745 vmaxnmv_f32_(a)
10746}
10747
3c0e092e 10748/// Floating-point maximum number across vector
10749///
10750/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)
10751#[inline]
10752#[target_feature(enable = "neon")]
3c0e092e 10753#[cfg_attr(test, assert_instr(fmaxnmp))]
a2a8927a 10754#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10755pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 {
10756 #[allow(improper_ctypes)]
10757 extern "unadjusted" {
10758 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64")]
10759 fn vmaxnmvq_f64_(a: float64x2_t) -> f64;
10760 }
10761 vmaxnmvq_f64_(a)
17df50a5
XL
10762}
10763
3c0e092e 10764/// Floating-point maximum number across vector
f2b60f7d
FG
10765///
10766/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)
17df50a5
XL
10767#[inline]
10768#[target_feature(enable = "neon")]
3c0e092e 10769#[cfg_attr(test, assert_instr(fmaxnmv))]
a2a8927a 10770#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
10771pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 {
10772 #[allow(improper_ctypes)]
10773 extern "unadjusted" {
10774 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32")]
10775 fn vmaxnmvq_f32_(a: float32x4_t) -> f32;
10776 }
10777 vmaxnmvq_f32_(a)
17df50a5
XL
10778}
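
// Illustrative sketch (not generated): the `v*v` intrinsics reduce a whole vector to a
// single scalar, here the largest of the four f32 lanes. Assumes the `f32x4` helper
// type and `simd_test` harness used by this crate's tests.
#[cfg(test)]
mod vmaxnmvq_f32_usage_sketch {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn reduce_to_scalar_maximum() {
        let a: f32x4 = f32x4::new(-3.0, 7.5, 0.25, 2.0);
        let r: f32 = vmaxnmvq_f32(transmute(a));
        assert_eq!(r, 7.5); // maximum over all four lanes
    }
}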
10779
3c0e092e 10780/// Floating-point Maximum Number Pairwise (vector).
f2b60f7d
FG
10781///
10782/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)
17df50a5
XL
10783#[inline]
10784#[target_feature(enable = "neon")]
3c0e092e 10785#[cfg_attr(test, assert_instr(fmaxnmp))]
a2a8927a 10786#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
10787pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
10788 #[allow(improper_ctypes)]
10789 extern "unadjusted" {
10790 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnmp.v2f32")]
10791 fn vpmaxnm_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
10792 }
10793 vpmaxnm_f32_(a, b)
17df50a5
XL
10794}
10795
3c0e092e 10796/// Floating-point Maximum Number Pairwise (vector).
f2b60f7d
FG
10797///
10798/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)
17df50a5
XL
10799#[inline]
10800#[target_feature(enable = "neon")]
3c0e092e 10801#[cfg_attr(test, assert_instr(fmaxnmp))]
a2a8927a 10802#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
10803pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
10804 #[allow(improper_ctypes)]
10805 extern "unadjusted" {
10806 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnmp.v2f64")]
10807 fn vpmaxnmq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
10808 }
10809 vpmaxnmq_f64_(a, b)
17df50a5
XL
10810}
10811
3c0e092e 10812/// Floating-point Maximum Number Pairwise (vector).
f2b60f7d
FG
10813///
10814/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)
17df50a5
XL
10815#[inline]
10816#[target_feature(enable = "neon")]
3c0e092e 10817#[cfg_attr(test, assert_instr(fmaxnmp))]
a2a8927a 10818#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
10819pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
10820 #[allow(improper_ctypes)]
10821 extern "unadjusted" {
10822 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnmp.v4f32")]
10823 fn vpmaxnmq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
10824 }
10825 vpmaxnmq_f32_(a, b)
17df50a5
XL
10826}
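
// Illustrative sketch (not generated): pairwise max reads adjacent lane pairs, first
// from `a` and then from `b`, producing [max(a0,a1), max(a2,a3), max(b0,b1), max(b2,b3)].
// Assumes the `f32x4` helper type and `simd_test` harness used by this crate's tests.
#[cfg(test)]
mod vpmaxnmq_f32_usage_sketch {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn pairwise_lane_layout() {
        let a: f32x4 = f32x4::new(1.0, 4.0, -2.0, -8.0);
        let b: f32x4 = f32x4::new(10.0, 20.0, 30.0, 40.0);
        let r: f32x4 = transmute(vpmaxnmq_f32(transmute(a), transmute(b)));
        assert_eq!(r, f32x4::new(4.0, -2.0, 20.0, 40.0));
    }
}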
10827
3c0e092e 10828/// Floating-point maximum number pairwise
f2b60f7d
FG
10829///
10830/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)
17df50a5
XL
10831#[inline]
10832#[target_feature(enable = "neon")]
3c0e092e 10833#[cfg_attr(test, assert_instr(fmaxnmp))]
a2a8927a 10834#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
10835pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 {
10836 #[allow(improper_ctypes)]
10837 extern "unadjusted" {
10838 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32")]
10839 fn vpmaxnms_f32_(a: float32x2_t) -> f32;
10840 }
10841 vpmaxnms_f32_(a)
17df50a5
XL
10842}
10843
3c0e092e 10844/// Floating-point maximum number pairwise
f2b60f7d
FG
10845///
10846/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)
17df50a5
XL
10847#[inline]
10848#[target_feature(enable = "neon")]
3c0e092e 10849#[cfg_attr(test, assert_instr(fmaxnmp))]
a2a8927a 10850#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
10851pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
10852 #[allow(improper_ctypes)]
10853 extern "unadjusted" {
10854 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64")]
10855 fn vpmaxnmqd_f64_(a: float64x2_t) -> f64;
10856 }
10857 vpmaxnmqd_f64_(a)
17df50a5
XL
10858}
10859
3c0e092e 10860/// Floating-point maximum pairwise
f2b60f7d
FG
10861///
10862/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)
17df50a5
XL
10863#[inline]
10864#[target_feature(enable = "neon")]
3c0e092e 10865#[cfg_attr(test, assert_instr(fmaxp))]
a2a8927a 10866#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
10867pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 {
10868 #[allow(improper_ctypes)]
10869 extern "unadjusted" {
10870 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32")]
10871 fn vpmaxs_f32_(a: float32x2_t) -> f32;
10872 }
10873 vpmaxs_f32_(a)
17df50a5
XL
10874}
10875
3c0e092e 10876/// Floating-point maximum pairwise
f2b60f7d
FG
10877///
10878/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)
17df50a5
XL
10879#[inline]
10880#[target_feature(enable = "neon")]
3c0e092e 10881#[cfg_attr(test, assert_instr(fmaxp))]
a2a8927a 10882#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
10883pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 {
10884 #[allow(improper_ctypes)]
10885 extern "unadjusted" {
10886 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64")]
10887 fn vpmaxqd_f64_(a: float64x2_t) -> f64;
10888 }
10889 vpmaxqd_f64_(a)
17df50a5
XL
10890}
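
// Illustrative sketch (not generated): the scalar pairwise forms reduce the two lanes
// of a single vector, so `vpmaxqd_f64` is simply max(lane0, lane1). Assumes the `f64x2`
// helper type and `simd_test` harness used by this crate's tests.
#[cfg(test)]
mod vpmaxqd_f64_usage_sketch {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn max_of_both_lanes() {
        let a: f64x2 = f64x2::new(3.0, 7.0);
        assert_eq!(vpmaxqd_f64(transmute(a)), 7.0);
    }
}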
10891
3c0e092e 10892/// Minimum (vector)
f2b60f7d
FG
10893///
10894/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)
17df50a5
XL
10895#[inline]
10896#[target_feature(enable = "neon")]
3c0e092e 10897#[cfg_attr(test, assert_instr(fmin))]
a2a8927a 10898#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
10899pub unsafe fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
10900 #[allow(improper_ctypes)]
10901 extern "unadjusted" {
10902 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmin.v1f64")]
10903 fn vmin_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
10904 }
10905 vmin_f64_(a, b)
17df50a5
XL
10906}
10907
3c0e092e 10908/// Minimum (vector)
f2b60f7d
FG
10909///
10910/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)
17df50a5
XL
10911#[inline]
10912#[target_feature(enable = "neon")]
3c0e092e 10913#[cfg_attr(test, assert_instr(fmin))]
a2a8927a 10914#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
10915pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
10916 #[allow(improper_ctypes)]
10917 extern "unadjusted" {
10918 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmin.v2f64")]
10919 fn vminq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
10920 }
10921 vminq_f64_(a, b)
17df50a5
XL
10922}
10923
5e7ed085 10924/// Floating-point Minimum Number (vector)
f2b60f7d
FG
10925///
10926/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)
17df50a5
XL
10927#[inline]
10928#[target_feature(enable = "neon")]
3c0e092e 10929#[cfg_attr(test, assert_instr(fminnm))]
a2a8927a 10930#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
10931pub unsafe fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
10932 #[allow(improper_ctypes)]
10933 extern "unadjusted" {
10934 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnm.v1f64")]
10935 fn vminnm_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
10936 }
10937 vminnm_f64_(a, b)
17df50a5
XL
10938}
10939
5e7ed085 10940/// Floating-point Minimum Number (vector)
f2b60f7d
FG
10941///
10942/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)
17df50a5
XL
10943#[inline]
10944#[target_feature(enable = "neon")]
3c0e092e 10945#[cfg_attr(test, assert_instr(fminnm))]
a2a8927a 10946#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
10947pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
10948 #[allow(improper_ctypes)]
10949 extern "unadjusted" {
10950 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnm.v2f64")]
10951 fn vminnmq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
10952 }
10953 vminnmq_f64_(a, b)
17df50a5
XL
10954}
10955
3c0e092e 10956/// Floating-point minimum number across vector
f2b60f7d
FG
10957///
10958/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)
17df50a5
XL
10959#[inline]
10960#[target_feature(enable = "neon")]
3c0e092e 10961#[cfg_attr(test, assert_instr(fminnmp))]
a2a8927a 10962#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
10963pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 {
10964 #[allow(improper_ctypes)]
10965 extern "unadjusted" {
10966 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32")]
10967 fn vminnmv_f32_(a: float32x2_t) -> f32;
10968 }
10969 vminnmv_f32_(a)
17df50a5
XL
10970}
10971
3c0e092e 10972/// Floating-point minimum number across vector
f2b60f7d
FG
10973///
10974/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)
17df50a5
XL
10975#[inline]
10976#[target_feature(enable = "neon")]
3c0e092e 10977#[cfg_attr(test, assert_instr(fminnmp))]
a2a8927a 10978#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
10979pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 {
10980 #[allow(improper_ctypes)]
10981 extern "unadjusted" {
10982 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64")]
10983 fn vminnmvq_f64_(a: float64x2_t) -> f64;
10984 }
10985 vminnmvq_f64_(a)
17df50a5
XL
10986}
10987
3c0e092e 10988/// Floating-point minimum number across vector
f2b60f7d
FG
10989///
10990/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)
17df50a5
XL
10991#[inline]
10992#[target_feature(enable = "neon")]
3c0e092e 10993#[cfg_attr(test, assert_instr(fminnmv))]
a2a8927a 10994#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
10995pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 {
10996 #[allow(improper_ctypes)]
10997 extern "unadjusted" {
10998 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32")]
10999 fn vminnmvq_f32_(a: float32x4_t) -> f32;
11000 }
11001 vminnmvq_f32_(a)
17df50a5
XL
11002}
11003
3c0e092e 11004/// Vector move (sign-extend the upper half into wider lanes)
f2b60f7d
FG
11005///
11006/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)
17df50a5
XL
11007#[inline]
11008#[target_feature(enable = "neon")]
3c0e092e 11009#[cfg_attr(test, assert_instr(sxtl2))]
a2a8927a 11010#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11011pub unsafe fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
353b0b11 11012 let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
3c0e092e 11013 vmovl_s8(a)
17df50a5
XL
11014}
11015
3c0e092e 11016/// Vector move (sign-extend the upper half into wider lanes)
f2b60f7d
FG
11017///
11018/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)
17df50a5
XL
11019#[inline]
11020#[target_feature(enable = "neon")]
3c0e092e 11021#[cfg_attr(test, assert_instr(sxtl2))]
a2a8927a 11022#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11023pub unsafe fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
353b0b11 11024 let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
3c0e092e 11025 vmovl_s16(a)
17df50a5
XL
11026}
11027
3c0e092e 11028/// Vector move (sign-extend the upper half into wider lanes)
f2b60f7d
FG
11029///
11030/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)
17df50a5
XL
11031#[inline]
11032#[target_feature(enable = "neon")]
3c0e092e 11033#[cfg_attr(test, assert_instr(sxtl2))]
a2a8927a 11034#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11035pub unsafe fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
353b0b11 11036 let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
3c0e092e 11037 vmovl_s32(a)
17df50a5
XL
11038}
11039
3c0e092e 11040/// Vector move (zero-extend the upper half into wider lanes)
f2b60f7d
FG
11041///
11042/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)
17df50a5
XL
11043#[inline]
11044#[target_feature(enable = "neon")]
3c0e092e 11045#[cfg_attr(test, assert_instr(uxtl2))]
a2a8927a 11046#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11047pub unsafe fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
353b0b11 11048 let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
3c0e092e 11049 vmovl_u8(a)
17df50a5
XL
11050}
11051
3c0e092e 11052/// Vector move (zero-extend the upper half into wider lanes)
f2b60f7d
FG
11053///
11054/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)
17df50a5
XL
11055#[inline]
11056#[target_feature(enable = "neon")]
3c0e092e 11057#[cfg_attr(test, assert_instr(uxtl2))]
a2a8927a 11058#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11059pub unsafe fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
353b0b11 11060 let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
3c0e092e 11061 vmovl_u16(a)
17df50a5
XL
11062}
11063
3c0e092e 11064/// Vector move (zero-extend the upper half into wider lanes)
f2b60f7d
FG
11065///
11066/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)
17df50a5
XL
11067#[inline]
11068#[target_feature(enable = "neon")]
3c0e092e 11069#[cfg_attr(test, assert_instr(uxtl2))]
a2a8927a 11070#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11071pub unsafe fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t {
353b0b11 11072 let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
3c0e092e 11073 vmovl_u32(a)
17df50a5
XL
11074}
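
// Illustrative sketch (not generated): the `vmovl_high_*` family widens only the upper
// half of its input, e.g. lanes 4..7 of an `int16x8_t` become the four lanes of an
// `int32x4_t`, sign- or zero-extended as appropriate. Assumes the `i16x8`/`i32x4`
// helper types and `simd_test` harness used by this crate's tests.
#[cfg(test)]
mod vmovl_high_usage_sketch {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn widen_upper_half_only() {
        let a: i16x8 = i16x8::new(0, 1, 2, 3, -4, 5, -6, 7);
        let r: i32x4 = transmute(vmovl_high_s16(transmute(a)));
        assert_eq!(r, i32x4::new(-4, 5, -6, 7)); // lanes 4..7, sign-extended
    }
}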
11075
3c0e092e 11076/// Floating-point add pairwise
f2b60f7d
FG
11077///
11078/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)
17df50a5
XL
11079#[inline]
11080#[target_feature(enable = "neon")]
3c0e092e 11081#[cfg_attr(test, assert_instr(faddp))]
a2a8927a 11082#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11083pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
11084 #[allow(improper_ctypes)]
11085 extern "unadjusted" {
11086 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.faddp.v4f32")]
11087 fn vpaddq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
11088 }
11089 vpaddq_f32_(a, b)
17df50a5
XL
11090}
11091
3c0e092e 11092/// Floating-point add pairwise
f2b60f7d
FG
11093///
11094/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)
17df50a5
XL
11095#[inline]
11096#[target_feature(enable = "neon")]
3c0e092e 11097#[cfg_attr(test, assert_instr(faddp))]
a2a8927a 11098#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11099pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
11100 #[allow(improper_ctypes)]
11101 extern "unadjusted" {
11102 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.faddp.v2f64")]
11103 fn vpaddq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
11104 }
11105 vpaddq_f64_(a, b)
17df50a5
XL
11106}
11107
3c0e092e 11108/// Floating-point add pairwise
f2b60f7d
FG
11109///
11110/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)
17df50a5
XL
11111#[inline]
11112#[target_feature(enable = "neon")]
c295e0f8 11113#[cfg_attr(test, assert_instr(nop))]
a2a8927a 11114#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11115pub unsafe fn vpadds_f32(a: float32x2_t) -> f32 {
11116 let a1: f32 = simd_extract(a, 0);
11117 let a2: f32 = simd_extract(a, 1);
11118 a1 + a2
17df50a5
XL
11119}
11120
3c0e092e 11121/// Floating-point add pairwise
f2b60f7d
FG
11122///
11123/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)
17df50a5
XL
11124#[inline]
11125#[target_feature(enable = "neon")]
c295e0f8 11126#[cfg_attr(test, assert_instr(nop))]
a2a8927a 11127#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11128pub unsafe fn vpaddd_f64(a: float64x2_t) -> f64 {
11129 let a1: f64 = simd_extract(a, 0);
11130 let a2: f64 = simd_extract(a, 1);
11131 a1 + a2
17df50a5
XL
11132}
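
// Illustrative sketch (not generated): `vpaddq_f32` adds adjacent pairs, the low half
// of the result coming from `a` and the high half from `b`, while `vpaddd_f64` just
// sums the two lanes of one vector. Assumes the simd helper types and `simd_test`
// harness used by this crate's tests.
#[cfg(test)]
mod vpadd_usage_sketch {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn pairwise_add_layout() {
        let a: f32x4 = f32x4::new(1.0, 2.0, 3.0, 4.0);
        let b: f32x4 = f32x4::new(10.0, 20.0, 30.0, 40.0);
        let r: f32x4 = transmute(vpaddq_f32(transmute(a), transmute(b)));
        assert_eq!(r, f32x4::new(3.0, 7.0, 30.0, 70.0));

        let d: f64x2 = f64x2::new(1.5, 2.5);
        assert_eq!(vpaddd_f64(transmute(d)), 4.0); // lane0 + lane1
    }
}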
11133
3c0e092e 11134/// Floating-point Minimum Number Pairwise (vector).
f2b60f7d
FG
11135///
11136/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)
17df50a5
XL
11137#[inline]
11138#[target_feature(enable = "neon")]
3c0e092e 11139#[cfg_attr(test, assert_instr(fminnmp))]
a2a8927a 11140#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11141pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
11142 #[allow(improper_ctypes)]
11143 extern "unadjusted" {
11144 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnmp.v2f32")]
11145 fn vpminnm_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
11146 }
11147 vpminnm_f32_(a, b)
17df50a5
XL
11148}
11149
3c0e092e 11150/// Floating-point Minimum Number Pairwise (vector).
f2b60f7d
FG
11151///
11152/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)
17df50a5
XL
11153#[inline]
11154#[target_feature(enable = "neon")]
3c0e092e 11155#[cfg_attr(test, assert_instr(fminnmp))]
a2a8927a 11156#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11157pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
11158 #[allow(improper_ctypes)]
11159 extern "unadjusted" {
11160 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnmp.v2f64")]
11161 fn vpminnmq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
11162 }
11163 vpminnmq_f64_(a, b)
17df50a5
XL
11164}
11165
3c0e092e 11166/// Floating-point Minimum Number Pairwise (vector).
f2b60f7d
FG
11167///
11168/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)
17df50a5
XL
11169#[inline]
11170#[target_feature(enable = "neon")]
3c0e092e 11171#[cfg_attr(test, assert_instr(fminnmp))]
a2a8927a 11172#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11173pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
11174 #[allow(improper_ctypes)]
11175 extern "unadjusted" {
11176 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnmp.v4f32")]
11177 fn vpminnmq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
11178 }
11179 vpminnmq_f32_(a, b)
17df50a5
XL
11180}
11181
3c0e092e 11182/// Floating-point minimum number pairwise
f2b60f7d
FG
11183///
11184/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)
17df50a5
XL
11185#[inline]
11186#[target_feature(enable = "neon")]
3c0e092e 11187#[cfg_attr(test, assert_instr(fminnmp))]
a2a8927a 11188#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11189pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 {
11190 #[allow(improper_ctypes)]
11191 extern "unadjusted" {
11192 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32")]
11193 fn vpminnms_f32_(a: float32x2_t) -> f32;
11194 }
11195 vpminnms_f32_(a)
17df50a5
XL
11196}
11197
3c0e092e 11198/// Floating-point minimum number pairwise
f2b60f7d
FG
11199///
11200/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)
17df50a5
XL
11201#[inline]
11202#[target_feature(enable = "neon")]
3c0e092e 11203#[cfg_attr(test, assert_instr(fminnmp))]
a2a8927a 11204#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11205pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 {
11206 #[allow(improper_ctypes)]
11207 extern "unadjusted" {
11208 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64")]
11209 fn vpminnmqd_f64_(a: float64x2_t) -> f64;
11210 }
11211 vpminnmqd_f64_(a)
17df50a5
XL
11212}
11213
3c0e092e 11214/// Floating-point minimum pairwise
f2b60f7d
FG
11215///
11216/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)
17df50a5
XL
11217#[inline]
11218#[target_feature(enable = "neon")]
3c0e092e 11219#[cfg_attr(test, assert_instr(fminp))]
a2a8927a 11220#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11221pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 {
11222 #[allow(improper_ctypes)]
11223 extern "unadjusted" {
11224 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminv.f32.v2f32")]
11225 fn vpmins_f32_(a: float32x2_t) -> f32;
11226 }
11227 vpmins_f32_(a)
17df50a5
XL
11228}
11229
3c0e092e 11230/// Floating-point minimum pairwise
f2b60f7d
FG
11231///
11232/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)
17df50a5
XL
11233#[inline]
11234#[target_feature(enable = "neon")]
3c0e092e 11235#[cfg_attr(test, assert_instr(fminp))]
a2a8927a 11236#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11237pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 {
11238 #[allow(improper_ctypes)]
11239 extern "unadjusted" {
11240 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminv.f64.v2f64")]
11241 fn vpminqd_f64_(a: float64x2_t) -> f64;
11242 }
11243 vpminqd_f64_(a)
17df50a5
XL
11244}
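
// Illustrative sketch (not generated): the minimum-side helpers mirror the maximum
// ones; `vpminnms_f32` returns the NaN-ignoring minimum of the two lanes and
// `vpminqd_f64` the plain minimum. Assumes the simd helper types and `simd_test`
// harness used by this crate's tests.
#[cfg(test)]
mod vpmin_usage_sketch {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn pairwise_minimum_scalars() {
        let s: f32x2 = f32x2::new(f32::NAN, 2.5);
        assert_eq!(vpminnms_f32(transmute(s)), 2.5); // NaN operand is ignored

        let d: f64x2 = f64x2::new(3.0, -1.0);
        assert_eq!(vpminqd_f64(transmute(d)), -1.0);
    }
}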
11245
3c0e092e 11246/// Signed saturating doubling multiply long
f2b60f7d
FG
11247///
11248/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)
17df50a5
XL
11249#[inline]
11250#[target_feature(enable = "neon")]
3c0e092e 11251#[cfg_attr(test, assert_instr(sqdmull))]
a2a8927a 11252#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11253pub unsafe fn vqdmullh_s16(a: i16, b: i16) -> i32 {
11254 let a: int16x4_t = vdup_n_s16(a);
11255 let b: int16x4_t = vdup_n_s16(b);
11256 simd_extract(vqdmull_s16(a, b), 0)
17df50a5
XL
11257}
11258
3c0e092e 11259/// Signed saturating doubling multiply long
f2b60f7d
FG
11260///
11261/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)
17df50a5
XL
11262#[inline]
11263#[target_feature(enable = "neon")]
3c0e092e 11264#[cfg_attr(test, assert_instr(sqdmull))]
a2a8927a 11265#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11266pub unsafe fn vqdmulls_s32(a: i32, b: i32) -> i64 {
11267 #[allow(improper_ctypes)]
11268 extern "unadjusted" {
11269 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqdmulls.scalar")]
11270 fn vqdmulls_s32_(a: i32, b: i32) -> i64;
11271 }
11272 vqdmulls_s32_(a, b)
17df50a5
XL
11273}
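
// Illustrative sketch (not generated): "saturating doubling multiply long" computes
// 2 * a * b in the next wider type and clamps on overflow; the only i16 input pair
// that actually saturates is (i16::MIN, i16::MIN). Assumes the `simd_test` harness
// used by this crate's tests.
#[cfg(test)]
mod vqdmull_scalar_usage_sketch {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn doubling_multiply_and_saturation() {
        assert_eq!(vqdmullh_s16(0x4000, 0x4000), 0x2000_0000); // 2 * 2^14 * 2^14
        assert_eq!(vqdmullh_s16(i16::MIN, i16::MIN), i32::MAX); // 2^31 clamps to i32::MAX
        assert_eq!(vqdmulls_s32(3, 5), 30); // widened to i64
    }
}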
11274
3c0e092e 11275/// Signed saturating doubling multiply long
f2b60f7d
FG
11276///
11277/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)
17df50a5
XL
11278#[inline]
11279#[target_feature(enable = "neon")]
3c0e092e 11280#[cfg_attr(test, assert_instr(sqdmull2))]
a2a8927a 11281#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11282pub unsafe fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
353b0b11
FG
11283 let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
11284 let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
3c0e092e 11285 vqdmull_s16(a, b)
17df50a5
XL
11286}
11287
3c0e092e 11288/// Signed saturating doubling multiply long
f2b60f7d
FG
11289///
11290/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)
17df50a5
XL
11291#[inline]
11292#[target_feature(enable = "neon")]
3c0e092e 11293#[cfg_attr(test, assert_instr(sqdmull2))]
a2a8927a 11294#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11295pub unsafe fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
353b0b11
FG
11296 let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
11297 let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
3c0e092e 11298 vqdmull_s32(a, b)
17df50a5
XL
11299}
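
// Illustrative sketch (not generated): the `_high` variants read only the upper half of
// their 128-bit inputs, so lanes 4..7 of each `int16x8_t` feed the four widened
// products. Assumes the simd helper types and `simd_test` harness used by this crate's
// tests.
#[cfg(test)]
mod vqdmull_high_usage_sketch {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn upper_half_products() {
        let a: i16x8 = i16x8::new(0, 0, 0, 0, 1, 2, 3, 4);
        let b: i16x8 = i16x8::new(0, 0, 0, 0, 10, 20, 30, 40);
        let r: i32x4 = transmute(vqdmull_high_s16(transmute(a), transmute(b)));
        assert_eq!(r, i32x4::new(20, 80, 180, 320)); // 2 * a_hi * b_hi per lane
    }
}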
11300
3c0e092e 11301/// Signed saturating doubling multiply long
f2b60f7d
FG
11302///
11303/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)
17df50a5
XL
11304#[inline]
11305#[target_feature(enable = "neon")]
3c0e092e 11306#[cfg_attr(test, assert_instr(sqdmull2))]
a2a8927a 11307#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11308pub unsafe fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
353b0b11 11309 let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
3c0e092e
XL
11310 let b: int16x4_t = vdup_n_s16(b);
11311 vqdmull_s16(a, b)
17df50a5
XL
11312}
11313
3c0e092e 11314/// Signed saturating doubling multiply long
f2b60f7d
FG
11315///
11316/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)
17df50a5
XL
11317#[inline]
11318#[target_feature(enable = "neon")]
3c0e092e 11319#[cfg_attr(test, assert_instr(sqdmull2))]
a2a8927a 11320#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11321pub unsafe fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
353b0b11 11322 let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
3c0e092e
XL
11323 let b: int32x2_t = vdup_n_s32(b);
11324 vqdmull_s32(a, b)
17df50a5
XL
11325}
11326
3c0e092e 11327/// Vector saturating doubling long multiply by scalar
f2b60f7d
FG
11328///
11329/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)
17df50a5
XL
11330#[inline]
11331#[target_feature(enable = "neon")]
3c0e092e
XL
11332#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
11333#[rustc_legacy_const_generics(2)]
a2a8927a 11334#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11335pub unsafe fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
353b0b11
FG
11336 static_assert_uimm_bits!(N, 3);
11337 let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
3c0e092e 11338 vqdmull_s16(a, b)
17df50a5
XL
11339}
11340
3c0e092e 11341/// Vector saturating doubling long multiply by scalar
f2b60f7d
FG
11342///
11343/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)
17df50a5
XL
11344#[inline]
11345#[target_feature(enable = "neon")]
3c0e092e
XL
11346#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
11347#[rustc_legacy_const_generics(2)]
a2a8927a 11348#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11349pub unsafe fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
353b0b11
FG
11350 static_assert_uimm_bits!(N, 2);
11351 let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
3c0e092e 11352 vqdmull_s32(a, b)
17df50a5
XL
11353}
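
// Illustrative sketch (not generated): in the `_lane`/`_laneq` forms the const generic
// `N` selects one lane of the second operand, which is broadcast against every lane of
// the first; `static_assert_uimm_bits!` rejects an out-of-range `N` at compile time.
// Assumes the simd helper types and `simd_test` harness used by this crate's tests.
#[cfg(test)]
mod vqdmull_laneq_usage_sketch {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn broadcast_one_lane() {
        let a: i16x4 = i16x4::new(1, 2, 3, 4);
        let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 5, 0, 0);
        let r: i32x4 = transmute(vqdmull_laneq_s16::<5>(transmute(a), transmute(b)));
        assert_eq!(r, i32x4::new(10, 20, 30, 40)); // 2 * a_i * b[5]
    }
}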
11354
3c0e092e 11355/// Signed saturating doubling multiply long
f2b60f7d
FG
11356///
11357/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)
17df50a5
XL
11358#[inline]
11359#[target_feature(enable = "neon")]
3c0e092e
XL
11360#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
11361#[rustc_legacy_const_generics(2)]
a2a8927a 11362#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11363pub unsafe fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
353b0b11 11364 static_assert_uimm_bits!(N, 2);
3c0e092e
XL
11365 let b: i16 = simd_extract(b, N as u32);
11366 vqdmullh_s16(a, b)
17df50a5
XL
11367}
11368
3c0e092e 11369/// Signed saturating doubling multiply long
f2b60f7d
FG
11370///
11371/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)
17df50a5
XL
11372#[inline]
11373#[target_feature(enable = "neon")]
3c0e092e
XL
11374#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
11375#[rustc_legacy_const_generics(2)]
a2a8927a 11376#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11377pub unsafe fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
353b0b11 11378 static_assert_uimm_bits!(N, 3);
3c0e092e
XL
11379 let b: i16 = simd_extract(b, N as u32);
11380 vqdmullh_s16(a, b)
17df50a5
XL
11381}
11382
3c0e092e 11383/// Signed saturating doubling multiply long
f2b60f7d
FG
11384///
11385/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)
17df50a5
XL
11386#[inline]
11387#[target_feature(enable = "neon")]
3c0e092e
XL
11388#[cfg_attr(test, assert_instr(sqdmull, N = 1))]
11389#[rustc_legacy_const_generics(2)]
a2a8927a 11390#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11391pub unsafe fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
353b0b11 11392 static_assert_uimm_bits!(N, 1);
3c0e092e
XL
11393 let b: i32 = simd_extract(b, N as u32);
11394 vqdmulls_s32(a, b)
17df50a5
XL
11395}
11396
3c0e092e 11397/// Signed saturating doubling multiply long
f2b60f7d
FG
11398///
11399/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)
17df50a5
XL
11400#[inline]
11401#[target_feature(enable = "neon")]
3c0e092e
XL
11402#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
11403#[rustc_legacy_const_generics(2)]
a2a8927a 11404#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11405pub unsafe fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
353b0b11 11406 static_assert_uimm_bits!(N, 2);
3c0e092e
XL
11407 let b: i32 = simd_extract(b, N as u32);
11408 vqdmulls_s32(a, b)
17df50a5
XL
11409}
11410
3c0e092e 11411/// Signed saturating doubling multiply long
f2b60f7d
FG
11412///
11413/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)
17df50a5
XL
11414#[inline]
11415#[target_feature(enable = "neon")]
3c0e092e
XL
11416#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
11417#[rustc_legacy_const_generics(2)]
a2a8927a 11418#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11419pub unsafe fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
353b0b11
FG
11420 static_assert_uimm_bits!(N, 2);
11421 let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
11422 let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
3c0e092e 11423 vqdmull_s16(a, b)
17df50a5
XL
11424}
11425
3c0e092e 11426/// Signed saturating doubling multiply long
f2b60f7d
FG
11427///
11428/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)
17df50a5
XL
11429#[inline]
11430#[target_feature(enable = "neon")]
3c0e092e
XL
11431#[cfg_attr(test, assert_instr(sqdmull2, N = 1))]
11432#[rustc_legacy_const_generics(2)]
a2a8927a 11433#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11434pub unsafe fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
353b0b11
FG
11435 static_assert_uimm_bits!(N, 1);
11436 let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
11437 let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
3c0e092e 11438 vqdmull_s32(a, b)
17df50a5
XL
11439}
11440
3c0e092e 11441/// Signed saturating doubling multiply long
f2b60f7d
FG
11442///
11443/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)
17df50a5
XL
11444#[inline]
11445#[target_feature(enable = "neon")]
3c0e092e
XL
11446#[cfg_attr(test, assert_instr(sqdmull2, N = 4))]
11447#[rustc_legacy_const_generics(2)]
a2a8927a 11448#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11449pub unsafe fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
353b0b11
FG
11450 static_assert_uimm_bits!(N, 3);
11451 let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
11452 let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
3c0e092e 11453 vqdmull_s16(a, b)
17df50a5
XL
11454}
11455
3c0e092e 11456/// Signed saturating doubling multiply long
f2b60f7d
FG
11457///
11458/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)
17df50a5
XL
11459#[inline]
11460#[target_feature(enable = "neon")]
3c0e092e
XL
11461#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
11462#[rustc_legacy_const_generics(2)]
a2a8927a 11463#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11464pub unsafe fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
353b0b11
FG
11465 static_assert_uimm_bits!(N, 2);
11466 let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
11467 let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
3c0e092e 11468 vqdmull_s32(a, b)
17df50a5
XL
11469}
11470
3c0e092e 11471/// Signed saturating doubling multiply-add long
f2b60f7d
FG
11472///
11473/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)
17df50a5
XL
11474#[inline]
11475#[target_feature(enable = "neon")]
3c0e092e 11476#[cfg_attr(test, assert_instr(sqdmlal2))]
a2a8927a 11477#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11478pub unsafe fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
11479 vqaddq_s32(a, vqdmull_high_s16(b, c))
17df50a5
XL
11480}
11481
3c0e092e 11482/// Signed saturating doubling multiply-add long
f2b60f7d
FG
11483///
11484/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)
17df50a5
XL
11485#[inline]
11486#[target_feature(enable = "neon")]
3c0e092e 11487#[cfg_attr(test, assert_instr(sqdmlal2))]
a2a8927a 11488#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11489pub unsafe fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
11490 vqaddq_s64(a, vqdmull_high_s32(b, c))
17df50a5
XL
11491}
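
// Illustrative sketch (not generated): the `vqdmlal_*` forms add the doubled widening
// product into the accumulator `a` with saturating addition. Assumes the simd helper
// types and `simd_test` harness used by this crate's tests.
#[cfg(test)]
mod vqdmlal_high_usage_sketch {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn accumulate_upper_half_products() {
        let acc: i32x4 = i32x4::new(100, 100, 100, 100);
        let b: i16x8 = i16x8::new(0, 0, 0, 0, 1, 2, 3, 4);
        let c: i16x8 = i16x8::new(0, 0, 0, 0, 10, 10, 10, 10);
        let r: i32x4 = transmute(vqdmlal_high_s16(transmute(acc), transmute(b), transmute(c)));
        assert_eq!(r, i32x4::new(120, 140, 160, 180)); // 100 + 2 * b_hi * c_hi
    }
}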
11492
3c0e092e 11493/// Signed saturating doubling multiply-add long
f2b60f7d
FG
11494///
11495/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)
17df50a5
XL
11496#[inline]
11497#[target_feature(enable = "neon")]
3c0e092e 11498#[cfg_attr(test, assert_instr(sqdmlal2))]
a2a8927a 11499#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11500pub unsafe fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
11501 vqaddq_s32(a, vqdmull_high_n_s16(b, c))
17df50a5
XL
11502}
11503
3c0e092e 11504/// Signed saturating doubling multiply-add long
f2b60f7d
FG
11505///
11506/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)
17df50a5
XL
11507#[inline]
11508#[target_feature(enable = "neon")]
3c0e092e 11509#[cfg_attr(test, assert_instr(sqdmlal2))]
a2a8927a 11510#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11511pub unsafe fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
11512 vqaddq_s64(a, vqdmull_high_n_s32(b, c))
17df50a5
XL
11513}
11514
3c0e092e 11515/// Vector widening saturating doubling multiply accumulate with scalar
f2b60f7d
FG
11516///
11517/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)
17df50a5
XL
11518#[inline]
11519#[target_feature(enable = "neon")]
3c0e092e
XL
11520#[cfg_attr(test, assert_instr(sqdmlal, N = 2))]
11521#[rustc_legacy_const_generics(3)]
a2a8927a 11522#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11523pub unsafe fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
353b0b11 11524 static_assert_uimm_bits!(N, 3);
3c0e092e 11525 vqaddq_s32(a, vqdmull_laneq_s16::<N>(b, c))
17df50a5
XL
11526}
11527
3c0e092e 11528/// Vector widening saturating doubling multiply accumulate with scalar
f2b60f7d
FG
11529///
11530/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)
17df50a5
XL
11531#[inline]
11532#[target_feature(enable = "neon")]
3c0e092e
XL
11533#[cfg_attr(test, assert_instr(sqdmlal, N = 1))]
11534#[rustc_legacy_const_generics(3)]
a2a8927a 11535#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11536pub unsafe fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
353b0b11 11537 static_assert_uimm_bits!(N, 2);
3c0e092e 11538 vqaddq_s64(a, vqdmull_laneq_s32::<N>(b, c))
17df50a5
XL
11539}
11540
3c0e092e 11541/// Signed saturating doubling multiply-add long
f2b60f7d
FG
11542///
11543/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)
17df50a5
XL
11544#[inline]
11545#[target_feature(enable = "neon")]
3c0e092e
XL
11546#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
11547#[rustc_legacy_const_generics(3)]
a2a8927a 11548#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11549pub unsafe fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
353b0b11 11550 static_assert_uimm_bits!(N, 2);
3c0e092e 11551 vqaddq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
17df50a5
XL
11552}
11553
3c0e092e 11554/// Signed saturating doubling multiply-add long
f2b60f7d
FG
11555///
11556/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)
17df50a5
XL
11557#[inline]
11558#[target_feature(enable = "neon")]
3c0e092e
XL
11559#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
11560#[rustc_legacy_const_generics(3)]
a2a8927a 11561#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11562pub unsafe fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
353b0b11 11563 static_assert_uimm_bits!(N, 3);
3c0e092e 11564 vqaddq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
17df50a5
XL
11565}
11566
3c0e092e 11567/// Signed saturating doubling multiply-add long
f2b60f7d
FG
11568///
11569/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)
17df50a5
XL
11570#[inline]
11571#[target_feature(enable = "neon")]
3c0e092e
XL
11572#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
11573#[rustc_legacy_const_generics(3)]
a2a8927a 11574#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11575pub unsafe fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
353b0b11 11576 static_assert_uimm_bits!(N, 1);
3c0e092e 11577 vqaddq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
17df50a5
XL
11578}
11579
3c0e092e 11580/// Signed saturating doubling multiply-add long
f2b60f7d
FG
11581///
11582/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)
17df50a5
XL
11583#[inline]
11584#[target_feature(enable = "neon")]
3c0e092e
XL
11585#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
11586#[rustc_legacy_const_generics(3)]
a2a8927a 11587#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11588pub unsafe fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
353b0b11 11589 static_assert_uimm_bits!(N, 2);
3c0e092e 11590 vqaddq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
17df50a5
XL
11591}
11592
3c0e092e 11593/// Signed saturating doubling multiply-add long
f2b60f7d
FG
11594///
11595/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)
17df50a5
XL
11596#[inline]
11597#[target_feature(enable = "neon")]
49aad941 11598#[cfg_attr(test, assert_instr(sqdmlal))]
a2a8927a 11599#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11600pub unsafe fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
11601 let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
11602 vqadds_s32(a, simd_extract(x, 0))
17df50a5
XL
11603}
11604
3c0e092e 11605/// Signed saturating doubling multiply-add long
f2b60f7d
FG
11606///
11607/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)
17df50a5
XL
11608#[inline]
11609#[target_feature(enable = "neon")]
49aad941 11610#[cfg_attr(test, assert_instr(sqdmlal))]
a2a8927a 11611#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11612pub unsafe fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
49aad941
FG
11613    vqaddd_s64(a, vqdmulls_s32(b, c))
17df50a5
XL
11615}
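
// Illustrative sketch (not generated): the scalar forms fold one doubled product into a
// scalar accumulator, saturating both the multiply and the addition. Assumes the
// `simd_test` harness used by this crate's tests.
#[cfg(test)]
mod vqdmlal_scalar_usage_sketch {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn scalar_accumulate() {
        assert_eq!(vqdmlalh_s16(1000, 3, 7), 1042); // 1000 + 2 * 3 * 7
        assert_eq!(vqdmlalh_s16(i32::MAX, 1, 1), i32::MAX); // the addition saturates
        assert_eq!(vqdmlals_s32(-5, 2, 4), 11); // -5 + 2 * 2 * 4
    }
}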
11616
3c0e092e 11617/// Signed saturating doubling multiply-add long
f2b60f7d
FG
11618///
11619/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)
17df50a5
XL
11620#[inline]
11621#[target_feature(enable = "neon")]
3c0e092e
XL
11622#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
11623#[rustc_legacy_const_generics(3)]
a2a8927a 11624#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11625pub unsafe fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
353b0b11 11626 static_assert_uimm_bits!(LANE, 2);
3c0e092e 11627 vqdmlalh_s16(a, b, simd_extract(c, LANE as u32))
17df50a5
XL
11628}
11629
3c0e092e 11630/// Signed saturating doubling multiply-add long
f2b60f7d
FG
11631///
11632/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)
17df50a5
XL
11633#[inline]
11634#[target_feature(enable = "neon")]
3c0e092e
XL
11635#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
11636#[rustc_legacy_const_generics(3)]
a2a8927a 11637#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11638pub unsafe fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
353b0b11 11639 static_assert_uimm_bits!(LANE, 3);
3c0e092e 11640 vqdmlalh_s16(a, b, simd_extract(c, LANE as u32))
17df50a5
XL
11641}
11642
3c0e092e 11643/// Signed saturating doubling multiply-add long
f2b60f7d
FG
11644///
11645/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)
17df50a5
XL
11646#[inline]
11647#[target_feature(enable = "neon")]
49aad941 11648#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
3c0e092e 11649#[rustc_legacy_const_generics(3)]
a2a8927a 11650#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11651pub unsafe fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
353b0b11 11652 static_assert_uimm_bits!(LANE, 1);
3c0e092e 11653 vqdmlals_s32(a, b, simd_extract(c, LANE as u32))
17df50a5
XL
11654}
11655
3c0e092e 11656/// Signed saturating doubling multiply-add long
f2b60f7d
FG
11657///
11658/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)
17df50a5
XL
11659#[inline]
11660#[target_feature(enable = "neon")]
49aad941 11661#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
3c0e092e 11662#[rustc_legacy_const_generics(3)]
a2a8927a 11663#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11664pub unsafe fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
353b0b11 11665 static_assert_uimm_bits!(LANE, 2);
3c0e092e 11666 vqdmlals_s32(a, b, simd_extract(c, LANE as u32))
17df50a5
XL
11667}
11668
3c0e092e 11669/// Signed saturating doubling multiply-subtract long
f2b60f7d
FG
11670///
11671/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)
17df50a5
XL
11672#[inline]
11673#[target_feature(enable = "neon")]
3c0e092e 11674#[cfg_attr(test, assert_instr(sqdmlsl2))]
a2a8927a 11675#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11676pub unsafe fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
11677 vqsubq_s32(a, vqdmull_high_s16(b, c))
17df50a5
XL
11678}
11679
3c0e092e 11680/// Signed saturating doubling multiply-subtract long
f2b60f7d
FG
11681///
11682/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)
17df50a5
XL
11683#[inline]
11684#[target_feature(enable = "neon")]
3c0e092e 11685#[cfg_attr(test, assert_instr(sqdmlsl2))]
a2a8927a 11686#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11687pub unsafe fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
11688 vqsubq_s64(a, vqdmull_high_s32(b, c))
17df50a5
XL
11689}
11690
3c0e092e 11691/// Signed saturating doubling multiply-subtract long
f2b60f7d
FG
11692///
11693/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)
17df50a5
XL
11694#[inline]
11695#[target_feature(enable = "neon")]
3c0e092e 11696#[cfg_attr(test, assert_instr(sqdmlsl2))]
a2a8927a 11697#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11698pub unsafe fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
11699 vqsubq_s32(a, vqdmull_high_n_s16(b, c))
17df50a5
XL
11700}
11701
3c0e092e 11702/// Signed saturating doubling multiply-subtract long
f2b60f7d
FG
11703///
11704/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)
17df50a5
XL
11705#[inline]
11706#[target_feature(enable = "neon")]
3c0e092e 11707#[cfg_attr(test, assert_instr(sqdmlsl2))]
a2a8927a 11708#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11709pub unsafe fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
11710 vqsubq_s64(a, vqdmull_high_n_s32(b, c))
17df50a5
XL
11711}
11712
3c0e092e 11713/// Vector widening saturating doubling multiply subtract with scalar
f2b60f7d
FG
11714///
11715/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)
17df50a5
XL
11716#[inline]
11717#[target_feature(enable = "neon")]
3c0e092e
XL
11718#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))]
11719#[rustc_legacy_const_generics(3)]
a2a8927a 11720#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11721pub unsafe fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
353b0b11 11722 static_assert_uimm_bits!(N, 3);
3c0e092e 11723 vqsubq_s32(a, vqdmull_laneq_s16::<N>(b, c))
17df50a5
XL
11724}
11725
3c0e092e 11726/// Vector widening saturating doubling multiply subtract with scalar
f2b60f7d
FG
11727///
11728/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)
17df50a5
XL
11729#[inline]
11730#[target_feature(enable = "neon")]
3c0e092e
XL
11731#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))]
11732#[rustc_legacy_const_generics(3)]
a2a8927a 11733#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11734pub unsafe fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
353b0b11 11735 static_assert_uimm_bits!(N, 2);
3c0e092e 11736 vqsubq_s64(a, vqdmull_laneq_s32::<N>(b, c))
17df50a5
XL
11737}
11738
3c0e092e 11739/// Signed saturating doubling multiply-subtract long
f2b60f7d
FG
11740///
11741/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)
17df50a5
XL
11742#[inline]
11743#[target_feature(enable = "neon")]
3c0e092e
XL
11744#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
11745#[rustc_legacy_const_generics(3)]
a2a8927a 11746#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11747pub unsafe fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
353b0b11 11748 static_assert_uimm_bits!(N, 2);
3c0e092e 11749 vqsubq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
17df50a5
XL
11750}
11751
3c0e092e 11752/// Signed saturating doubling multiply-subtract long
f2b60f7d
FG
11753///
11754/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)
17df50a5
XL
11755#[inline]
11756#[target_feature(enable = "neon")]
3c0e092e
XL
11757#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
11758#[rustc_legacy_const_generics(3)]
a2a8927a 11759#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11760pub unsafe fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
353b0b11 11761 static_assert_uimm_bits!(N, 3);
3c0e092e 11762 vqsubq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
17df50a5
XL
11763}
11764
3c0e092e 11765/// Signed saturating doubling multiply-subtract long
f2b60f7d
FG
11766///
11767/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)
17df50a5
XL
11768#[inline]
11769#[target_feature(enable = "neon")]
3c0e092e
XL
11770#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
11771#[rustc_legacy_const_generics(3)]
a2a8927a 11772#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11773pub unsafe fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
353b0b11 11774 static_assert_uimm_bits!(N, 1);
3c0e092e 11775 vqsubq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
17df50a5
XL
11776}
11777
3c0e092e 11778/// Signed saturating doubling multiply-subtract long
f2b60f7d
FG
11779///
11780/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)
17df50a5
XL
11781#[inline]
11782#[target_feature(enable = "neon")]
3c0e092e
XL
11783#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
11784#[rustc_legacy_const_generics(3)]
a2a8927a 11785#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11786pub unsafe fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
353b0b11 11787 static_assert_uimm_bits!(N, 2);
3c0e092e 11788 vqsubq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
17df50a5
XL
11789}
11790
3c0e092e 11791/// Signed saturating doubling multiply-subtract long
f2b60f7d
FG
11792///
11793/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)
17df50a5
XL
11794#[inline]
11795#[target_feature(enable = "neon")]
49aad941 11796#[cfg_attr(test, assert_instr(sqdmlsl))]
a2a8927a 11797#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11798pub unsafe fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
11799 let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
11800 vqsubs_s32(a, simd_extract(x, 0))
17df50a5
XL
11801}
11802
3c0e092e 11803/// Signed saturating doubling multiply-subtract long
f2b60f7d
FG
11804///
11805/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)
17df50a5
XL
11806#[inline]
11807#[target_feature(enable = "neon")]
49aad941 11808#[cfg_attr(test, assert_instr(sqdmlsl))]
a2a8927a 11809#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11810pub unsafe fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
49aad941
FG
11811    vqsubd_s64(a, vqdmulls_s32(b, c))
17df50a5
XL
11813}
11814
3c0e092e 11815/// Signed saturating doubling multiply-subtract long
f2b60f7d
FG
11816///
11817/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)
17df50a5
XL
11818#[inline]
11819#[target_feature(enable = "neon")]
3c0e092e
XL
11820#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
11821#[rustc_legacy_const_generics(3)]
a2a8927a 11822#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11823pub unsafe fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
353b0b11 11824 static_assert_uimm_bits!(LANE, 2);
3c0e092e 11825 vqdmlslh_s16(a, b, simd_extract(c, LANE as u32))
17df50a5
XL
11826}
11827
3c0e092e 11828/// Signed saturating doubling multiply-subtract long
f2b60f7d
FG
11829///
11830/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)
17df50a5
XL
11831#[inline]
11832#[target_feature(enable = "neon")]
3c0e092e
XL
11833#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
11834#[rustc_legacy_const_generics(3)]
a2a8927a 11835#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11836pub unsafe fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
353b0b11 11837 static_assert_uimm_bits!(LANE, 3);
3c0e092e 11838 vqdmlslh_s16(a, b, simd_extract(c, LANE as u32))
17df50a5
XL
11839}
11840
3c0e092e 11841/// Signed saturating doubling multiply-subtract long
f2b60f7d
FG
11842///
11843/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)
17df50a5
XL
11844#[inline]
11845#[target_feature(enable = "neon")]
49aad941 11846#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
3c0e092e 11847#[rustc_legacy_const_generics(3)]
a2a8927a 11848#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11849pub unsafe fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
353b0b11 11850 static_assert_uimm_bits!(LANE, 1);
3c0e092e 11851 vqdmlsls_s32(a, b, simd_extract(c, LANE as u32))
17df50a5
XL
11852}
11853
3c0e092e 11854/// Signed saturating doubling multiply-subtract long
f2b60f7d
FG
11855///
11856/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)
17df50a5
XL
11857#[inline]
11858#[target_feature(enable = "neon")]
49aad941 11859#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
3c0e092e 11860#[rustc_legacy_const_generics(3)]
a2a8927a 11861#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 11862pub unsafe fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
353b0b11 11863 static_assert_uimm_bits!(LANE, 2);
3c0e092e 11864 vqdmlsls_s32(a, b, simd_extract(c, LANE as u32))
17df50a5
XL
11865}
11866
3c0e092e 11867/// Signed saturating doubling multiply returning high half
f2b60f7d
FG
11868///
11869/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)
17df50a5
XL
11870#[inline]
11871#[target_feature(enable = "neon")]
3c0e092e 11872#[cfg_attr(test, assert_instr(sqdmulh))]
a2a8927a 11873#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11874pub unsafe fn vqdmulhh_s16(a: i16, b: i16) -> i16 {
11875 let a: int16x4_t = vdup_n_s16(a);
11876 let b: int16x4_t = vdup_n_s16(b);
11877 simd_extract(vqdmulh_s16(a, b), 0)
17df50a5
XL
11878}
11879
3c0e092e 11880/// Signed saturating doubling multiply returning high half
f2b60f7d
FG
11881///
11882/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)
17df50a5
XL
11883#[inline]
11884#[target_feature(enable = "neon")]
3c0e092e 11885#[cfg_attr(test, assert_instr(sqdmulh))]
a2a8927a 11886#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
11887pub unsafe fn vqdmulhs_s32(a: i32, b: i32) -> i32 {
11888 let a: int32x2_t = vdup_n_s32(a);
11889 let b: int32x2_t = vdup_n_s32(b);
11890 simd_extract(vqdmulh_s32(a, b), 0)
17df50a5
XL
11891}
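
// Illustrative sketch (not generated): `sqdmulh` keeps only the high half of the
// doubled product, the usual primitive for Q15/Q31 fixed-point multiplication.
// Assumes the `simd_test` harness used by this crate's tests.
#[cfg(test)]
mod vqdmulh_scalar_usage_sketch {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn fixed_point_high_half() {
        // 0x4000 is 0.5 in Q15, so 0.5 * 0.5 comes back as 0.25 = 0x2000.
        assert_eq!(vqdmulhh_s16(0x4000, 0x4000), 0x2000);
        // The single saturating case: (-1.0) * (-1.0) clamps just below +1.0.
        assert_eq!(vqdmulhh_s16(i16::MIN, i16::MIN), i16::MAX);
        assert_eq!(vqdmulhs_s32(0x4000_0000, 0x4000_0000), 0x2000_0000);
    }
}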
11892
/// Signed saturating doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
    static_assert_uimm_bits!(N, 2);
    let b: i16 = simd_extract(b, N as u32);
    vqdmulhh_s16(a, b)
}

/// Signed saturating doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
    static_assert_uimm_bits!(N, 3);
    let b: i16 = simd_extract(b, N as u32);
    vqdmulhh_s16(a, b)
}

/// Signed saturating doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
    static_assert_uimm_bits!(N, 1);
    let b: i32 = simd_extract(b, N as u32);
    vqdmulhs_s32(a, b)
}

/// Signed saturating doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
    static_assert_uimm_bits!(N, 2);
    let b: i32 = simd_extract(b, N as u32);
    vqdmulhs_s32(a, b)
}

/// Vector saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    vqdmulh_s16(a, vdup_n_s16(simd_extract(b, LANE as u32)))
}

/// Vector saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    vqdmulhq_s16(a, vdupq_n_s16(simd_extract(b, LANE as u32)))
}

/// Vector saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    vqdmulh_s32(a, vdup_n_s32(simd_extract(b, LANE as u32)))
}

/// Vector saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    vqdmulhq_s32(a, vdupq_n_s32(simd_extract(b, LANE as u32)))
}

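// Editor's note (illustrative sketch): the vector-by-scalar `_lane_` variants
// broadcast one lane of `b` and apply the saturating doubling multiply-high to
// every lane of `a`, e.g. with LANE = 3 selecting b[3]:
//
//     let a = unsafe { vdup_n_s16(16384) };
//     let b = unsafe { vdup_n_s16(8192) };
//     let r = unsafe { vqdmulh_lane_s16::<3>(a, b) }; // every lane = (2*16384*8192) >> 16 = 4096
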
/// Saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovnh_s16(a: i16) -> i8 {
    simd_extract(vqmovn_s16(vdupq_n_s16(a)), 0)
}

/// Saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovns_s32(a: i32) -> i16 {
    simd_extract(vqmovn_s32(vdupq_n_s32(a)), 0)
}

/// Saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovnh_u16(a: u16) -> u8 {
    simd_extract(vqmovn_u16(vdupq_n_u16(a)), 0)
}

/// Saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovns_u32(a: u32) -> u16 {
    simd_extract(vqmovn_u32(vdupq_n_u32(a)), 0)
}

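// Editor's note (illustrative sketch): the scalar saturating narrows clamp the
// wider input to the range of the narrower result type, e.g.:
//
//     assert_eq!(unsafe { vqmovnh_s16(300) }, 127);   // i16 -> i8 saturates at i8::MAX
//     assert_eq!(unsafe { vqmovnh_s16(-300) }, -128); // and at i8::MIN
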
/// Saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovnd_s64(a: i64) -> i32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64")]
        fn vqmovnd_s64_(a: i64) -> i32;
    }
    vqmovnd_s64_(a)
}

/// Saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovnd_u64(a: u64) -> u32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64")]
        fn vqmovnd_u64_(a: u64) -> u32;
    }
    vqmovnd_u64_(a)
}

/// Signed saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    simd_shuffle!(a, vqmovn_s16(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Signed saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Signed saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3])
}

/// Unsigned saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    simd_shuffle!(a, vqmovn_u16(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Unsigned saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Unsigned saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3])
}

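// Editor's note (illustrative sketch): the `_high_` narrows keep `a` as the low
// half of the result and write the narrowed, saturated lanes of `b` into the
// high half, mirroring the SQXTN2/UQXTN2 instructions:
//
//     let lo = unsafe { vdup_n_s8(1) };
//     let wide = unsafe { vdupq_n_s16(1000) };
//     let r = unsafe { vqmovn_high_s16(lo, wide) }; // lanes 0..8 are 1, lanes 8..16 are 127
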
/// Signed saturating extract unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovunh_s16(a: i16) -> u8 {
    simd_extract(vqmovun_s16(vdupq_n_s16(a)), 0)
}

/// Signed saturating extract unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovuns_s32(a: i32) -> u16 {
    simd_extract(vqmovun_s32(vdupq_n_s32(a)), 0)
}

/// Signed saturating extract unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovund_s64(a: i64) -> u32 {
    simd_extract(vqmovun_s64(vdupq_n_s64(a)), 0)
}

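// Editor's note (illustrative sketch): the "unsigned narrow" forms take a signed
// input and clamp it to the unsigned result range, so negative values become 0:
//
//     assert_eq!(unsafe { vqmovunh_s16(-5) }, 0);
//     assert_eq!(unsafe { vqmovunh_s16(300) }, 255);
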
/// Signed saturating extract unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    simd_shuffle!(a, vqmovun_s16(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Signed saturating extract unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Signed saturating extract unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3])
}

/// Signed saturating rounding doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrdmulhh_s16(a: i16, b: i16) -> i16 {
    simd_extract(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0)
}

/// Signed saturating rounding doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrdmulhs_s32(a: i32, b: i32) -> i32 {
    simd_extract(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0)
}

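// Editor's note (illustrative sketch): the rounding variant adds 1 << 15 (or
// 1 << 31 for the s32 form) to the doubled product before taking the high half,
// so it rounds to nearest instead of truncating:
//
//     let r = unsafe { vqrdmulhh_s16(16384, 16383) };
//     assert_eq!(r, 8192); // the truncating vqdmulhh_s16 would give 8191
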
/// Signed saturating rounding doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
    static_assert_uimm_bits!(LANE, 2);
    vqrdmulhh_s16(a, simd_extract(b, LANE as u32))
}

/// Signed saturating rounding doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16 {
    static_assert_uimm_bits!(LANE, 3);
    vqrdmulhh_s16(a, simd_extract(b, LANE as u32))
}

/// Signed saturating rounding doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
    static_assert_uimm_bits!(LANE, 1);
    vqrdmulhs_s32(a, simd_extract(b, LANE as u32))
}

/// Signed saturating rounding doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrdmulhs_laneq_s32<const LANE: i32>(a: i32, b: int32x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    vqrdmulhs_s32(a, simd_extract(b, LANE as u32))
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrdmlah.v4i16")]
        fn vqrdmlah_s16_(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    vqrdmlah_s16_(a, b, c)
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrdmlah.v8i16")]
        fn vqrdmlahq_s16_(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    vqrdmlahq_s16_(a, b, c)
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrdmlah.v2i32")]
        fn vqrdmlah_s32_(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    vqrdmlah_s32_(a, b, c)
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrdmlah.v4i32")]
        fn vqrdmlahq_s32_(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    vqrdmlahq_s32_(a, b, c)
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    simd_extract(vqrdmlah_s16(a, b, c), 0)
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 {
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    simd_extract(vqrdmlah_s32(a, b, c), 0)
}

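// Editor's note (illustrative sketch): the accumulate form adds the rounded
// doubling multiply-high of `b` and `c` to `a`, saturating the final sum:
//
//     let r = unsafe { vqrdmlahh_s16(10, 16384, 16384) };
//     assert_eq!(r, 8202); // 10 + ((2 * 16384 * 16384 + (1 << 15)) >> 16)
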
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlah_s16(a, b, c)
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 3);
    let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlah_s16(a, b, c)
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlahq_s16(a, b, c)
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlahq_s16(a, b, c)
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
    vqrdmlah_s32(a, b, c)
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
    vqrdmlah_s32(a, b, c)
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlahq_s32(a, b, c)
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlahq_s32(a, b, c)
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    static_assert_uimm_bits!(LANE, 2);
    vqrdmlahh_s16(a, b, simd_extract(c, LANE as u32))
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    static_assert_uimm_bits!(LANE, 3);
    vqrdmlahh_s16(a, b, simd_extract(c, LANE as u32))
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    static_assert_uimm_bits!(LANE, 1);
    vqrdmlahs_s32(a, b, simd_extract(c, LANE as u32))
}

/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    vqrdmlahs_s32(a, b, simd_extract(c, LANE as u32))
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16")]
        fn vqrdmlsh_s16_(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    vqrdmlsh_s16_(a, b, c)
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16")]
        fn vqrdmlshq_s16_(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    vqrdmlshq_s16_(a, b, c)
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32")]
        fn vqrdmlsh_s32_(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    vqrdmlsh_s32_(a, b, c)
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32")]
        fn vqrdmlshq_s32_(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    vqrdmlshq_s32_(a, b, c)
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    simd_extract(vqrdmlsh_s16(a, b, c), 0)
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 {
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    simd_extract(vqrdmlsh_s32(a, b, c), 0)
}

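// Editor's note (illustrative sketch): the subtract form is the mirror image of
// vqrdmlah*, subtracting the rounded doubling multiply-high of `b` and `c` from `a`:
//
//     let r = unsafe { vqrdmlshh_s16(10, 16384, 16384) };
//     assert_eq!(r, -8182); // 10 - 8192
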
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlsh_s16(a, b, c)
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 3);
    let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlsh_s16(a, b, c)
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlshq_s16(a, b, c)
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlshq_s16(a, b, c)
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
    vqrdmlsh_s32(a, b, c)
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
    vqrdmlsh_s32(a, b, c)
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlshq_s32(a, b, c)
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlshq_s32(a, b, c)
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    static_assert_uimm_bits!(LANE, 2);
    vqrdmlshh_s16(a, b, simd_extract(c, LANE as u32))
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    static_assert_uimm_bits!(LANE, 3);
    vqrdmlshh_s16(a, b, simd_extract(c, LANE as u32))
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    static_assert_uimm_bits!(LANE, 1);
    vqrdmlshs_s32(a, b, simd_extract(c, LANE as u32))
}

/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    vqrdmlshs_s32(a, b, simd_extract(c, LANE as u32))
}

/// Signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshls_s32(a: i32, b: i32) -> i32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.i32")]
        fn vqrshls_s32_(a: i32, b: i32) -> i32;
    }
    vqrshls_s32_(a, b)
}

/// Signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshld_s64(a: i64, b: i64) -> i64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.i64")]
        fn vqrshld_s64_(a: i64, b: i64) -> i64;
    }
    vqrshld_s64_(a, b)
}

/// Signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshlb_s8(a: i8, b: i8) -> i8 {
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    simd_extract(vqrshl_s8(a, b), 0)
}

/// Signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshlh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    simd_extract(vqrshl_s16(a, b), 0)
}

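// Editor's note (illustrative sketch): the saturating rounding shift left takes a
// signed shift amount; a negative `b` becomes a rounding shift right, e.g.:
//
//     assert_eq!(unsafe { vqrshls_s32(16, 2) }, 64);  // 16 << 2
//     assert_eq!(unsafe { vqrshls_s32(5, -1) }, 3);   // (5 + 1) >> 1, rounded
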
/// Unsigned saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.i32")]
        fn vqrshls_u32_(a: u32, b: i32) -> u32;
    }
    vqrshls_u32_(a, b)
}

/// Unsigned saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshld_u64(a: u64, b: i64) -> u64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.i64")]
        fn vqrshld_u64_(a: u64, b: i64) -> u64;
    }
    vqrshld_u64_(a, b)
}

/// Unsigned saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshlb_u8(a: u8, b: i8) -> u8 {
    let a: uint8x8_t = vdup_n_u8(a);
    let b: int8x8_t = vdup_n_s8(b);
    simd_extract(vqrshl_u8(a, b), 0)
}

/// Unsigned saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshlh_u16(a: u16, b: i16) -> u16 {
    let a: uint16x4_t = vdup_n_u16(a);
    let b: int16x4_t = vdup_n_s16(b);
    simd_extract(vqrshl_u16(a, b), 0)
}

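// Editor's note (illustrative sketch): the unsigned forms keep the value unsigned
// but still take a signed shift amount, saturating at the unsigned maximum:
//
//     assert_eq!(unsafe { vqrshlb_u8(200, 1) }, 255); // 400 saturates to u8::MAX
//     assert_eq!(unsafe { vqrshlb_u8(200, -3) }, 25); // (200 + 4) >> 3
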
/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    static_assert!(N >= 1 && N <= 8);
    let a: int16x8_t = vdupq_n_s16(a);
    simd_extract(vqrshrn_n_s16::<N>(a), 0)
}

/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
    static_assert!(N >= 1 && N <= 16);
    let a: int32x4_t = vdupq_n_s32(a);
    simd_extract(vqrshrn_n_s32::<N>(a), 0)
}

/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    static_assert!(N >= 1 && N <= 32);
    let a: int64x2_t = vdupq_n_s64(a);
    simd_extract(vqrshrn_n_s64::<N>(a), 0)
}

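// Editor's note (illustrative sketch): `N` is the right-shift amount and must be
// in 1..=8 (1..=16 / 1..=32 for the wider forms); the shifted value is rounded,
// then saturated to the narrower type:
//
//     let r = unsafe { vqrshrnh_n_s16::<4>(100) };
//     assert_eq!(r, 6); // (100 + 8) >> 4
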
/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    simd_shuffle!(a, vqrshrn_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    simd_shuffle!(a, vqrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    simd_shuffle!(a, vqrshrn_n_s64::<N>(b), [0, 1, 2, 3])
}

/// Unsigned saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    let a: uint16x8_t = vdupq_n_u16(a);
    simd_extract(vqrshrn_n_u16::<N>(a), 0)
}

/// Unsigned saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    let a: uint32x4_t = vdupq_n_u32(a);
    simd_extract(vqrshrn_n_u32::<N>(a), 0)
}

/// Unsigned saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    let a: uint64x2_t = vdupq_n_u64(a);
    simd_extract(vqrshrn_n_u64::<N>(a), 0)
}

3c0e092e 13034/// Unsigned saturating rounded shift right narrow
f2b60f7d
FG
13035///
13036/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)
17df50a5
XL
13037#[inline]
13038#[target_feature(enable = "neon")]
3c0e092e
XL
13039#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
13040#[rustc_legacy_const_generics(2)]
a2a8927a 13041#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 13042pub unsafe fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
353b0b11
FG
13043 static_assert!(N >= 1 && N <= 8);
13044 simd_shuffle!(a, vqrshrn_n_u16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
17df50a5
XL
13045}
13046
3c0e092e 13047/// Unsigned saturating rounded shift right narrow
f2b60f7d
FG
13048///
13049/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)
17df50a5
XL
13050#[inline]
13051#[target_feature(enable = "neon")]
3c0e092e
XL
13052#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
13053#[rustc_legacy_const_generics(2)]
a2a8927a 13054#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 13055pub unsafe fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
353b0b11
FG
13056 static_assert!(N >= 1 && N <= 16);
13057 simd_shuffle!(a, vqrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
17df50a5
XL
13058}
13059
3c0e092e 13060/// Unsigned saturating rounded shift right narrow
f2b60f7d
FG
13061///
13062/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)
17df50a5
XL
13063#[inline]
13064#[target_feature(enable = "neon")]
3c0e092e
XL
13065#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
13066#[rustc_legacy_const_generics(2)]
a2a8927a 13067#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 13068pub unsafe fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
353b0b11
FG
13069 static_assert!(N >= 1 && N <= 32);
13070 simd_shuffle!(a, vqrshrn_n_u64::<N>(b), [0, 1, 2, 3])
17df50a5
XL
13071}
13072
3c0e092e 13073/// Signed saturating rounded shift right unsigned narrow
f2b60f7d
FG
13074///
13075/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)
17df50a5
XL
13076#[inline]
13077#[target_feature(enable = "neon")]
3c0e092e
XL
13078#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
13079#[rustc_legacy_const_generics(1)]
a2a8927a 13080#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 13081pub unsafe fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
353b0b11 13082 static_assert!(N >= 1 && N <= 8);
3c0e092e
XL
13083 let a: int16x8_t = vdupq_n_s16(a);
13084 simd_extract(vqrshrun_n_s16::<N>(a), 0)
17df50a5
XL
13085}
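
// Editorial usage sketch, not generated from neon.spec, assuming the
// `simd_test` helper from `stdarch_test`: the "unsigned narrow" forms take a
// signed input and clamp anything below zero to 0.
#[cfg(test)]
mod vqrshrunh_n_s16_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn clamps_negative_inputs_to_zero() {
        // (10 + 2) >> 2 = 3.
        assert_eq!(vqrshrunh_n_s16::<2>(10), 3);
        // A negative input saturates to the unsigned minimum.
        assert_eq!(vqrshrunh_n_s16::<2>(-50), 0);
    }
}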

/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    let a: int32x4_t = vdupq_n_s32(a);
    simd_extract(vqrshrun_n_s32::<N>(a), 0)
}

/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    let a: int64x2_t = vdupq_n_s64(a);
    simd_extract(vqrshrun_n_s64::<N>(a), 0)
}

/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    simd_shuffle!(a, vqrshrun_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    simd_shuffle!(a, vqrshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    simd_shuffle!(a, vqrshrun_n_s64::<N>(b), [0, 1, 2, 3])
}

/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshld_s64(a: i64, b: i64) -> i64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.i64")]
        fn vqshld_s64_(a: i64, b: i64) -> i64;
    }
    vqshld_s64_(a, b)
}
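
// Editorial usage sketch, not generated from neon.spec, assuming the
// `simd_test` helper from `stdarch_test`: the shift amount is a runtime value
// here, and results that would overflow the type saturate instead of wrapping.
#[cfg(test)]
mod vqshld_s64_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn saturates_instead_of_wrapping() {
        assert_eq!(vqshld_s64(1, 3), 8);
        // i64::MAX << 1 would overflow, so the result sticks at i64::MAX.
        assert_eq!(vqshld_s64(i64::MAX, 1), i64::MAX);
    }
}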

/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlb_s8(a: i8, b: i8) -> i8 {
    let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b));
    simd_extract(c, 0)
}

/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlh_s16(a: i16, b: i16) -> i16 {
    let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b));
    simd_extract(c, 0)
}

/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshls_s32(a: i32, b: i32) -> i32 {
    let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b));
    simd_extract(c, 0)
}

/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshld_u64(a: u64, b: i64) -> u64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.i64")]
        fn vqshld_u64_(a: u64, b: i64) -> u64;
    }
    vqshld_u64_(a, b)
}

/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlb_u8(a: u8, b: i8) -> u8 {
    let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b));
    simd_extract(c, 0)
}

/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlh_u16(a: u16, b: i16) -> u16 {
    let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b));
    simd_extract(c, 0)
}

/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshls_u32(a: u32, b: i32) -> u32 {
    let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b));
    simd_extract(c, 0)
}

/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
    static_assert_uimm_bits!(N, 3);
    simd_extract(vqshl_n_s8::<N>(vdup_n_s8(a)), 0)
}
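
// Editorial usage sketch, not generated from neon.spec, assuming the
// `simd_test` helper from `stdarch_test`: with the `_n_` forms the shift is a
// const generic checked at compile time, and the result still saturates.
#[cfg(test)]
mod vqshlb_n_s8_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn const_shift_saturates() {
        assert_eq!(vqshlb_n_s8::<2>(5), 20);
        // 100 << 2 = 400 does not fit in i8, so the result saturates to 127.
        assert_eq!(vqshlb_n_s8::<2>(100), 127);
    }
}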

/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
    static_assert_uimm_bits!(N, 4);
    simd_extract(vqshl_n_s16::<N>(vdup_n_s16(a)), 0)
}

/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
    static_assert_uimm_bits!(N, 5);
    simd_extract(vqshl_n_s32::<N>(vdup_n_s32(a)), 0)
}

/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
    static_assert_uimm_bits!(N, 6);
    simd_extract(vqshl_n_s64::<N>(vdup_n_s64(a)), 0)
}

/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
    static_assert_uimm_bits!(N, 3);
    simd_extract(vqshl_n_u8::<N>(vdup_n_u8(a)), 0)
}

/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
    static_assert_uimm_bits!(N, 4);
    simd_extract(vqshl_n_u16::<N>(vdup_n_u16(a)), 0)
}

/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
    static_assert_uimm_bits!(N, 5);
    simd_extract(vqshl_n_u32::<N>(vdup_n_u32(a)), 0)
}

/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
    static_assert_uimm_bits!(N, 6);
    simd_extract(vqshl_n_u64::<N>(vdup_n_u64(a)), 0)
}

/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
    static_assert_uimm_bits!(N, 3);
    simd_extract(vqshlu_n_s8::<N>(vdup_n_s8(a)), 0)
}
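
// Editorial usage sketch, not generated from neon.spec, assuming the
// `simd_test` helper from `stdarch_test`: the `lu` forms shift a signed value
// left and saturate into the unsigned range, so negative inputs become 0.
#[cfg(test)]
mod vqshlub_n_s8_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn shifts_into_the_unsigned_range() {
        // 100 << 1 = 200 fits in u8 even though it would overflow i8.
        assert_eq!(vqshlub_n_s8::<1>(100), 200);
        // Negative inputs saturate to 0.
        assert_eq!(vqshlub_n_s8::<1>(-1), 0);
    }
}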

/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
    static_assert_uimm_bits!(N, 4);
    simd_extract(vqshlu_n_s16::<N>(vdup_n_s16(a)), 0)
}

/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
    static_assert_uimm_bits!(N, 5);
    simd_extract(vqshlu_n_s32::<N>(vdup_n_s32(a)), 0)
}

/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
    static_assert_uimm_bits!(N, 6);
    simd_extract(vqshlu_n_s64::<N>(vdup_n_s64(a)), 0)
}

/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    static_assert!(N >= 1 && N <= 32);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrn.i32")]
        fn vqshrnd_n_s64_(a: i64, n: i32) -> i32;
    }
    vqshrnd_n_s64_(a, N)
}
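
// Editorial usage sketch, not generated from neon.spec, assuming the
// `simd_test` helper from `stdarch_test`: unlike the `vqrshr*` family these
// forms truncate (no rounding increment) before saturating to the narrower
// type.
#[cfg(test)]
mod vqshrnd_n_s64_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn truncates_then_saturates() {
        // 7 >> 2 = 1 with plain truncation.
        assert_eq!(vqshrnd_n_s64::<2>(7), 1);
        // The shifted value is still far too large for i32, so it saturates.
        assert_eq!(vqshrnd_n_s64::<1>(i64::MAX), i32::MAX);
    }
}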

/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    static_assert!(N >= 1 && N <= 8);
    simd_extract(vqshrn_n_s16::<N>(vdupq_n_s16(a)), 0)
}

/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
    static_assert!(N >= 1 && N <= 16);
    simd_extract(vqshrn_n_s32::<N>(vdupq_n_s32(a)), 0)
}

/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    simd_shuffle!(a, vqshrn_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    simd_shuffle!(a, vqshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    simd_shuffle!(a, vqshrn_n_s64::<N>(b), [0, 1, 2, 3])
}

/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshrn.i32")]
        fn vqshrnd_n_u64_(a: u64, n: i32) -> u32;
    }
    vqshrnd_n_u64_(a, N)
}

/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    simd_extract(vqshrn_n_u16::<N>(vdupq_n_u16(a)), 0)
}

/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    simd_extract(vqshrn_n_u32::<N>(vdupq_n_u32(a)), 0)
}

/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    simd_shuffle!(a, vqshrn_n_u16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    simd_shuffle!(a, vqshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    simd_shuffle!(a, vqshrn_n_u64::<N>(b), [0, 1, 2, 3])
}

/// Signed saturating shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    simd_extract(vqshrun_n_s16::<N>(vdupq_n_s16(a)), 0)
}

/// Signed saturating shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    simd_extract(vqshrun_n_s32::<N>(vdupq_n_s32(a)), 0)
}

/// Signed saturating shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    simd_extract(vqshrun_n_s64::<N>(vdupq_n_s64(a)), 0)
}

/// Signed saturating shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    simd_shuffle!(a, vqshrun_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Signed saturating shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    simd_shuffle!(a, vqshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Signed saturating shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    simd_shuffle!(a, vqshrun_n_s64::<N>(b), [0, 1, 2, 3])
}

/// Unsigned saturating accumulate of signed value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqaddb_u8(a: u8, b: i8) -> u8 {
    simd_extract(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0)
}
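
// Editorial usage sketch, not generated from neon.spec, assuming the
// `simd_test` helper from `stdarch_test`: USQADD adds a signed value to an
// unsigned accumulator and saturates the sum to the unsigned range.
#[cfg(test)]
mod vsqaddb_u8_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn saturates_at_both_ends() {
        assert_eq!(vsqaddb_u8(10, 5), 15);
        // 10 + (-20) would be negative, so the result saturates to 0.
        assert_eq!(vsqaddb_u8(10, -20), 0);
        // 200 + 100 exceeds u8::MAX, so the result saturates to 255.
        assert_eq!(vsqaddb_u8(200, 100), 255);
    }
}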

/// Unsigned saturating accumulate of signed value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqaddh_u16(a: u16, b: i16) -> u16 {
    simd_extract(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0)
}

/// Unsigned saturating accumulate of signed value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqadds_u32(a: u32, b: i32) -> u32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.usqadd.i32")]
        fn vsqadds_u32_(a: u32, b: i32) -> u32;
    }
    vsqadds_u32_(a, b)
}

/// Unsigned saturating accumulate of signed value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqaddd_u64(a: u64, b: i64) -> u64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.usqadd.i64")]
        fn vsqaddd_u64_(a: u64, b: i64) -> u64;
    }
    vsqaddd_u64_(a, b)
}

/// Calculates the square root of each lane.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqrt_f32(a: float32x2_t) -> float32x2_t {
    simd_fsqrt(a)
}

/// Calculates the square root of each lane.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqrtq_f32(a: float32x4_t) -> float32x4_t {
    simd_fsqrt(a)
}

/// Calculates the square root of each lane.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqrt_f64(a: float64x1_t) -> float64x1_t {
    simd_fsqrt(a)
}

/// Calculates the square root of each lane.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqrtq_f64(a: float64x2_t) -> float64x2_t {
    simd_fsqrt(a)
}

/// Reciprocal square-root estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frsqrte.v1f64")]
        fn vrsqrte_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrsqrte_f64_(a)
}

/// Reciprocal square-root estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frsqrte.v2f64")]
        fn vrsqrteq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrsqrteq_f64_(a)
}

/// Reciprocal square-root estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsqrtes_f32(a: f32) -> f32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frsqrte.f32")]
        fn vrsqrtes_f32_(a: f32) -> f32;
    }
    vrsqrtes_f32_(a)
}

/// Reciprocal square-root estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsqrted_f64(a: f64) -> f64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frsqrte.f64")]
        fn vrsqrted_f64_(a: f64) -> f64;
    }
    vrsqrted_f64_(a)
}

/// Floating-point reciprocal square root step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frsqrts.v1f64")]
        fn vrsqrts_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    vrsqrts_f64_(a, b)
}

/// Floating-point reciprocal square root step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frsqrts.v2f64")]
        fn vrsqrtsq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    vrsqrtsq_f64_(a, b)
}

/// Floating-point reciprocal square root step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frsqrts.f32")]
        fn vrsqrtss_f32_(a: f32, b: f32) -> f32;
    }
    vrsqrtss_f32_(a, b)
}

/// Floating-point reciprocal square root step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frsqrts.f64")]
        fn vrsqrtsd_f64_(a: f64, b: f64) -> f64;
    }
    vrsqrtsd_f64_(a, b)
}
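
// Editorial usage sketch, not generated from neon.spec, assuming the
// `simd_test` helper from `stdarch_test`: FRSQRTE only gives a rough estimate
// of 1/sqrt(x), while FRSQRTS computes (3 - a*b)/2, the correction factor for
// one Newton-Raphson step, so the two are meant to be used together.
#[cfg(test)]
mod vrsqrte_refinement_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn newton_raphson_refinement() {
        let x = 4.0f32;
        // Rough estimate of 1/sqrt(4) = 0.5, accurate to roughly 8 bits.
        let mut e = vrsqrtes_f32(x);
        // Each step multiplies by (3 - x*e*e)/2, roughly doubling the accuracy.
        e = e * vrsqrtss_f32(x * e, e);
        e = e * vrsqrtss_f32(x * e, e);
        assert!((e - 0.5).abs() < 1.0e-4);
    }
}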

/// Reciprocal estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecpe.v1f64")]
        fn vrecpe_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrecpe_f64_(a)
}

/// Reciprocal estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecpe.v2f64")]
        fn vrecpeq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrecpeq_f64_(a)
}

/// Reciprocal estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecpes_f32(a: f32) -> f32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecpe.f32")]
        fn vrecpes_f32_(a: f32) -> f32;
    }
    vrecpes_f32_(a)
}

/// Reciprocal estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecped_f64(a: f64) -> f64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecpe.f64")]
        fn vrecped_f64_(a: f64) -> f64;
    }
    vrecped_f64_(a)
}

/// Floating-point reciprocal step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecps.v1f64")]
        fn vrecps_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    vrecps_f64_(a, b)
}

/// Floating-point reciprocal step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecps.v2f64")]
        fn vrecpsq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    vrecpsq_f64_(a, b)
}

/// Floating-point reciprocal step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecpss_f32(a: f32, b: f32) -> f32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecps.f32")]
        fn vrecpss_f32_(a: f32, b: f32) -> f32;
    }
    vrecpss_f32_(a, b)
}

/// Floating-point reciprocal step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecpsd_f64(a: f64, b: f64) -> f64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecps.f64")]
        fn vrecpsd_f64_(a: f64, b: f64) -> f64;
    }
    vrecpsd_f64_(a, b)
}
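
// Editorial usage sketch, not generated from neon.spec, assuming the
// `simd_test` helper from `stdarch_test`: FRECPE estimates 1/x and FRECPS
// computes 2 - a*b, the Newton-Raphson correction factor for refining that
// estimate.
#[cfg(test)]
mod vrecpe_refinement_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn newton_raphson_refinement() {
        let x = 3.0f32;
        // Rough estimate of 1/3.
        let mut e = vrecpes_f32(x);
        // Each step multiplies by (2 - x*e), roughly doubling the accuracy.
        e = e * vrecpss_f32(x, e);
        e = e * vrecpss_f32(x, e);
        assert!((e - 1.0 / 3.0).abs() < 1.0e-4);
    }
}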

/// Floating-point reciprocal exponent
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecpxs_f32(a: f32) -> f32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecpx.f32")]
        fn vrecpxs_f32_(a: f32) -> f32;
    }
    vrecpxs_f32_(a)
}

/// Floating-point reciprocal exponent
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecpxd_f64(a: f64) -> f64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecpx.f64")]
        fn vrecpxd_f64_(a: f64) -> f64;
    }
    vrecpxd_f64_(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t {
    transmute(a)
}
14263
3c0e092e 14264/// Vector reinterpret cast operation
f2b60f7d
FG
14265///
14266/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)
17df50a5
XL
14267#[inline]
14268#[target_feature(enable = "neon")]
3c0e092e 14269#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14270#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14271pub unsafe fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
14272 transmute(a)
17df50a5
XL
14273}
14274
3c0e092e 14275/// Vector reinterpret cast operation
f2b60f7d
FG
14276///
14277/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)
17df50a5
XL
14278#[inline]
14279#[target_feature(enable = "neon")]
3c0e092e 14280#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14281#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14282pub unsafe fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
14283 transmute(a)
17df50a5
XL
14284}
14285
3c0e092e 14286/// Vector reinterpret cast operation
f2b60f7d
FG
14287///
14288/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)
17df50a5
XL
14289#[inline]
14290#[target_feature(enable = "neon")]
3c0e092e 14291#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14292#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14293pub unsafe fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
14294 transmute(a)
17df50a5
XL
14295}
14296
3c0e092e 14297/// Vector reinterpret cast operation
f2b60f7d
FG
14298///
14299/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)
17df50a5
XL
14300#[inline]
14301#[target_feature(enable = "neon")]
3c0e092e 14302#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14303#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14304pub unsafe fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
14305 transmute(a)
17df50a5
XL
14306}
14307
3c0e092e 14308/// Vector reinterpret cast operation
f2b60f7d
FG
14309///
14310/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)
17df50a5
XL
14311#[inline]
14312#[target_feature(enable = "neon")]
3c0e092e 14313#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14314#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14315pub unsafe fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
14316 transmute(a)
17df50a5
XL
14317}
14318
3c0e092e 14319/// Vector reinterpret cast operation
f2b60f7d
FG
14320///
14321/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)
17df50a5
XL
14322#[inline]
14323#[target_feature(enable = "neon")]
3c0e092e 14324#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14325#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14326pub unsafe fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
14327 transmute(a)
17df50a5
XL
14328}
14329
3c0e092e 14330/// Vector reinterpret cast operation
f2b60f7d
FG
14331///
14332/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)
17df50a5
XL
14333#[inline]
14334#[target_feature(enable = "neon")]
3c0e092e 14335#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14336#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14337pub unsafe fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
14338 transmute(a)
17df50a5
XL
14339}
14340
3c0e092e 14341/// Vector reinterpret cast operation
f2b60f7d
FG
14342///
14343/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)
17df50a5
XL
14344#[inline]
14345#[target_feature(enable = "neon")]
3c0e092e 14346#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14347#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14348pub unsafe fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t {
14349 transmute(a)
17df50a5
XL
14350}
14351
3c0e092e 14352/// Vector reinterpret cast operation
f2b60f7d
FG
14353///
14354/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)
3c0e092e
XL
14355#[inline]
14356#[target_feature(enable = "neon")]
14357#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14358#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14359pub unsafe fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
14360 transmute(a)
14361}
17df50a5 14362
3c0e092e 14363/// Vector reinterpret cast operation
f2b60f7d
FG
14364///
14365/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)
3c0e092e
XL
14366#[inline]
14367#[target_feature(enable = "neon")]
14368#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14369#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14370pub unsafe fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
14371 transmute(a)
14372}
14373
14374/// Vector reinterpret cast operation
f2b60f7d
FG
14375///
14376/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)
3c0e092e
XL
14377#[inline]
14378#[target_feature(enable = "neon")]
14379#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14380#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14381pub unsafe fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
14382 transmute(a)
14383}
14384
14385/// Vector reinterpret cast operation
f2b60f7d
FG
14386///
14387/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)
3c0e092e
XL
14388#[inline]
14389#[target_feature(enable = "neon")]
14390#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14391#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14392pub unsafe fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
14393 transmute(a)
14394}
14395
14396/// Vector reinterpret cast operation
f2b60f7d
FG
14397///
14398/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)
3c0e092e
XL
14399#[inline]
14400#[target_feature(enable = "neon")]
14401#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14402#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14403pub unsafe fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
14404 transmute(a)
14405}
14406
14407/// Vector reinterpret cast operation
f2b60f7d
FG
14408///
14409/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)
3c0e092e
XL
14410#[inline]
14411#[target_feature(enable = "neon")]
14412#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14413#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14414pub unsafe fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
14415 transmute(a)
14416}
14417
14418/// Vector reinterpret cast operation
f2b60f7d
FG
14419///
14420/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)
3c0e092e
XL
14421#[inline]
14422#[target_feature(enable = "neon")]
14423#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14424#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14425pub unsafe fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
14426 transmute(a)
14427}
14428
14429/// Vector reinterpret cast operation
f2b60f7d
FG
14430///
14431/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)
3c0e092e
XL
14432#[inline]
14433#[target_feature(enable = "neon")]
14434#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14435#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14436pub unsafe fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
14437 transmute(a)
14438}
14439
14440/// Vector reinterpret cast operation
f2b60f7d
FG
14441///
14442/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)
3c0e092e
XL
14443#[inline]
14444#[target_feature(enable = "neon")]
14445#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14446#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14447pub unsafe fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t {
14448 transmute(a)
14449}
14450
14451/// Vector reinterpret cast operation
f2b60f7d
FG
14452///
14453/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)
3c0e092e
XL
14454#[inline]
14455#[target_feature(enable = "neon")]
14456#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14457#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14458pub unsafe fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
14459 transmute(a)
14460}
14461
14462/// Vector reinterpret cast operation
f2b60f7d
FG
14463///
14464/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)
3c0e092e
XL
14465#[inline]
14466#[target_feature(enable = "neon")]
14467#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14468#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14469pub unsafe fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
14470 transmute(a)
14471}
14472
14473/// Vector reinterpret cast operation
f2b60f7d
FG
14474///
14475/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)
3c0e092e
XL
14476#[inline]
14477#[target_feature(enable = "neon")]
14478#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14479#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14480pub unsafe fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
14481 transmute(a)
14482}
14483
14484/// Vector reinterpret cast operation
f2b60f7d
FG
14485///
14486/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)
3c0e092e
XL
14487#[inline]
14488#[target_feature(enable = "neon")]
14489#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14490#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14491pub unsafe fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
14492 transmute(a)
14493}
14494
14495/// Vector reinterpret cast operation
f2b60f7d
FG
14496///
14497/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)
3c0e092e
XL
14498#[inline]
14499#[target_feature(enable = "neon")]
14500#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14501#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14502pub unsafe fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
14503 transmute(a)
14504}
14505
14506/// Vector reinterpret cast operation
f2b60f7d
FG
14507///
14508/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)
3c0e092e
XL
14509#[inline]
14510#[target_feature(enable = "neon")]
14511#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14512#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14513pub unsafe fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
14514 transmute(a)
14515}
14516
14517/// Vector reinterpret cast operation
f2b60f7d
FG
14518///
14519/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)
3c0e092e
XL
14520#[inline]
14521#[target_feature(enable = "neon")]
14522#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14523#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14524pub unsafe fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
14525 transmute(a)
14526}
14527
14528/// Vector reinterpret cast operation
f2b60f7d
FG
14529///
14530/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)
3c0e092e
XL
14531#[inline]
14532#[target_feature(enable = "neon")]
14533#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14534#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14535pub unsafe fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t {
14536 transmute(a)
14537}
14538
14539/// Vector reinterpret cast operation
f2b60f7d
FG
14540///
14541/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)
3c0e092e
XL
14542#[inline]
14543#[target_feature(enable = "neon")]
14544#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14545#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14546pub unsafe fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
14547 transmute(a)
14548}
14549
14550/// Vector reinterpret cast operation
f2b60f7d
FG
14551///
14552/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)
3c0e092e
XL
14553#[inline]
14554#[target_feature(enable = "neon")]
14555#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14556#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14557pub unsafe fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
14558 transmute(a)
14559}
14560
14561/// Vector reinterpret cast operation
f2b60f7d
FG
14562///
14563/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)
3c0e092e
XL
14564#[inline]
14565#[target_feature(enable = "neon")]
14566#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14567#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14568pub unsafe fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
14569 transmute(a)
14570}
14571
14572/// Vector reinterpret cast operation
f2b60f7d
FG
14573///
14574/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)
3c0e092e
XL
14575#[inline]
14576#[target_feature(enable = "neon")]
14577#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14578#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14579pub unsafe fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
14580 transmute(a)
14581}
14582
14583/// Vector reinterpret cast operation
f2b60f7d
FG
14584///
14585/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)
3c0e092e
XL
14586#[inline]
14587#[target_feature(enable = "neon")]
14588#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14589#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14590pub unsafe fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
14591 transmute(a)
14592}
14593
14594/// Vector reinterpret cast operation
f2b60f7d
FG
14595///
14596/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)
3c0e092e
XL
14597#[inline]
14598#[target_feature(enable = "neon")]
14599#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14600#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14601pub unsafe fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
14602 transmute(a)
14603}
14604
14605/// Vector reinterpret cast operation
f2b60f7d
FG
14606///
14607/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)
3c0e092e
XL
14608#[inline]
14609#[target_feature(enable = "neon")]
14610#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14611#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14612pub unsafe fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t {
14613 transmute(a)
14614}
14615
14616/// Vector reinterpret cast operation
f2b60f7d
FG
14617///
14618/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)
3c0e092e
XL
14619#[inline]
14620#[target_feature(enable = "neon")]
14621#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14622#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14623pub unsafe fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
14624 transmute(a)
14625}
14626
14627/// Vector reinterpret cast operation
f2b60f7d
FG
14628///
14629/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)
3c0e092e
XL
14630#[inline]
14631#[target_feature(enable = "neon")]
14632#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14633#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14634pub unsafe fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
14635 transmute(a)
14636}
14637
14638/// Vector reinterpret cast operation
f2b60f7d
FG
14639///
14640/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)
3c0e092e
XL
14641#[inline]
14642#[target_feature(enable = "neon")]
14643#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14644#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14645pub unsafe fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
14646 transmute(a)
14647}
14648
14649/// Vector reinterpret cast operation
f2b60f7d
FG
14650///
14651/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)
3c0e092e
XL
14652#[inline]
14653#[target_feature(enable = "neon")]
14654#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14655#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14656pub unsafe fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
14657 transmute(a)
14658}
14659
14660/// Vector reinterpret cast operation
f2b60f7d
FG
14661///
14662/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)
3c0e092e
XL
14663#[inline]
14664#[target_feature(enable = "neon")]
14665#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14666#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14667pub unsafe fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
14668 transmute(a)
14669}
14670
14671/// Vector reinterpret cast operation
f2b60f7d
FG
14672///
14673/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)
3c0e092e
XL
14674#[inline]
14675#[target_feature(enable = "neon")]
14676#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14677#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14678pub unsafe fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
14679 transmute(a)
14680}
14681
14682/// Vector reinterpret cast operation
f2b60f7d
FG
14683///
14684/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)
3c0e092e
XL
14685#[inline]
14686#[target_feature(enable = "neon")]
14687#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14688#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14689pub unsafe fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
14690 transmute(a)
14691}
14692
14693/// Vector reinterpret cast operation
f2b60f7d
FG
14694///
14695/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)
3c0e092e
XL
14696#[inline]
14697#[target_feature(enable = "neon")]
14698#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14699#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14700pub unsafe fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
14701 transmute(a)
14702}
14703
14704/// Vector reinterpret cast operation
f2b60f7d
FG
14705///
14706/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)
3c0e092e
XL
14707#[inline]
14708#[target_feature(enable = "neon")]
14709#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14710#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14711pub unsafe fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
14712 transmute(a)
14713}
14714
14715/// Vector reinterpret cast operation
f2b60f7d
FG
14716///
14717/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)
3c0e092e
XL
14718#[inline]
14719#[target_feature(enable = "neon")]
14720#[cfg_attr(test, assert_instr(nop))]
a2a8927a 14721#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e
XL
14722pub unsafe fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
14723 transmute(a)
14724}
14725
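// Editorial usage sketch (not emitted by stdarch-gen; the test name and values
// are illustrative, and the usual little-endian lane layout is assumed): a
// reinterpret cast only changes the element type of the underlying bits, so
// converting to another lane type and back is the identity.
#[cfg(test)]
#[test]
fn vreinterpretq_f64_roundtrip_example() {
    unsafe {
        let v: float64x2_t = transmute([1.0f64, -2.0f64]);
        let bits: uint32x4_t = vreinterpretq_u32_f64(v);
        let back: [f64; 2] = transmute(vreinterpretq_f64_u32(bits));
        assert_eq!(back, [1.0, -2.0]);
    }
}
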
/// Signed rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshld_s64(a: i64, b: i64) -> i64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.i64")]
        fn vrshld_s64_(a: i64, b: i64) -> i64;
    }
    vrshld_s64_(a, b)
}

/// Unsigned rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshld_u64(a: u64, b: i64) -> u64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.i64")]
        fn vrshld_u64_(a: u64, b: i64) -> u64;
    }
    vrshld_u64_(a, b)
}

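// Editorial usage sketch (illustrative values, not part of the generated
// spec): a positive shift operand shifts left, a negative one performs a
// rounding shift right, i.e. (7 + (1 << 1)) >> 2 == 2.
#[cfg(test)]
#[test]
fn vrshld_s64_example() {
    unsafe {
        assert_eq!(vrshld_s64(3, 2), 12); // 3 << 2
        assert_eq!(vrshld_s64(7, -2), 2); // rounded, where a plain shift gives 1
    }
}
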
/// Signed rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    vrshld_s64(a, -N as i64)
}

/// Unsigned rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    vrshld_u64(a, -N as i64)
}

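// Editorial usage sketch (illustrative values): the rounding constant
// 1 << (N - 1) is added before shifting, so 7 >> 2 rounds to 2 instead of
// truncating to 1.
#[cfg(test)]
#[test]
fn vrshrd_n_example() {
    unsafe {
        assert_eq!(vrshrd_n_s64::<2>(7), 2);
        assert_eq!(vrshrd_n_u64::<2>(7u64), 2);
    }
}
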
/// Rounding shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    simd_shuffle!(a, vrshrn_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Rounding shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    simd_shuffle!(a, vrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Rounding shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    simd_shuffle!(a, vrshrn_n_s64::<N>(b), [0, 1, 2, 3])
}

/// Rounding shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    simd_shuffle!(a, vrshrn_n_u16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Rounding shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    simd_shuffle!(a, vrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Rounding shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    simd_shuffle!(a, vrshrn_n_u64::<N>(b), [0, 1, 2, 3])
}

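// Editorial usage sketch (illustrative values, little-endian lane order
// assumed): the `_high_` narrowing forms keep `a` as the low half of the
// result and append the rounded, narrowed shift of `b` as the high half.
#[cfg(test)]
#[test]
fn vrshrn_high_n_s16_example() {
    unsafe {
        let a: int8x8_t = transmute([0i8, 1, 2, 3, 4, 5, 6, 7]);
        let b: int16x8_t = transmute([4i16, 8, 12, 16, 20, 24, 28, 32]);
        // Each lane of `b` is rounded and shifted right by 2.
        let r: [i8; 16] = transmute(vrshrn_high_n_s16::<2>(a, b));
        assert_eq!(r, [0, 1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 8]);
    }
}
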
/// Signed rounding shift right and accumulate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srsra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    let b: i64 = vrshrd_n_s64::<N>(b);
    a.wrapping_add(b)
}

/// Unsigned rounding shift right and accumulate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ursra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    let b: u64 = vrshrd_n_u64::<N>(b);
    a.wrapping_add(b)
}

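// Editorial usage sketch (illustrative values): equivalent to adding the
// rounding shift right of `b` to `a`, as the bodies above show.
#[cfg(test)]
#[test]
fn vrsrad_n_s64_example() {
    unsafe {
        // 10 + ((7 + 2) >> 2) == 12
        assert_eq!(vrsrad_n_s64::<2>(10, 7), 12);
    }
}
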
/// Rounding subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    let x: int8x8_t = vrsubhn_s16(b, c);
    simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Rounding subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    let x: int16x4_t = vrsubhn_s32(b, c);
    simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Rounding subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    let x: int32x2_t = vrsubhn_s64(b, c);
    simd_shuffle!(a, x, [0, 1, 2, 3])
}

/// Rounding subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    let x: uint8x8_t = vrsubhn_u16(b, c);
    simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Rounding subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    let x: uint16x4_t = vrsubhn_u32(b, c);
    simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Rounding subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    let x: uint32x2_t = vrsubhn_u64(b, c);
    simd_shuffle!(a, x, [0, 1, 2, 3])
}

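// Editorial usage sketch (illustrative values, little-endian lane order
// assumed): the rounded high half of `b - c` is narrowed and appended to `a`;
// a difference of 128 rounds up to 1 where the non-rounding form gives 0.
#[cfg(test)]
#[test]
fn vrsubhn_high_s16_example() {
    unsafe {
        let a: int8x8_t = transmute([9i8; 8]);
        let b: int16x8_t = transmute([128i16; 8]);
        let c: int16x8_t = transmute([0i16; 8]);
        let r: [i8; 16] = transmute(vrsubhn_high_s16(a, b, c));
        assert_eq!(r, [9, 9, 9, 9, 9, 9, 9, 9, 1, 1, 1, 1, 1, 1, 1, 1]);
    }
}
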
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
    static_assert!(LANE == 0);
    simd_insert(b, LANE as u32, a)
}

/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    simd_insert(b, LANE as u32, a)
}

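// Editorial usage sketch (illustrative values): the scalar replaces lane
// `LANE`; all other lanes are copied through unchanged.
#[cfg(test)]
#[test]
fn vsetq_lane_f64_example() {
    unsafe {
        let v: float64x2_t = transmute([1.0f64, 2.0f64]);
        let r: [f64; 2] = transmute(vsetq_lane_f64::<1>(5.0, v));
        assert_eq!(r, [1.0, 5.0]);
    }
}
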
/// Signed Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshld_s64(a: i64, b: i64) -> i64 {
    transmute(vshl_s64(transmute(a), transmute(b)))
}

/// Unsigned Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshld_u64(a: u64, b: i64) -> u64 {
    transmute(vshl_u64(transmute(a), transmute(b)))
}

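// Editorial usage sketch (illustrative values): unlike `vrshld_s64`, a
// negative shift operand here truncates rather than rounds.
#[cfg(test)]
#[test]
fn vshld_s64_example() {
    unsafe {
        assert_eq!(vshld_s64(3, 2), 12); // 3 << 2
        assert_eq!(vshld_s64(7, -2), 1); // 7 >> 2, truncating
    }
}
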
/// Signed shift left long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
    static_assert!(N >= 0 && N <= 8);
    let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
    vshll_n_s8::<N>(b)
}

/// Signed shift left long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
    static_assert!(N >= 0 && N <= 16);
    let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
    vshll_n_s16::<N>(b)
}

/// Signed shift left long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
    static_assert!(N >= 0 && N <= 32);
    let b: int32x2_t = simd_shuffle!(a, a, [2, 3]);
    vshll_n_s32::<N>(b)
}

/// Unsigned shift left long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
    static_assert!(N >= 0 && N <= 8);
    let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
    vshll_n_u8::<N>(b)
}

/// Unsigned shift left long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
    static_assert!(N >= 0 && N <= 16);
    let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
    vshll_n_u16::<N>(b)
}

/// Unsigned shift left long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
    static_assert!(N >= 0 && N <= 32);
    let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
    vshll_n_u32::<N>(b)
}

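// Editorial usage sketch (illustrative values, little-endian lane order
// assumed): the `_high_` widening shifts take the upper half of the input
// lanes, widen them, then shift left by `N`.
#[cfg(test)]
#[test]
fn vshll_high_n_s8_example() {
    unsafe {
        let a: int8x16_t = transmute([0i8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
        let r: [i16; 8] = transmute(vshll_high_n_s8::<1>(a));
        assert_eq!(r, [16, 18, 20, 22, 24, 26, 28, 30]);
    }
}
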
/// Shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    simd_shuffle!(a, vshrn_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    simd_shuffle!(a, vshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    simd_shuffle!(a, vshrn_n_s64::<N>(b), [0, 1, 2, 3])
}

/// Shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    simd_shuffle!(a, vshrn_n_u16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    simd_shuffle!(a, vshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    simd_shuffle!(a, vshrn_n_u64::<N>(b), [0, 1, 2, 3])
}

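// Editorial usage sketch (illustrative values, little-endian lane order
// assumed): the non-rounding counterpart of `vrshrn_high_n_*`, so 7 >> 2
// narrows to 1 in every high lane.
#[cfg(test)]
#[test]
fn vshrn_high_n_s16_example() {
    unsafe {
        let a: int8x8_t = transmute([0i8; 8]);
        let b: int16x8_t = transmute([7i16; 8]);
        let r: [i8; 16] = transmute(vshrn_high_n_s16::<2>(a, b));
        assert_eq!(r, [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]);
    }
}
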
/// SM3PARTW1
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw1))]
pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm3partw1")]
        fn vsm3partw1q_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    vsm3partw1q_u32_(a, b, c)
}

/// SM3PARTW2
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw2))]
pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm3partw2")]
        fn vsm3partw2q_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    vsm3partw2q_u32_(a, b, c)
}

/// SM3SS1
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3ss1))]
pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm3ss1")]
        fn vsm3ss1q_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    vsm3ss1q_u32_(a, b, c)
}

/// SM4 key
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4ekey))]
pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm4ekey")]
        fn vsm4ekeyq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    vsm4ekeyq_u32_(a, b)
}

/// SM4 encode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4e))]
pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm4e")]
        fn vsm4eq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    vsm4eq_u32_(a, b)
}

/// Rotate and exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(rax1))]
pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.rax1")]
        fn vrax1q_u64_(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    vrax1q_u64_(a, b)
}

/// SHA512 hash update part 1
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h))]
pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha512h")]
        fn vsha512hq_u64_(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    vsha512hq_u64_(a, b, c)
}

/// SHA512 hash update part 2
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h2))]
pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha512h2")]
        fn vsha512h2q_u64_(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    vsha512h2q_u64_(a, b, c)
}

/// SHA512 schedule update 0
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su0))]
pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha512su0")]
        fn vsha512su0q_u64_(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    vsha512su0q_u64_(a, b)
}

/// SHA512 schedule update 1
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su1))]
pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha512su1")]
        fn vsha512su1q_u64_(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    vsha512su1q_u64_(a, b, c)
}

/// Floating-point round to 32-bit integer, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(test, assert_instr(frint32x))]
pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frint32x.v2f32")]
        fn vrnd32x_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrnd32x_f32_(a)
}

/// Floating-point round to 32-bit integer, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(test, assert_instr(frint32x))]
pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frint32x.v4f32")]
        fn vrnd32xq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrnd32xq_f32_(a)
}

/// Floating-point round to 32-bit integer toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(test, assert_instr(frint32z))]
pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frint32z.v2f32")]
        fn vrnd32z_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrnd32z_f32_(a)
}

/// Floating-point round to 32-bit integer toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(test, assert_instr(frint32z))]
pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frint32z.v4f32")]
        fn vrnd32zq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrnd32zq_f32_(a)
}

/// Floating-point round to 64-bit integer, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(test, assert_instr(frint64x))]
pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frint64x.v2f32")]
        fn vrnd64x_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrnd64x_f32_(a)
}

/// Floating-point round to 64-bit integer, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(test, assert_instr(frint64x))]
pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frint64x.v4f32")]
        fn vrnd64xq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrnd64xq_f32_(a)
}

/// Floating-point round to 64-bit integer toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(test, assert_instr(frint64z))]
pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frint64z.v2f32")]
        fn vrnd64z_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrnd64z_f32_(a)
}

/// Floating-point round to 64-bit integer toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(test, assert_instr(frint64z))]
pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frint64z.v4f32")]
        fn vrnd64zq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrnd64zq_f32_(a)
}

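// Editorial usage sketch (illustrative values; requires a CPU with the
// FEAT_FRINTTS extension at runtime, and assumes the default round-to-nearest,
// ties-to-even rounding mode).
#[cfg(test)]
#[test]
fn vrnd32x_f32_example() {
    unsafe {
        let a: float32x2_t = transmute([1.25f32, -1.75f32]);
        let r: [f32; 2] = transmute(vrnd32x_f32(a));
        assert_eq!(r, [1.0, -2.0]);
    }
}
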
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
}

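// Editorial usage sketch (illustrative values, little-endian lane order
// assumed): `trn1` interleaves the even-numbered lanes of `a` and `b`.
#[cfg(test)]
#[test]
fn vtrn1_s8_example() {
    unsafe {
        let a: int8x8_t = transmute([0i8, 1, 2, 3, 4, 5, 6, 7]);
        let b: int8x8_t = transmute([10i8, 11, 12, 13, 14, 15, 16, 17]);
        let r: [i8; 8] = transmute(vtrn1_s8(a, b));
        assert_eq!(r, [0, 10, 2, 12, 4, 14, 6, 16]);
    }
}
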
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    simd_shuffle!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30])
}

/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    simd_shuffle!(a, b, [0, 4, 2, 6])
}

/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
}

/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    simd_shuffle!(a, b, [0, 4, 2, 6])
}

/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
}

/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    simd_shuffle!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30])
}

/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    simd_shuffle!(a, b, [0, 4, 2, 6])
}

/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
}

/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15548pub unsafe fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
353b0b11 15549 simd_shuffle!(a, b, [0, 4, 2, 6])
3c0e092e
XL
15550}
15551
15552/// Transpose vectors
f2b60f7d
FG
15553///
15554/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)
3c0e092e
XL
15555#[inline]
15556#[target_feature(enable = "neon")]
15557#[cfg_attr(test, assert_instr(trn1))]
a2a8927a 15558#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15559pub unsafe fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
353b0b11 15560 simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
3c0e092e
XL
15561}
15562
15563/// Transpose vectors
f2b60f7d
FG
15564///
15565/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)
3c0e092e
XL
15566#[inline]
15567#[target_feature(enable = "neon")]
15568#[cfg_attr(test, assert_instr(trn1))]
a2a8927a 15569#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15570pub unsafe fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
353b0b11 15571 simd_shuffle!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30])
3c0e092e
XL
15572}
15573
15574/// Transpose vectors
f2b60f7d
FG
15575///
15576/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)
3c0e092e
XL
15577#[inline]
15578#[target_feature(enable = "neon")]
15579#[cfg_attr(test, assert_instr(trn1))]
a2a8927a 15580#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15581pub unsafe fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
353b0b11 15582 simd_shuffle!(a, b, [0, 4, 2, 6])
3c0e092e
XL
15583}
15584
15585/// Transpose vectors
f2b60f7d
FG
15586///
15587/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)
3c0e092e
XL
15588#[inline]
15589#[target_feature(enable = "neon")]
15590#[cfg_attr(test, assert_instr(trn1))]
a2a8927a 15591#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15592pub unsafe fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
353b0b11 15593 simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
3c0e092e
XL
15594}
15595
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    simd_shuffle!(a, b, [0, 2])
}
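// Editorial note: with only two lanes there is no distinct transpose; TRN1 and
// ZIP1 compute the same permutation ([a0, b0]), which is why the test above
// asserts `zip1` rather than `trn1`. The same applies to the other two-lane
// variants below.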
15607/// Transpose vectors
f2b60f7d
FG
15608///
15609/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)
3c0e092e
XL
15610#[inline]
15611#[target_feature(enable = "neon")]
15612#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 15613#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15614pub unsafe fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
353b0b11 15615 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
15616}
15617
15618/// Transpose vectors
f2b60f7d
FG
15619///
15620/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)
3c0e092e
XL
15621#[inline]
15622#[target_feature(enable = "neon")]
15623#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 15624#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15625pub unsafe fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
353b0b11 15626 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
15627}
15628
15629/// Transpose vectors
f2b60f7d
FG
15630///
15631/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)
3c0e092e
XL
15632#[inline]
15633#[target_feature(enable = "neon")]
15634#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 15635#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15636pub unsafe fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
353b0b11 15637 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
15638}
15639
15640/// Transpose vectors
f2b60f7d
FG
15641///
15642/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)
3c0e092e
XL
15643#[inline]
15644#[target_feature(enable = "neon")]
15645#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 15646#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15647pub unsafe fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
353b0b11 15648 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
15649}
15650
15651/// Transpose vectors
f2b60f7d
FG
15652///
15653/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)
3c0e092e
XL
15654#[inline]
15655#[target_feature(enable = "neon")]
15656#[cfg_attr(test, assert_instr(trn1))]
a2a8927a 15657#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15658pub unsafe fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
353b0b11 15659 simd_shuffle!(a, b, [0, 4, 2, 6])
3c0e092e
XL
15660}
15661
15662/// Transpose vectors
f2b60f7d
FG
15663///
15664/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)
3c0e092e
XL
15665#[inline]
15666#[target_feature(enable = "neon")]
15667#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 15668#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15669pub unsafe fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
353b0b11 15670 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
15671}
15672
15673/// Transpose vectors
f2b60f7d
FG
15674///
15675/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)
3c0e092e
XL
15676#[inline]
15677#[target_feature(enable = "neon")]
15678#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 15679#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15680pub unsafe fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
353b0b11 15681 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
15682}
15683
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
}
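// Editorial note: TRN2 is the odd-lane counterpart of TRN1, so for
// a = [a0, ..., a7] and b = [b0, ..., b7] the result is
// [a1, b1, a3, b3, a5, b5, a7, b7]; using vtrn1_s8 and vtrn2_s8 together
// performs a 2x2 transpose of adjacent lane pairs across the two inputs.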
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    simd_shuffle!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31])
}
15706/// Transpose vectors
f2b60f7d
FG
15707///
15708/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)
3c0e092e
XL
15709#[inline]
15710#[target_feature(enable = "neon")]
15711#[cfg_attr(test, assert_instr(trn2))]
a2a8927a 15712#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15713pub unsafe fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
353b0b11 15714 simd_shuffle!(a, b, [1, 5, 3, 7])
3c0e092e
XL
15715}
15716
15717/// Transpose vectors
f2b60f7d
FG
15718///
15719/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)
3c0e092e
XL
15720#[inline]
15721#[target_feature(enable = "neon")]
15722#[cfg_attr(test, assert_instr(trn2))]
a2a8927a 15723#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15724pub unsafe fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
353b0b11 15725 simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
3c0e092e
XL
15726}
15727
15728/// Transpose vectors
f2b60f7d
FG
15729///
15730/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)
3c0e092e
XL
15731#[inline]
15732#[target_feature(enable = "neon")]
15733#[cfg_attr(test, assert_instr(trn2))]
a2a8927a 15734#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15735pub unsafe fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
353b0b11 15736 simd_shuffle!(a, b, [1, 5, 3, 7])
3c0e092e
XL
15737}
15738
15739/// Transpose vectors
f2b60f7d
FG
15740///
15741/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)
3c0e092e
XL
15742#[inline]
15743#[target_feature(enable = "neon")]
15744#[cfg_attr(test, assert_instr(trn2))]
a2a8927a 15745#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15746pub unsafe fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
353b0b11 15747 simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
3c0e092e
XL
15748}
15749
15750/// Transpose vectors
f2b60f7d
FG
15751///
15752/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)
3c0e092e
XL
15753#[inline]
15754#[target_feature(enable = "neon")]
15755#[cfg_attr(test, assert_instr(trn2))]
a2a8927a 15756#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15757pub unsafe fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
353b0b11 15758 simd_shuffle!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31])
3c0e092e
XL
15759}
15760
15761/// Transpose vectors
f2b60f7d
FG
15762///
15763/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)
3c0e092e
XL
15764#[inline]
15765#[target_feature(enable = "neon")]
15766#[cfg_attr(test, assert_instr(trn2))]
a2a8927a 15767#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15768pub unsafe fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
353b0b11 15769 simd_shuffle!(a, b, [1, 5, 3, 7])
3c0e092e
XL
15770}
15771
15772/// Transpose vectors
f2b60f7d
FG
15773///
15774/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)
3c0e092e
XL
15775#[inline]
15776#[target_feature(enable = "neon")]
15777#[cfg_attr(test, assert_instr(trn2))]
a2a8927a 15778#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15779pub unsafe fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
353b0b11 15780 simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
3c0e092e
XL
15781}
15782
15783/// Transpose vectors
f2b60f7d
FG
15784///
15785/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)
3c0e092e
XL
15786#[inline]
15787#[target_feature(enable = "neon")]
15788#[cfg_attr(test, assert_instr(trn2))]
a2a8927a 15789#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15790pub unsafe fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
353b0b11 15791 simd_shuffle!(a, b, [1, 5, 3, 7])
3c0e092e
XL
15792}
15793
15794/// Transpose vectors
f2b60f7d
FG
15795///
15796/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)
3c0e092e
XL
15797#[inline]
15798#[target_feature(enable = "neon")]
15799#[cfg_attr(test, assert_instr(trn2))]
a2a8927a 15800#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15801pub unsafe fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
353b0b11 15802 simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
3c0e092e
XL
15803}
15804
15805/// Transpose vectors
f2b60f7d
FG
15806///
15807/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)
3c0e092e
XL
15808#[inline]
15809#[target_feature(enable = "neon")]
15810#[cfg_attr(test, assert_instr(trn2))]
a2a8927a 15811#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15812pub unsafe fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
353b0b11 15813 simd_shuffle!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31])
3c0e092e
XL
15814}
15815
15816/// Transpose vectors
f2b60f7d
FG
15817///
15818/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)
3c0e092e
XL
15819#[inline]
15820#[target_feature(enable = "neon")]
15821#[cfg_attr(test, assert_instr(trn2))]
a2a8927a 15822#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15823pub unsafe fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
353b0b11 15824 simd_shuffle!(a, b, [1, 5, 3, 7])
3c0e092e
XL
15825}
15826
15827/// Transpose vectors
f2b60f7d
FG
15828///
15829/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)
3c0e092e
XL
15830#[inline]
15831#[target_feature(enable = "neon")]
15832#[cfg_attr(test, assert_instr(trn2))]
a2a8927a 15833#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15834pub unsafe fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
353b0b11 15835 simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
3c0e092e
XL
15836}
15837
15838/// Transpose vectors
f2b60f7d
FG
15839///
15840/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)
3c0e092e
XL
15841#[inline]
15842#[target_feature(enable = "neon")]
15843#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 15844#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15845pub unsafe fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
353b0b11 15846 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
15847}
15848
15849/// Transpose vectors
f2b60f7d
FG
15850///
15851/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)
3c0e092e
XL
15852#[inline]
15853#[target_feature(enable = "neon")]
15854#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 15855#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15856pub unsafe fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
353b0b11 15857 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
15858}
15859
15860/// Transpose vectors
f2b60f7d
FG
15861///
15862/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)
3c0e092e
XL
15863#[inline]
15864#[target_feature(enable = "neon")]
15865#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 15866#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15867pub unsafe fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
353b0b11 15868 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
15869}
15870
15871/// Transpose vectors
f2b60f7d
FG
15872///
15873/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)
3c0e092e
XL
15874#[inline]
15875#[target_feature(enable = "neon")]
15876#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 15877#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15878pub unsafe fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
353b0b11 15879 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
15880}
15881
15882/// Transpose vectors
f2b60f7d
FG
15883///
15884/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)
3c0e092e
XL
15885#[inline]
15886#[target_feature(enable = "neon")]
15887#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 15888#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15889pub unsafe fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
353b0b11 15890 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
15891}
15892
15893/// Transpose vectors
f2b60f7d
FG
15894///
15895/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)
3c0e092e
XL
15896#[inline]
15897#[target_feature(enable = "neon")]
15898#[cfg_attr(test, assert_instr(trn2))]
a2a8927a 15899#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15900pub unsafe fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
353b0b11 15901 simd_shuffle!(a, b, [1, 5, 3, 7])
3c0e092e
XL
15902}
15903
15904/// Transpose vectors
f2b60f7d
FG
15905///
15906/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)
3c0e092e
XL
15907#[inline]
15908#[target_feature(enable = "neon")]
15909#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 15910#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15911pub unsafe fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
353b0b11 15912 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
15913}
15914
15915/// Transpose vectors
f2b60f7d
FG
15916///
15917/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)
3c0e092e
XL
15918#[inline]
15919#[target_feature(enable = "neon")]
15920#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 15921#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15922pub unsafe fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
353b0b11 15923 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
15924}
15925
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
}
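// Editorial sketch (not emitted by stdarch-gen): ZIP1 interleaves the low
// halves of the two inputs, so for a = [a0, ..., a7] and b = [b0, ..., b7] the
// result is [a0, b0, a1, b1, a2, b2, a3, b3]. A caller-side example, assuming
// a NEON-capable aarch64 target:
//
//     use core::arch::aarch64::*;
//     let a = unsafe { vld1_s8([0i8, 1, 2, 3, 4, 5, 6, 7].as_ptr()) };
//     let b = unsafe { vld1_s8([10i8, 11, 12, 13, 14, 15, 16, 17].as_ptr()) };
//     let mut out = [0i8; 8];
//     unsafe { vst1_s8(out.as_mut_ptr(), vzip1_s8(a, b)) };
//     assert_eq!(out, [0, 10, 1, 11, 2, 12, 3, 13]);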
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23])
}
15948/// Zip vectors
f2b60f7d
FG
15949///
15950/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)
3c0e092e
XL
15951#[inline]
15952#[target_feature(enable = "neon")]
15953#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 15954#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15955pub unsafe fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
353b0b11 15956 simd_shuffle!(a, b, [0, 4, 1, 5])
3c0e092e
XL
15957}
15958
15959/// Zip vectors
f2b60f7d
FG
15960///
15961/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)
3c0e092e
XL
15962#[inline]
15963#[target_feature(enable = "neon")]
15964#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 15965#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15966pub unsafe fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
353b0b11 15967 simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
3c0e092e
XL
15968}
15969
15970/// Zip vectors
f2b60f7d
FG
15971///
15972/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)
3c0e092e
XL
15973#[inline]
15974#[target_feature(enable = "neon")]
15975#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 15976#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15977pub unsafe fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
353b0b11 15978 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
15979}
15980
15981/// Zip vectors
f2b60f7d
FG
15982///
15983/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)
3c0e092e
XL
15984#[inline]
15985#[target_feature(enable = "neon")]
15986#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 15987#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15988pub unsafe fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
353b0b11 15989 simd_shuffle!(a, b, [0, 4, 1, 5])
3c0e092e
XL
15990}
15991
15992/// Zip vectors
f2b60f7d
FG
15993///
15994/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)
3c0e092e
XL
15995#[inline]
15996#[target_feature(enable = "neon")]
15997#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 15998#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 15999pub unsafe fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
353b0b11 16000 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
16001}
16002
16003/// Zip vectors
f2b60f7d
FG
16004///
16005/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)
3c0e092e
XL
16006#[inline]
16007#[target_feature(enable = "neon")]
16008#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16009#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16010pub unsafe fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
353b0b11 16011 simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
3c0e092e
XL
16012}
16013
16014/// Zip vectors
f2b60f7d
FG
16015///
16016/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)
3c0e092e
XL
16017#[inline]
16018#[target_feature(enable = "neon")]
16019#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16020#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16021pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
353b0b11 16022 simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23])
3c0e092e
XL
16023}
16024
16025/// Zip vectors
f2b60f7d
FG
16026///
16027/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)
3c0e092e
XL
16028#[inline]
16029#[target_feature(enable = "neon")]
16030#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16031#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16032pub unsafe fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
353b0b11 16033 simd_shuffle!(a, b, [0, 4, 1, 5])
3c0e092e
XL
16034}
16035
16036/// Zip vectors
f2b60f7d
FG
16037///
16038/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)
3c0e092e
XL
16039#[inline]
16040#[target_feature(enable = "neon")]
16041#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16042#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16043pub unsafe fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
353b0b11 16044 simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
3c0e092e
XL
16045}
16046
16047/// Zip vectors
f2b60f7d
FG
16048///
16049/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)
3c0e092e
XL
16050#[inline]
16051#[target_feature(enable = "neon")]
16052#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16053#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16054pub unsafe fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
353b0b11 16055 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
16056}
16057
16058/// Zip vectors
f2b60f7d
FG
16059///
16060/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)
3c0e092e
XL
16061#[inline]
16062#[target_feature(enable = "neon")]
16063#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16064#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16065pub unsafe fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
353b0b11 16066 simd_shuffle!(a, b, [0, 4, 1, 5])
3c0e092e
XL
16067}
16068
16069/// Zip vectors
f2b60f7d
FG
16070///
16071/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)
3c0e092e
XL
16072#[inline]
16073#[target_feature(enable = "neon")]
16074#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16075#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16076pub unsafe fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
353b0b11 16077 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
16078}
16079
16080/// Zip vectors
f2b60f7d
FG
16081///
16082/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)
3c0e092e
XL
16083#[inline]
16084#[target_feature(enable = "neon")]
16085#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16086#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16087pub unsafe fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
353b0b11 16088 simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
3c0e092e
XL
16089}
16090
16091/// Zip vectors
f2b60f7d
FG
16092///
16093/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)
3c0e092e
XL
16094#[inline]
16095#[target_feature(enable = "neon")]
16096#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16097#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16098pub unsafe fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
353b0b11 16099 simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23])
3c0e092e
XL
16100}
16101
16102/// Zip vectors
f2b60f7d
FG
16103///
16104/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)
3c0e092e
XL
16105#[inline]
16106#[target_feature(enable = "neon")]
16107#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16108#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16109pub unsafe fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
353b0b11 16110 simd_shuffle!(a, b, [0, 4, 1, 5])
3c0e092e
XL
16111}
16112
16113/// Zip vectors
f2b60f7d
FG
16114///
16115/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)
3c0e092e
XL
16116#[inline]
16117#[target_feature(enable = "neon")]
16118#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16119#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16120pub unsafe fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
353b0b11 16121 simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
3c0e092e
XL
16122}
16123
16124/// Zip vectors
f2b60f7d
FG
16125///
16126/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)
3c0e092e
XL
16127#[inline]
16128#[target_feature(enable = "neon")]
16129#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16130#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16131pub unsafe fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
353b0b11 16132 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
16133}
16134
16135/// Zip vectors
f2b60f7d
FG
16136///
16137/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)
3c0e092e
XL
16138#[inline]
16139#[target_feature(enable = "neon")]
16140#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16141#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16142pub unsafe fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
353b0b11 16143 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
16144}
16145
16146/// Zip vectors
f2b60f7d
FG
16147///
16148/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)
3c0e092e
XL
16149#[inline]
16150#[target_feature(enable = "neon")]
16151#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16152#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16153pub unsafe fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
353b0b11 16154 simd_shuffle!(a, b, [0, 4, 1, 5])
3c0e092e
XL
16155}
16156
16157/// Zip vectors
f2b60f7d
FG
16158///
16159/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)
3c0e092e
XL
16160#[inline]
16161#[target_feature(enable = "neon")]
16162#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16163#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16164pub unsafe fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
353b0b11 16165 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
16166}
16167
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
}
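// Editorial note: ZIP2 is the high-half counterpart of ZIP1; for
// a = [a0, ..., a7] and b = [b0, ..., b7] it produces
// [a4, b4, a5, b5, a6, b6, a7, b7], so vzip1_s8 followed by vzip2_s8 yields the
// full interleave of the two inputs across two output vectors.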
16179/// Zip vectors
f2b60f7d
FG
16180///
16181/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)
3c0e092e
XL
16182#[inline]
16183#[target_feature(enable = "neon")]
16184#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16185#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16186pub unsafe fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
353b0b11 16187 simd_shuffle!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31])
3c0e092e
XL
16188}
16189
16190/// Zip vectors
f2b60f7d
FG
16191///
16192/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)
3c0e092e
XL
16193#[inline]
16194#[target_feature(enable = "neon")]
16195#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16196#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16197pub unsafe fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
353b0b11 16198 simd_shuffle!(a, b, [2, 6, 3, 7])
3c0e092e
XL
16199}
16200
16201/// Zip vectors
f2b60f7d
FG
16202///
16203/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)
3c0e092e
XL
16204#[inline]
16205#[target_feature(enable = "neon")]
16206#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16207#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16208pub unsafe fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
353b0b11 16209 simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
3c0e092e
XL
16210}
16211
16212/// Zip vectors
f2b60f7d
FG
16213///
16214/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)
3c0e092e
XL
16215#[inline]
16216#[target_feature(enable = "neon")]
16217#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16218#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16219pub unsafe fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
353b0b11 16220 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
16221}
16222
16223/// Zip vectors
f2b60f7d
FG
16224///
16225/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)
3c0e092e
XL
16226#[inline]
16227#[target_feature(enable = "neon")]
16228#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16229#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16230pub unsafe fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
353b0b11 16231 simd_shuffle!(a, b, [2, 6, 3, 7])
3c0e092e
XL
16232}
16233
16234/// Zip vectors
f2b60f7d
FG
16235///
16236/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)
3c0e092e
XL
16237#[inline]
16238#[target_feature(enable = "neon")]
16239#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16240#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16241pub unsafe fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
353b0b11 16242 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
16243}
16244
16245/// Zip vectors
f2b60f7d
FG
16246///
16247/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)
3c0e092e
XL
16248#[inline]
16249#[target_feature(enable = "neon")]
16250#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16251#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16252pub unsafe fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
353b0b11 16253 simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
3c0e092e
XL
16254}
16255
16256/// Zip vectors
f2b60f7d
FG
16257///
16258/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)
3c0e092e
XL
16259#[inline]
16260#[target_feature(enable = "neon")]
16261#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16262#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16263pub unsafe fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
353b0b11 16264 simd_shuffle!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31])
3c0e092e
XL
16265}
16266
16267/// Zip vectors
f2b60f7d
FG
16268///
16269/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)
3c0e092e
XL
16270#[inline]
16271#[target_feature(enable = "neon")]
16272#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16273#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16274pub unsafe fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
353b0b11 16275 simd_shuffle!(a, b, [2, 6, 3, 7])
3c0e092e
XL
16276}
16277
16278/// Zip vectors
f2b60f7d
FG
16279///
16280/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)
3c0e092e
XL
16281#[inline]
16282#[target_feature(enable = "neon")]
16283#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16284#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16285pub unsafe fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
353b0b11 16286 simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
3c0e092e
XL
16287}
16288
16289/// Zip vectors
f2b60f7d
FG
16290///
16291/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)
3c0e092e
XL
16292#[inline]
16293#[target_feature(enable = "neon")]
16294#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16295#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16296pub unsafe fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
353b0b11 16297 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
16298}
16299
16300/// Zip vectors
f2b60f7d
FG
16301///
16302/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)
3c0e092e
XL
16303#[inline]
16304#[target_feature(enable = "neon")]
16305#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16306#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16307pub unsafe fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
353b0b11 16308 simd_shuffle!(a, b, [2, 6, 3, 7])
3c0e092e
XL
16309}
16310
16311/// Zip vectors
f2b60f7d
FG
16312///
16313/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)
3c0e092e
XL
16314#[inline]
16315#[target_feature(enable = "neon")]
16316#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16317#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16318pub unsafe fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
353b0b11 16319 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
16320}
16321
16322/// Zip vectors
f2b60f7d
FG
16323///
16324/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)
3c0e092e
XL
16325#[inline]
16326#[target_feature(enable = "neon")]
16327#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16328#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16329pub unsafe fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
353b0b11 16330 simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
3c0e092e
XL
16331}
16332
16333/// Zip vectors
f2b60f7d
FG
16334///
16335/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)
3c0e092e
XL
16336#[inline]
16337#[target_feature(enable = "neon")]
16338#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16339#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16340pub unsafe fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
353b0b11 16341 simd_shuffle!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31])
3c0e092e
XL
16342}
16343
16344/// Zip vectors
f2b60f7d
FG
16345///
16346/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)
3c0e092e
XL
16347#[inline]
16348#[target_feature(enable = "neon")]
16349#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16350#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16351pub unsafe fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
353b0b11 16352 simd_shuffle!(a, b, [2, 6, 3, 7])
3c0e092e
XL
16353}
16354
16355/// Zip vectors
f2b60f7d
FG
16356///
16357/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)
3c0e092e
XL
16358#[inline]
16359#[target_feature(enable = "neon")]
16360#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16361#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16362pub unsafe fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
353b0b11 16363 simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
3c0e092e
XL
16364}
16365
16366/// Zip vectors
f2b60f7d
FG
16367///
16368/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)
3c0e092e
XL
16369#[inline]
16370#[target_feature(enable = "neon")]
16371#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16372#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16373pub unsafe fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
353b0b11 16374 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
16375}
16376
16377/// Zip vectors
f2b60f7d
FG
16378///
16379/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)
3c0e092e
XL
16380#[inline]
16381#[target_feature(enable = "neon")]
16382#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16383#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16384pub unsafe fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
353b0b11 16385 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
16386}
16387
16388/// Zip vectors
f2b60f7d
FG
16389///
16390/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)
3c0e092e
XL
16391#[inline]
16392#[target_feature(enable = "neon")]
16393#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16394#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16395pub unsafe fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
353b0b11 16396 simd_shuffle!(a, b, [2, 6, 3, 7])
3c0e092e
XL
16397}
16398
16399/// Zip vectors
f2b60f7d
FG
16400///
16401/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)
3c0e092e
XL
16402#[inline]
16403#[target_feature(enable = "neon")]
16404#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16405#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16406pub unsafe fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
353b0b11 16407 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
16408}
16409
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
}
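// Editorial sketch (not emitted by stdarch-gen): UZP1 gathers the even-indexed
// lanes of both inputs, so for a = [a0, ..., a7] and b = [b0, ..., b7] the
// result is [a0, a2, a4, a6, b0, b2, b4, b6]. A caller-side example, assuming
// a NEON-capable aarch64 target:
//
//     use core::arch::aarch64::*;
//     let a = unsafe { vld1_s8([0i8, 1, 2, 3, 4, 5, 6, 7].as_ptr()) };
//     let b = unsafe { vld1_s8([10i8, 11, 12, 13, 14, 15, 16, 17].as_ptr()) };
//     let mut out = [0i8; 8];
//     unsafe { vst1_s8(out.as_mut_ptr(), vuzp1_s8(a, b)) };
//     assert_eq!(out, [0, 2, 4, 6, 10, 12, 14, 16]);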
16421/// Unzip vectors
f2b60f7d
FG
16422///
16423/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)
3c0e092e
XL
16424#[inline]
16425#[target_feature(enable = "neon")]
16426#[cfg_attr(test, assert_instr(uzp1))]
a2a8927a 16427#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16428pub unsafe fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
353b0b11 16429 simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
3c0e092e
XL
16430}
16431
16432/// Unzip vectors
f2b60f7d
FG
16433///
16434/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)
3c0e092e
XL
16435#[inline]
16436#[target_feature(enable = "neon")]
16437#[cfg_attr(test, assert_instr(uzp1))]
a2a8927a 16438#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16439pub unsafe fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
353b0b11 16440 simd_shuffle!(a, b, [0, 2, 4, 6])
3c0e092e
XL
16441}
16442
16443/// Unzip vectors
f2b60f7d
FG
16444///
16445/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)
3c0e092e
XL
16446#[inline]
16447#[target_feature(enable = "neon")]
16448#[cfg_attr(test, assert_instr(uzp1))]
a2a8927a 16449#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16450pub unsafe fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
353b0b11 16451 simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
3c0e092e
XL
16452}
16453
16454/// Unzip vectors
f2b60f7d
FG
16455///
16456/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)
3c0e092e
XL
16457#[inline]
16458#[target_feature(enable = "neon")]
16459#[cfg_attr(test, assert_instr(uzp1))]
a2a8927a 16460#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16461pub unsafe fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
353b0b11 16462 simd_shuffle!(a, b, [0, 2, 4, 6])
3c0e092e
XL
16463}
16464
16465/// Unzip vectors
f2b60f7d
FG
16466///
16467/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)
3c0e092e
XL
16468#[inline]
16469#[target_feature(enable = "neon")]
16470#[cfg_attr(test, assert_instr(uzp1))]
a2a8927a 16471#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16472pub unsafe fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
353b0b11 16473 simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
3c0e092e
XL
16474}
16475
16476/// Unzip vectors
f2b60f7d
FG
16477///
16478/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)
3c0e092e
XL
16479#[inline]
16480#[target_feature(enable = "neon")]
16481#[cfg_attr(test, assert_instr(uzp1))]
a2a8927a 16482#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16483pub unsafe fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
353b0b11 16484 simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
3c0e092e
XL
16485}
16486
16487/// Unzip vectors
f2b60f7d
FG
16488///
16489/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)
3c0e092e
XL
16490#[inline]
16491#[target_feature(enable = "neon")]
16492#[cfg_attr(test, assert_instr(uzp1))]
a2a8927a 16493#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16494pub unsafe fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
353b0b11 16495 simd_shuffle!(a, b, [0, 2, 4, 6])
3c0e092e
XL
16496}
16497
16498/// Unzip vectors
f2b60f7d
FG
16499///
16500/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)
3c0e092e
XL
16501#[inline]
16502#[target_feature(enable = "neon")]
16503#[cfg_attr(test, assert_instr(uzp1))]
a2a8927a 16504#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16505pub unsafe fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
353b0b11 16506 simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
3c0e092e
XL
16507}
16508
16509/// Unzip vectors
f2b60f7d
FG
16510///
16511/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)
3c0e092e
XL
16512#[inline]
16513#[target_feature(enable = "neon")]
16514#[cfg_attr(test, assert_instr(uzp1))]
a2a8927a 16515#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16516pub unsafe fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
353b0b11 16517 simd_shuffle!(a, b, [0, 2, 4, 6])
3c0e092e
XL
16518}
16519
16520/// Unzip vectors
f2b60f7d
FG
16521///
16522/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)
3c0e092e
XL
16523#[inline]
16524#[target_feature(enable = "neon")]
16525#[cfg_attr(test, assert_instr(uzp1))]
a2a8927a 16526#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16527pub unsafe fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
353b0b11 16528 simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
3c0e092e
XL
16529}
16530
16531/// Unzip vectors
f2b60f7d
FG
16532///
16533/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)
3c0e092e
XL
16534#[inline]
16535#[target_feature(enable = "neon")]
16536#[cfg_attr(test, assert_instr(uzp1))]
a2a8927a 16537#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16538pub unsafe fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
353b0b11 16539 simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
3c0e092e
XL
16540}
16541
16542/// Unzip vectors
f2b60f7d
FG
16543///
16544/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)
3c0e092e
XL
16545#[inline]
16546#[target_feature(enable = "neon")]
16547#[cfg_attr(test, assert_instr(uzp1))]
a2a8927a 16548#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16549pub unsafe fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
353b0b11 16550 simd_shuffle!(a, b, [0, 2, 4, 6])
3c0e092e
XL
16551}
16552
16553/// Unzip vectors
f2b60f7d
FG
16554///
16555/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)
3c0e092e
XL
16556#[inline]
16557#[target_feature(enable = "neon")]
16558#[cfg_attr(test, assert_instr(uzp1))]
a2a8927a 16559#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16560pub unsafe fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
353b0b11 16561 simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
3c0e092e
XL
16562}
16563
16564/// Unzip vectors
f2b60f7d
FG
16565///
16566/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)
3c0e092e
XL
16567#[inline]
16568#[target_feature(enable = "neon")]
16569#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16570#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16571pub unsafe fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
353b0b11 16572 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
16573}
16574
16575/// Unzip vectors
f2b60f7d
FG
16576///
16577/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)
3c0e092e
XL
16578#[inline]
16579#[target_feature(enable = "neon")]
16580#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16581#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16582pub unsafe fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
353b0b11 16583 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
16584}
16585
16586/// Unzip vectors
f2b60f7d
FG
16587///
16588/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)
3c0e092e
XL
16589#[inline]
16590#[target_feature(enable = "neon")]
16591#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16592#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16593pub unsafe fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
353b0b11 16594 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
16595}
16596
16597/// Unzip vectors
f2b60f7d
FG
16598///
16599/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)
3c0e092e
XL
16600#[inline]
16601#[target_feature(enable = "neon")]
16602#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16603#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16604pub unsafe fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
353b0b11 16605 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
16606}
16607
16608/// Unzip vectors
f2b60f7d
FG
16609///
16610/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)
3c0e092e
XL
16611#[inline]
16612#[target_feature(enable = "neon")]
16613#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16614#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16615pub unsafe fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
353b0b11 16616 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
16617}
16618
16619/// Unzip vectors
f2b60f7d
FG
16620///
16621/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)
3c0e092e
XL
16622#[inline]
16623#[target_feature(enable = "neon")]
16624#[cfg_attr(test, assert_instr(uzp1))]
a2a8927a 16625#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16626pub unsafe fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
353b0b11 16627 simd_shuffle!(a, b, [0, 2, 4, 6])
3c0e092e
XL
16628}
16629
16630/// Unzip vectors
f2b60f7d
FG
16631///
16632/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)
3c0e092e
XL
16633#[inline]
16634#[target_feature(enable = "neon")]
16635#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16636#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16637pub unsafe fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
353b0b11 16638 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
16639}
16640
16641/// Unzip vectors
f2b60f7d
FG
16642///
16643/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)
3c0e092e
XL
16644#[inline]
16645#[target_feature(enable = "neon")]
16646#[cfg_attr(test, assert_instr(zip1))]
a2a8927a 16647#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16648pub unsafe fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
353b0b11 16649 simd_shuffle!(a, b, [0, 2])
3c0e092e
XL
16650}
16651
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
}
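// Editorial note: UZP2 gathers the odd-indexed lanes of both inputs, producing
// [a1, a3, a5, a7, b1, b3, b5, b7] for a = [a0, ..., a7] and b = [b0, ..., b7];
// paired with vuzp1_s8 it deinterleaves two packed vectors into their even and
// odd element streams.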
16663/// Unzip vectors
f2b60f7d
FG
16664///
16665/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)
3c0e092e
XL
16666#[inline]
16667#[target_feature(enable = "neon")]
16668#[cfg_attr(test, assert_instr(uzp2))]
a2a8927a 16669#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16670pub unsafe fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
353b0b11 16671 simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31])
3c0e092e
XL
16672}
16673
16674/// Unzip vectors
f2b60f7d
FG
16675///
16676/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)
3c0e092e
XL
16677#[inline]
16678#[target_feature(enable = "neon")]
16679#[cfg_attr(test, assert_instr(uzp2))]
a2a8927a 16680#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16681pub unsafe fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
353b0b11 16682 simd_shuffle!(a, b, [1, 3, 5, 7])
3c0e092e
XL
16683}
16684
16685/// Unzip vectors
f2b60f7d
FG
16686///
16687/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)
3c0e092e
XL
16688#[inline]
16689#[target_feature(enable = "neon")]
16690#[cfg_attr(test, assert_instr(uzp2))]
a2a8927a 16691#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16692pub unsafe fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
353b0b11 16693 simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
3c0e092e
XL
16694}
16695
16696/// Unzip vectors
f2b60f7d
FG
16697///
16698/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)
3c0e092e
XL
16699#[inline]
16700#[target_feature(enable = "neon")]
16701#[cfg_attr(test, assert_instr(uzp2))]
a2a8927a 16702#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16703pub unsafe fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
353b0b11 16704 simd_shuffle!(a, b, [1, 3, 5, 7])
3c0e092e
XL
16705}
16706
16707/// Unzip vectors
f2b60f7d
FG
16708///
16709/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)
3c0e092e
XL
16710#[inline]
16711#[target_feature(enable = "neon")]
16712#[cfg_attr(test, assert_instr(uzp2))]
a2a8927a 16713#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16714pub unsafe fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
353b0b11 16715 simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
3c0e092e
XL
16716}
16717
16718/// Unzip vectors
f2b60f7d
FG
16719///
16720/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)
3c0e092e
XL
16721#[inline]
16722#[target_feature(enable = "neon")]
16723#[cfg_attr(test, assert_instr(uzp2))]
a2a8927a 16724#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16725pub unsafe fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
353b0b11 16726 simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31])
3c0e092e
XL
16727}
16728
16729/// Unzip vectors
f2b60f7d
FG
16730///
16731/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)
3c0e092e
XL
16732#[inline]
16733#[target_feature(enable = "neon")]
16734#[cfg_attr(test, assert_instr(uzp2))]
a2a8927a 16735#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16736pub unsafe fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
353b0b11 16737 simd_shuffle!(a, b, [1, 3, 5, 7])
3c0e092e
XL
16738}
16739
16740/// Unzip vectors
f2b60f7d
FG
16741///
16742/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)
3c0e092e
XL
16743#[inline]
16744#[target_feature(enable = "neon")]
16745#[cfg_attr(test, assert_instr(uzp2))]
a2a8927a 16746#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16747pub unsafe fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
353b0b11 16748 simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
3c0e092e
XL
16749}
16750
16751/// Unzip vectors
f2b60f7d
FG
16752///
16753/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)
3c0e092e
XL
16754#[inline]
16755#[target_feature(enable = "neon")]
16756#[cfg_attr(test, assert_instr(uzp2))]
a2a8927a 16757#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16758pub unsafe fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
353b0b11 16759 simd_shuffle!(a, b, [1, 3, 5, 7])
3c0e092e
XL
16760}
16761
16762/// Unzip vectors
f2b60f7d
FG
16763///
16764/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)
3c0e092e
XL
16765#[inline]
16766#[target_feature(enable = "neon")]
16767#[cfg_attr(test, assert_instr(uzp2))]
a2a8927a 16768#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16769pub unsafe fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
353b0b11 16770 simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
3c0e092e
XL
16771}
16772
16773/// Unzip vectors
f2b60f7d
FG
16774///
16775/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)
3c0e092e
XL
16776#[inline]
16777#[target_feature(enable = "neon")]
16778#[cfg_attr(test, assert_instr(uzp2))]
a2a8927a 16779#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16780pub unsafe fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
353b0b11 16781 simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31])
3c0e092e
XL
16782}
16783
16784/// Unzip vectors
f2b60f7d
FG
16785///
16786/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)
3c0e092e
XL
16787#[inline]
16788#[target_feature(enable = "neon")]
16789#[cfg_attr(test, assert_instr(uzp2))]
a2a8927a 16790#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16791pub unsafe fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
353b0b11 16792 simd_shuffle!(a, b, [1, 3, 5, 7])
3c0e092e
XL
16793}
16794
16795/// Unzip vectors
f2b60f7d
FG
16796///
16797/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)
3c0e092e
XL
16798#[inline]
16799#[target_feature(enable = "neon")]
16800#[cfg_attr(test, assert_instr(uzp2))]
a2a8927a 16801#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16802pub unsafe fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
353b0b11 16803 simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
3c0e092e
XL
16804}
16805
16806/// Unzip vectors
f2b60f7d
FG
16807///
16808/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)
3c0e092e
XL
16809#[inline]
16810#[target_feature(enable = "neon")]
16811#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16812#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16813pub unsafe fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
353b0b11 16814 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
16815}
16816
16817/// Unzip vectors
f2b60f7d
FG
16818///
16819/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)
3c0e092e
XL
16820#[inline]
16821#[target_feature(enable = "neon")]
16822#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16823#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16824pub unsafe fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
353b0b11 16825 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
16826}
16827
16828/// Unzip vectors
f2b60f7d
FG
16829///
16830/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)
3c0e092e
XL
16831#[inline]
16832#[target_feature(enable = "neon")]
16833#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16834#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16835pub unsafe fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
353b0b11 16836 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
16837}
16838
16839/// Unzip vectors
f2b60f7d
FG
16840///
16841/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)
3c0e092e
XL
16842#[inline]
16843#[target_feature(enable = "neon")]
16844#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16845#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16846pub unsafe fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
353b0b11 16847 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
16848}
16849
16850/// Unzip vectors
f2b60f7d
FG
16851///
16852/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)
3c0e092e
XL
16853#[inline]
16854#[target_feature(enable = "neon")]
16855#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16856#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16857pub unsafe fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
353b0b11 16858 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
16859}
16860
16861/// Unzip vectors
f2b60f7d
FG
16862///
16863/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)
3c0e092e
XL
16864#[inline]
16865#[target_feature(enable = "neon")]
16866#[cfg_attr(test, assert_instr(uzp2))]
a2a8927a 16867#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16868pub unsafe fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
353b0b11 16869 simd_shuffle!(a, b, [1, 3, 5, 7])
3c0e092e
XL
16870}
16871
16872/// Unzip vectors
f2b60f7d
FG
16873///
16874/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)
3c0e092e
XL
16875#[inline]
16876#[target_feature(enable = "neon")]
16877#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16878#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16879pub unsafe fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
353b0b11 16880 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
16881}
16882
16883/// Unzip vectors
f2b60f7d
FG
16884///
16885/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)
3c0e092e
XL
16886#[inline]
16887#[target_feature(enable = "neon")]
16888#[cfg_attr(test, assert_instr(zip2))]
a2a8927a 16889#[stable(feature = "neon_intrinsics", since = "1.59.0")]
3c0e092e 16890pub unsafe fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
353b0b11 16891 simd_shuffle!(a, b, [1, 3])
3c0e092e
XL
16892}
16893
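// A minimal usage sketch (added for illustration; not produced by the generator).
// `vuzp1q_*` keeps the even-numbered lanes and `vuzp2q_*` the odd-numbered lanes
// of the concatenation `a:b`, so the pair de-interleaves two vectors.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn vuzp_pair_sketch() {
    let a: float64x2_t = transmute([1.0f64, 2.0]);
    let b: float64x2_t = transmute([3.0f64, 4.0]);
    let even: [f64; 2] = transmute(vuzp1q_f64(a, b)); // lanes 0 and 2 of a:b
    let odd: [f64; 2] = transmute(vuzp2q_f64(a, b)); // lanes 1 and 3 of a:b
    assert_eq!(even, [1.0, 3.0]);
    assert_eq!(odd, [2.0, 4.0]);
}
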
/// Unsigned Absolute difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
    let f: uint8x8_t = vabd_u8(d, e);
    simd_add(a, simd_cast(f))
}

/// Unsigned Absolute difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
    let f: uint16x4_t = vabd_u16(d, e);
    simd_add(a, simd_cast(f))
}

/// Unsigned Absolute difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
    let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
    let f: uint32x2_t = vabd_u32(d, e);
    simd_add(a, simd_cast(f))
}

/// Signed Absolute difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
    let f: int8x8_t = vabd_s8(d, e);
    let f: uint8x8_t = simd_cast(f);
    simd_add(a, simd_cast(f))
}

/// Signed Absolute difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
    let f: int16x4_t = vabd_s16(d, e);
    let f: uint16x4_t = simd_cast(f);
    simd_add(a, simd_cast(f))
}

/// Signed Absolute difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
    let e: int32x2_t = simd_shuffle!(c, c, [2, 3]);
    let f: int32x2_t = vabd_s32(d, e);
    let f: uint32x2_t = simd_cast(f);
    simd_add(a, simd_cast(f))
}

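// Illustrative sketch (added by hand, not generator output): the `vabal_high_*`
// intrinsics take the high halves of `b` and `c`, compute the lane-wise absolute
// difference, widen it, and accumulate into `a`.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn vabal_high_sketch() {
    let a: uint16x8_t = transmute([100u16; 8]);
    let b: uint8x16_t = transmute([10u8; 16]);
    let c: uint8x16_t = transmute([7u8; 16]);
    // |10 - 7| = 3 in every high lane, widened to u16 and accumulated: 100 + 3 = 103.
    let r: [u16; 8] = transmute(vabal_high_u8(a, b, c));
    assert_eq!(r, [103u16; 8]);
}
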
/// Signed saturating absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.v1i64")]
        fn vqabs_s64_(a: int64x1_t) -> int64x1_t;
    }
    vqabs_s64_(a)
}

/// Signed saturating absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.v2i64")]
        fn vqabsq_s64_(a: int64x2_t) -> int64x2_t;
    }
    vqabsq_s64_(a)
}

/// Signed saturating absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqabsb_s8(a: i8) -> i8 {
    simd_extract(vqabs_s8(vdup_n_s8(a)), 0)
}

/// Signed saturating absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqabsh_s16(a: i16) -> i16 {
    simd_extract(vqabs_s16(vdup_n_s16(a)), 0)
}

/// Signed saturating absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqabss_s32(a: i32) -> i32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.i32")]
        fn vqabss_s32_(a: i32) -> i32;
    }
    vqabss_s32_(a)
}

/// Signed saturating absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqabsd_s64(a: i64) -> i64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.i64")]
        fn vqabsd_s64_(a: i64) -> i64;
    }
    vqabsd_s64_(a)
}

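// Illustrative note (added by hand, not generator output): unlike a plain absolute
// value, the saturating forms clamp at the type's maximum instead of wrapping for
// the most negative input.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn vqabs_saturation_sketch() {
    assert_eq!(vqabsb_s8(i8::MIN), i8::MAX); // |-128| does not fit in i8, so it saturates to 127
    assert_eq!(vqabsh_s16(i16::MIN), i16::MAX);
    assert_eq!(vqabsd_s64(-5), 5); // in-range values behave like an ordinary abs
}
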
/// Shift left and insert
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 0 && N <= 63);
    transmute(vsli_n_s64::<N>(transmute(a), transmute(b)))
}

/// Shift left and insert
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 0 && N <= 63);
    transmute(vsli_n_u64::<N>(transmute(a), transmute(b)))
}

/// Shift right and insert
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    transmute(vsri_n_s64::<N>(transmute(a), transmute(b)))
}

/// Shift right and insert
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsrid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    transmute(vsri_n_u64::<N>(transmute(a), transmute(b)))
}

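// Illustrative sketch (added by hand, not generator output): SLI shifts the second
// operand left by `N` and inserts it into the first operand, preserving the low `N`
// bits of the first operand; SRI shifts right and preserves the high `N` bits instead.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn shift_insert_sketch() {
    // (0x01 << 8) inserted over 0xFF keeps the low 8 bits of the first operand.
    assert_eq!(vslid_n_s64::<8>(0xFF, 0x01), 0x1FF);
    // (b >> 8) inserted under the high 8 bits of the first operand.
    let r = vsrid_n_u64::<8>(0xFF00_0000_0000_0000, 0x1200_0000_0000_0000);
    assert_eq!(r, 0xFF12_0000_0000_0000);
}
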
#[cfg(test)]
mod test {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

17126 #[simd_test(enable = "neon,sha3")]
17127 unsafe fn test_veor3q_s8() {
17128 let a: i8x16 = i8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
17129 let b: i8x16 = i8x16::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
17130 let c: i8x16 = i8x16::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
17131 let e: i8x16 = i8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
17132 let r: i8x16 = transmute(veor3q_s8(transmute(a), transmute(b), transmute(c)));
17133 assert_eq!(r, e);
17134 }
17135
17136 #[simd_test(enable = "neon,sha3")]
17137 unsafe fn test_veor3q_s16() {
17138 let a: i16x8 = i16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
17139 let b: i16x8 = i16x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
17140 let c: i16x8 = i16x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
17141 let e: i16x8 = i16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
17142 let r: i16x8 = transmute(veor3q_s16(transmute(a), transmute(b), transmute(c)));
17143 assert_eq!(r, e);
17144 }
17145
17146 #[simd_test(enable = "neon,sha3")]
17147 unsafe fn test_veor3q_s32() {
17148 let a: i32x4 = i32x4::new(0x00, 0x01, 0x02, 0x03);
17149 let b: i32x4 = i32x4::new(0x00, 0x00, 0x00, 0x00);
17150 let c: i32x4 = i32x4::new(0x00, 0x00, 0x00, 0x00);
17151 let e: i32x4 = i32x4::new(0x00, 0x01, 0x02, 0x03);
17152 let r: i32x4 = transmute(veor3q_s32(transmute(a), transmute(b), transmute(c)));
17153 assert_eq!(r, e);
17154 }
17155
17156 #[simd_test(enable = "neon,sha3")]
17157 unsafe fn test_veor3q_s64() {
17158 let a: i64x2 = i64x2::new(0x00, 0x01);
17159 let b: i64x2 = i64x2::new(0x00, 0x00);
17160 let c: i64x2 = i64x2::new(0x00, 0x00);
17161 let e: i64x2 = i64x2::new(0x00, 0x01);
17162 let r: i64x2 = transmute(veor3q_s64(transmute(a), transmute(b), transmute(c)));
17163 assert_eq!(r, e);
17164 }
17165
17166 #[simd_test(enable = "neon,sha3")]
17167 unsafe fn test_veor3q_u8() {
17168 let a: u8x16 = u8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
17169 let b: u8x16 = u8x16::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
17170 let c: u8x16 = u8x16::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
17171 let e: u8x16 = u8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
17172 let r: u8x16 = transmute(veor3q_u8(transmute(a), transmute(b), transmute(c)));
17173 assert_eq!(r, e);
17174 }
17175
17176 #[simd_test(enable = "neon,sha3")]
17177 unsafe fn test_veor3q_u16() {
17178 let a: u16x8 = u16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
17179 let b: u16x8 = u16x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
17180 let c: u16x8 = u16x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
17181 let e: u16x8 = u16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
17182 let r: u16x8 = transmute(veor3q_u16(transmute(a), transmute(b), transmute(c)));
17183 assert_eq!(r, e);
17184 }
17185
17186 #[simd_test(enable = "neon,sha3")]
17187 unsafe fn test_veor3q_u32() {
17188 let a: u32x4 = u32x4::new(0x00, 0x01, 0x02, 0x03);
17189 let b: u32x4 = u32x4::new(0x00, 0x00, 0x00, 0x00);
17190 let c: u32x4 = u32x4::new(0x00, 0x00, 0x00, 0x00);
17191 let e: u32x4 = u32x4::new(0x00, 0x01, 0x02, 0x03);
17192 let r: u32x4 = transmute(veor3q_u32(transmute(a), transmute(b), transmute(c)));
17193 assert_eq!(r, e);
17194 }
17195
17196 #[simd_test(enable = "neon,sha3")]
17197 unsafe fn test_veor3q_u64() {
17198 let a: u64x2 = u64x2::new(0x00, 0x01);
17199 let b: u64x2 = u64x2::new(0x00, 0x00);
17200 let c: u64x2 = u64x2::new(0x00, 0x00);
17201 let e: u64x2 = u64x2::new(0x00, 0x01);
17202 let r: u64x2 = transmute(veor3q_u64(transmute(a), transmute(b), transmute(c)));
17203 assert_eq!(r, e);
17204 }
17205
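    // Illustrative extra case (added by hand, not generator output): the generated
    // veor3q tests above use all-zero second and third operands, so this one checks
    // the three-way XOR with nonzero inputs (0x0F ^ 0x33 ^ 0x55 == 0x69).
    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_veor3q_u8_nonzero() {
        let a: u8x16 = u8x16::new(0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F);
        let b: u8x16 = u8x16::new(0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33);
        let c: u8x16 = u8x16::new(0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55);
        let e: u8x16 = u8x16::new(0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69);
        let r: u8x16 = transmute(veor3q_u8(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
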
17206 #[simd_test(enable = "neon")]
17207 unsafe fn test_vabd_f64() {
17208 let a: f64 = 1.0;
17209 let b: f64 = 9.0;
17210 let e: f64 = 8.0;
17211 let r: f64 = transmute(vabd_f64(transmute(a), transmute(b)));
17212 assert_eq!(r, e);
17213 }
17214
17215 #[simd_test(enable = "neon")]
17216 unsafe fn test_vabdq_f64() {
17217 let a: f64x2 = f64x2::new(1.0, 2.0);
17218 let b: f64x2 = f64x2::new(9.0, 3.0);
17219 let e: f64x2 = f64x2::new(8.0, 1.0);
17220 let r: f64x2 = transmute(vabdq_f64(transmute(a), transmute(b)));
17221 assert_eq!(r, e);
17222 }
17223
17224 #[simd_test(enable = "neon")]
17225 unsafe fn test_vabds_f32() {
17226 let a: f32 = 1.0;
17227 let b: f32 = 9.0;
17228 let e: f32 = 8.0;
17229 let r: f32 = transmute(vabds_f32(transmute(a), transmute(b)));
17230 assert_eq!(r, e);
17231 }
17232
17233 #[simd_test(enable = "neon")]
17234 unsafe fn test_vabdd_f64() {
17235 let a: f64 = 1.0;
17236 let b: f64 = 9.0;
17237 let e: f64 = 8.0;
17238 let r: f64 = transmute(vabdd_f64(transmute(a), transmute(b)));
17239 assert_eq!(r, e);
17240 }
17241
17242 #[simd_test(enable = "neon")]
17243 unsafe fn test_vabdl_high_u8() {
17244 let a: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
17245 let b: u8x16 = u8x16::new(10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10);
17246 let e: u16x8 = u16x8::new(1, 0, 1, 2, 3, 4, 5, 6);
17247 let r: u16x8 = transmute(vabdl_high_u8(transmute(a), transmute(b)));
17248 assert_eq!(r, e);
17249 }
17250
17251 #[simd_test(enable = "neon")]
17252 unsafe fn test_vabdl_high_u16() {
17253 let a: u16x8 = u16x8::new(1, 2, 3, 4, 8, 9, 11, 12);
17254 let b: u16x8 = u16x8::new(10, 10, 10, 10, 10, 10, 10, 10);
17255 let e: u32x4 = u32x4::new(2, 1, 1, 2);
17256 let r: u32x4 = transmute(vabdl_high_u16(transmute(a), transmute(b)));
17257 assert_eq!(r, e);
17258 }
17259
17260 #[simd_test(enable = "neon")]
17261 unsafe fn test_vabdl_high_u32() {
17262 let a: u32x4 = u32x4::new(1, 2, 3, 4);
17263 let b: u32x4 = u32x4::new(10, 10, 10, 10);
17264 let e: u64x2 = u64x2::new(7, 6);
17265 let r: u64x2 = transmute(vabdl_high_u32(transmute(a), transmute(b)));
17266 assert_eq!(r, e);
17267 }
17268
17269 #[simd_test(enable = "neon")]
17270 unsafe fn test_vabdl_high_s8() {
17271 let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
17272 let b: i8x16 = i8x16::new(10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10);
17273 let e: i16x8 = i16x8::new(1, 0, 1, 2, 3, 4, 5, 6);
17274 let r: i16x8 = transmute(vabdl_high_s8(transmute(a), transmute(b)));
17275 assert_eq!(r, e);
17276 }
17277
17278 #[simd_test(enable = "neon")]
17279 unsafe fn test_vabdl_high_s16() {
17280 let a: i16x8 = i16x8::new(1, 2, 3, 4, 9, 10, 11, 12);
17281 let b: i16x8 = i16x8::new(10, 10, 10, 10, 10, 10, 10, 10);
17282 let e: i32x4 = i32x4::new(1, 0, 1, 2);
17283 let r: i32x4 = transmute(vabdl_high_s16(transmute(a), transmute(b)));
17284 assert_eq!(r, e);
17285 }
17286
17287 #[simd_test(enable = "neon")]
17288 unsafe fn test_vabdl_high_s32() {
17289 let a: i32x4 = i32x4::new(1, 2, 3, 4);
17290 let b: i32x4 = i32x4::new(10, 10, 10, 10);
17291 let e: i64x2 = i64x2::new(7, 6);
17292 let r: i64x2 = transmute(vabdl_high_s32(transmute(a), transmute(b)));
17293 assert_eq!(r, e);
17294 }
17295
17296 #[simd_test(enable = "neon")]
17297 unsafe fn test_vceq_u64() {
17298 let a: u64x1 = u64x1::new(0);
17299 let b: u64x1 = u64x1::new(0);
17300 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17301 let r: u64x1 = transmute(vceq_u64(transmute(a), transmute(b)));
17302 assert_eq!(r, e);
17303
17304 let a: u64x1 = u64x1::new(0);
17305 let b: u64x1 = u64x1::new(0);
17306 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17307 let r: u64x1 = transmute(vceq_u64(transmute(a), transmute(b)));
17308 assert_eq!(r, e);
17309 }
17310
17311 #[simd_test(enable = "neon")]
17312 unsafe fn test_vceqq_u64() {
17313 let a: u64x2 = u64x2::new(0, 0x01);
17314 let b: u64x2 = u64x2::new(0, 0x01);
17315 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
17316 let r: u64x2 = transmute(vceqq_u64(transmute(a), transmute(b)));
17317 assert_eq!(r, e);
17318
17319 let a: u64x2 = u64x2::new(0, 0);
17320 let b: u64x2 = u64x2::new(0, 0xFF_FF_FF_FF_FF_FF_FF_FF);
17321 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0);
17322 let r: u64x2 = transmute(vceqq_u64(transmute(a), transmute(b)));
17323 assert_eq!(r, e);
17324 }
17325
17326 #[simd_test(enable = "neon")]
17327 unsafe fn test_vceq_s64() {
17328 let a: i64x1 = i64x1::new(-9223372036854775808);
17329 let b: i64x1 = i64x1::new(-9223372036854775808);
17330 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17331 let r: u64x1 = transmute(vceq_s64(transmute(a), transmute(b)));
17332 assert_eq!(r, e);
17333
17334 let a: i64x1 = i64x1::new(-9223372036854775808);
17335 let b: i64x1 = i64x1::new(-9223372036854775808);
17336 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17337 let r: u64x1 = transmute(vceq_s64(transmute(a), transmute(b)));
17338 assert_eq!(r, e);
17339 }
17340
17341 #[simd_test(enable = "neon")]
17342 unsafe fn test_vceqq_s64() {
17343 let a: i64x2 = i64x2::new(-9223372036854775808, 0x01);
17344 let b: i64x2 = i64x2::new(-9223372036854775808, 0x01);
17345 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
17346 let r: u64x2 = transmute(vceqq_s64(transmute(a), transmute(b)));
17347 assert_eq!(r, e);
17348
17349 let a: i64x2 = i64x2::new(-9223372036854775808, -9223372036854775808);
17350 let b: i64x2 = i64x2::new(-9223372036854775808, 0x7F_FF_FF_FF_FF_FF_FF_FF);
17351 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0);
17352 let r: u64x2 = transmute(vceqq_s64(transmute(a), transmute(b)));
17353 assert_eq!(r, e);
17354 }
17355
17356 #[simd_test(enable = "neon")]
17357 unsafe fn test_vceq_p64() {
17358 let a: i64x1 = i64x1::new(-9223372036854775808);
17359 let b: i64x1 = i64x1::new(-9223372036854775808);
17360 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17361 let r: u64x1 = transmute(vceq_p64(transmute(a), transmute(b)));
17362 assert_eq!(r, e);
17363
17364 let a: i64x1 = i64x1::new(-9223372036854775808);
17365 let b: i64x1 = i64x1::new(-9223372036854775808);
17366 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17367 let r: u64x1 = transmute(vceq_p64(transmute(a), transmute(b)));
17368 assert_eq!(r, e);
17369 }
17370
17371 #[simd_test(enable = "neon")]
17372 unsafe fn test_vceqq_p64() {
17373 let a: i64x2 = i64x2::new(-9223372036854775808, 0x01);
17374 let b: i64x2 = i64x2::new(-9223372036854775808, 0x01);
17375 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
17376 let r: u64x2 = transmute(vceqq_p64(transmute(a), transmute(b)));
17377 assert_eq!(r, e);
17378
17379 let a: i64x2 = i64x2::new(-9223372036854775808, -9223372036854775808);
17380 let b: i64x2 = i64x2::new(-9223372036854775808, 0x7F_FF_FF_FF_FF_FF_FF_FF);
17381 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0);
17382 let r: u64x2 = transmute(vceqq_p64(transmute(a), transmute(b)));
17383 assert_eq!(r, e);
17384 }
17385
17386 #[simd_test(enable = "neon")]
17387 unsafe fn test_vceq_f64() {
17388 let a: f64 = 1.2;
17389 let b: f64 = 1.2;
17390 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17391 let r: u64x1 = transmute(vceq_f64(transmute(a), transmute(b)));
17392 assert_eq!(r, e);
17393 }
17394
17395 #[simd_test(enable = "neon")]
17396 unsafe fn test_vceqq_f64() {
17397 let a: f64x2 = f64x2::new(1.2, 3.4);
17398 let b: f64x2 = f64x2::new(1.2, 3.4);
17399 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
17400 let r: u64x2 = transmute(vceqq_f64(transmute(a), transmute(b)));
17401 assert_eq!(r, e);
17402 }
17403
17404 #[simd_test(enable = "neon")]
17405 unsafe fn test_vceqd_s64() {
17406 let a: i64 = 1;
17407 let b: i64 = 2;
17408 let e: u64 = 0;
17409 let r: u64 = transmute(vceqd_s64(transmute(a), transmute(b)));
17410 assert_eq!(r, e);
17411 }
17412
17413 #[simd_test(enable = "neon")]
17414 unsafe fn test_vceqd_u64() {
17415 let a: u64 = 1;
17416 let b: u64 = 2;
17417 let e: u64 = 0;
17418 let r: u64 = transmute(vceqd_u64(transmute(a), transmute(b)));
17419 assert_eq!(r, e);
17420 }
17421
17422 #[simd_test(enable = "neon")]
17423 unsafe fn test_vceqs_f32() {
17424 let a: f32 = 1.;
17425 let b: f32 = 2.;
17426 let e: u32 = 0;
17427 let r: u32 = transmute(vceqs_f32(transmute(a), transmute(b)));
17428 assert_eq!(r, e);
17429 }
17430
17431 #[simd_test(enable = "neon")]
17432 unsafe fn test_vceqd_f64() {
17433 let a: f64 = 1.;
17434 let b: f64 = 2.;
17435 let e: u64 = 0;
17436 let r: u64 = transmute(vceqd_f64(transmute(a), transmute(b)));
17437 assert_eq!(r, e);
17438 }
17439
17440 #[simd_test(enable = "neon")]
17441 unsafe fn test_vceqz_s8() {
17442 let a: i8x8 = i8x8::new(-128, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
17443 let e: u8x8 = u8x8::new(0, 0xFF, 0, 0, 0, 0, 0, 0);
17444 let r: u8x8 = transmute(vceqz_s8(transmute(a)));
17445 assert_eq!(r, e);
17446 }
17447
17448 #[simd_test(enable = "neon")]
17449 unsafe fn test_vceqzq_s8() {
17450 let a: i8x16 = i8x16::new(-128, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x7F);
17451 let e: u8x16 = u8x16::new(0, 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
17452 let r: u8x16 = transmute(vceqzq_s8(transmute(a)));
17453 assert_eq!(r, e);
17454 }
17455
17456 #[simd_test(enable = "neon")]
17457 unsafe fn test_vceqz_s16() {
17458 let a: i16x4 = i16x4::new(-32768, 0x00, 0x01, 0x02);
17459 let e: u16x4 = u16x4::new(0, 0xFF_FF, 0, 0);
17460 let r: u16x4 = transmute(vceqz_s16(transmute(a)));
17461 assert_eq!(r, e);
17462 }
17463
17464 #[simd_test(enable = "neon")]
17465 unsafe fn test_vceqzq_s16() {
17466 let a: i16x8 = i16x8::new(-32768, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
17467 let e: u16x8 = u16x8::new(0, 0xFF_FF, 0, 0, 0, 0, 0, 0);
17468 let r: u16x8 = transmute(vceqzq_s16(transmute(a)));
17469 assert_eq!(r, e);
17470 }
17471
17472 #[simd_test(enable = "neon")]
17473 unsafe fn test_vceqz_s32() {
17474 let a: i32x2 = i32x2::new(-2147483648, 0x00);
17475 let e: u32x2 = u32x2::new(0, 0xFF_FF_FF_FF);
17476 let r: u32x2 = transmute(vceqz_s32(transmute(a)));
17477 assert_eq!(r, e);
17478 }
17479
17480 #[simd_test(enable = "neon")]
17481 unsafe fn test_vceqzq_s32() {
17482 let a: i32x4 = i32x4::new(-2147483648, 0x00, 0x01, 0x02);
17483 let e: u32x4 = u32x4::new(0, 0xFF_FF_FF_FF, 0, 0);
17484 let r: u32x4 = transmute(vceqzq_s32(transmute(a)));
17485 assert_eq!(r, e);
17486 }
17487
17488 #[simd_test(enable = "neon")]
17489 unsafe fn test_vceqz_s64() {
17490 let a: i64x1 = i64x1::new(-9223372036854775808);
17491 let e: u64x1 = u64x1::new(0);
17492 let r: u64x1 = transmute(vceqz_s64(transmute(a)));
17493 assert_eq!(r, e);
17494 }
17495
17496 #[simd_test(enable = "neon")]
17497 unsafe fn test_vceqzq_s64() {
17498 let a: i64x2 = i64x2::new(-9223372036854775808, 0x00);
17499 let e: u64x2 = u64x2::new(0, 0xFF_FF_FF_FF_FF_FF_FF_FF);
17500 let r: u64x2 = transmute(vceqzq_s64(transmute(a)));
17501 assert_eq!(r, e);
17502 }
17503
17504 #[simd_test(enable = "neon")]
17505 unsafe fn test_vceqz_p8() {
17506 let a: i8x8 = i8x8::new(-128, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
17507 let e: u8x8 = u8x8::new(0, 0xFF, 0, 0, 0, 0, 0, 0);
17508 let r: u8x8 = transmute(vceqz_p8(transmute(a)));
17509 assert_eq!(r, e);
17510 }
17511
17512 #[simd_test(enable = "neon")]
17513 unsafe fn test_vceqzq_p8() {
17514 let a: i8x16 = i8x16::new(-128, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x7F);
17515 let e: u8x16 = u8x16::new(0, 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
17516 let r: u8x16 = transmute(vceqzq_p8(transmute(a)));
17517 assert_eq!(r, e);
17518 }
17519
17520 #[simd_test(enable = "neon")]
17521 unsafe fn test_vceqz_p64() {
17522 let a: i64x1 = i64x1::new(-9223372036854775808);
17523 let e: u64x1 = u64x1::new(0);
17524 let r: u64x1 = transmute(vceqz_p64(transmute(a)));
17525 assert_eq!(r, e);
17526 }
17527
17528 #[simd_test(enable = "neon")]
17529 unsafe fn test_vceqzq_p64() {
17530 let a: i64x2 = i64x2::new(-9223372036854775808, 0x00);
17531 let e: u64x2 = u64x2::new(0, 0xFF_FF_FF_FF_FF_FF_FF_FF);
17532 let r: u64x2 = transmute(vceqzq_p64(transmute(a)));
17533 assert_eq!(r, e);
17534 }
17535
17536 #[simd_test(enable = "neon")]
17537 unsafe fn test_vceqz_u8() {
17538 let a: u8x8 = u8x8::new(0, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
17539 let e: u8x8 = u8x8::new(0xFF, 0xFF, 0, 0, 0, 0, 0, 0);
17540 let r: u8x8 = transmute(vceqz_u8(transmute(a)));
17541 assert_eq!(r, e);
17542 }
17543
17544 #[simd_test(enable = "neon")]
17545 unsafe fn test_vceqzq_u8() {
17546 let a: u8x16 = u8x16::new(0, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0xFF);
17547 let e: u8x16 = u8x16::new(0xFF, 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
17548 let r: u8x16 = transmute(vceqzq_u8(transmute(a)));
17549 assert_eq!(r, e);
17550 }
17551
17552 #[simd_test(enable = "neon")]
17553 unsafe fn test_vceqz_u16() {
17554 let a: u16x4 = u16x4::new(0, 0x00, 0x01, 0x02);
17555 let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0, 0);
17556 let r: u16x4 = transmute(vceqz_u16(transmute(a)));
17557 assert_eq!(r, e);
17558 }
17559
17560 #[simd_test(enable = "neon")]
17561 unsafe fn test_vceqzq_u16() {
17562 let a: u16x8 = u16x8::new(0, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
17563 let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0, 0, 0, 0, 0, 0);
17564 let r: u16x8 = transmute(vceqzq_u16(transmute(a)));
17565 assert_eq!(r, e);
17566 }
17567
17568 #[simd_test(enable = "neon")]
17569 unsafe fn test_vceqz_u32() {
17570 let a: u32x2 = u32x2::new(0, 0x00);
17571 let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
17572 let r: u32x2 = transmute(vceqz_u32(transmute(a)));
17573 assert_eq!(r, e);
17574 }
17575
17576 #[simd_test(enable = "neon")]
17577 unsafe fn test_vceqzq_u32() {
17578 let a: u32x4 = u32x4::new(0, 0x00, 0x01, 0x02);
17579 let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0, 0);
17580 let r: u32x4 = transmute(vceqzq_u32(transmute(a)));
17581 assert_eq!(r, e);
17582 }
17583
17584 #[simd_test(enable = "neon")]
17585 unsafe fn test_vceqz_u64() {
17586 let a: u64x1 = u64x1::new(0);
17587 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17588 let r: u64x1 = transmute(vceqz_u64(transmute(a)));
17589 assert_eq!(r, e);
17590 }
17591
17592 #[simd_test(enable = "neon")]
17593 unsafe fn test_vceqzq_u64() {
17594 let a: u64x2 = u64x2::new(0, 0x00);
17595 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
17596 let r: u64x2 = transmute(vceqzq_u64(transmute(a)));
17597 assert_eq!(r, e);
17598 }
17599
17600 #[simd_test(enable = "neon")]
17601 unsafe fn test_vceqz_f32() {
17602 let a: f32x2 = f32x2::new(0.0, 1.2);
17603 let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0);
17604 let r: u32x2 = transmute(vceqz_f32(transmute(a)));
17605 assert_eq!(r, e);
17606 }
17607
17608 #[simd_test(enable = "neon")]
17609 unsafe fn test_vceqzq_f32() {
17610 let a: f32x4 = f32x4::new(0.0, 1.2, 3.4, 5.6);
17611 let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0, 0, 0);
17612 let r: u32x4 = transmute(vceqzq_f32(transmute(a)));
17613 assert_eq!(r, e);
17614 }
17615
17616 #[simd_test(enable = "neon")]
17617 unsafe fn test_vceqz_f64() {
17618 let a: f64 = 0.0;
17619 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17620 let r: u64x1 = transmute(vceqz_f64(transmute(a)));
17621 assert_eq!(r, e);
17622 }
17623
17624 #[simd_test(enable = "neon")]
17625 unsafe fn test_vceqzq_f64() {
17626 let a: f64x2 = f64x2::new(0.0, 1.2);
17627 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0);
17628 let r: u64x2 = transmute(vceqzq_f64(transmute(a)));
17629 assert_eq!(r, e);
17630 }
17631
17632 #[simd_test(enable = "neon")]
17633 unsafe fn test_vceqzd_s64() {
17634 let a: i64 = 1;
17635 let e: u64 = 0;
17636 let r: u64 = transmute(vceqzd_s64(transmute(a)));
17637 assert_eq!(r, e);
17638 }
17639
17640 #[simd_test(enable = "neon")]
17641 unsafe fn test_vceqzd_u64() {
17642 let a: u64 = 1;
17643 let e: u64 = 0;
17644 let r: u64 = transmute(vceqzd_u64(transmute(a)));
17645 assert_eq!(r, e);
17646 }
17647
17648 #[simd_test(enable = "neon")]
17649 unsafe fn test_vceqzs_f32() {
17650 let a: f32 = 1.;
17651 let e: u32 = 0;
17652 let r: u32 = transmute(vceqzs_f32(transmute(a)));
17653 assert_eq!(r, e);
17654 }
17655
17656 #[simd_test(enable = "neon")]
17657 unsafe fn test_vceqzd_f64() {
17658 let a: f64 = 1.;
17659 let e: u64 = 0;
17660 let r: u64 = transmute(vceqzd_f64(transmute(a)));
17661 assert_eq!(r, e);
17662 }
17663
17664 #[simd_test(enable = "neon")]
17665 unsafe fn test_vtst_s64() {
17666 let a: i64x1 = i64x1::new(-9223372036854775808);
17667 let b: i64x1 = i64x1::new(-9223372036854775808);
17668 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17669 let r: u64x1 = transmute(vtst_s64(transmute(a), transmute(b)));
17670 assert_eq!(r, e);
17671 }
17672
17673 #[simd_test(enable = "neon")]
17674 unsafe fn test_vtstq_s64() {
17675 let a: i64x2 = i64x2::new(-9223372036854775808, 0x00);
17676 let b: i64x2 = i64x2::new(-9223372036854775808, 0x00);
17677 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0);
17678 let r: u64x2 = transmute(vtstq_s64(transmute(a), transmute(b)));
17679 assert_eq!(r, e);
17680 }
17681
17682 #[simd_test(enable = "neon")]
17683 unsafe fn test_vtst_p64() {
17684 let a: i64x1 = i64x1::new(-9223372036854775808);
17685 let b: i64x1 = i64x1::new(-9223372036854775808);
17686 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17687 let r: u64x1 = transmute(vtst_p64(transmute(a), transmute(b)));
17688 assert_eq!(r, e);
17689 }
17690
17691 #[simd_test(enable = "neon")]
17692 unsafe fn test_vtstq_p64() {
17693 let a: i64x2 = i64x2::new(-9223372036854775808, 0x00);
17694 let b: i64x2 = i64x2::new(-9223372036854775808, 0x00);
17695 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0);
17696 let r: u64x2 = transmute(vtstq_p64(transmute(a), transmute(b)));
17697 assert_eq!(r, e);
17698 }
17699
17700 #[simd_test(enable = "neon")]
17701 unsafe fn test_vtst_u64() {
17702 let a: u64x1 = u64x1::new(0);
17703 let b: u64x1 = u64x1::new(0);
17704 let e: u64x1 = u64x1::new(0);
17705 let r: u64x1 = transmute(vtst_u64(transmute(a), transmute(b)));
17706 assert_eq!(r, e);
17707 }
17708
17709 #[simd_test(enable = "neon")]
17710 unsafe fn test_vtstq_u64() {
17711 let a: u64x2 = u64x2::new(0, 0x00);
17712 let b: u64x2 = u64x2::new(0, 0x00);
17713 let e: u64x2 = u64x2::new(0, 0);
17714 let r: u64x2 = transmute(vtstq_u64(transmute(a), transmute(b)));
17715 assert_eq!(r, e);
17716 }
17717
17718 #[simd_test(enable = "neon")]
17719 unsafe fn test_vtstd_s64() {
17720 let a: i64 = 0;
17721 let b: i64 = 0;
17722 let e: u64 = 0;
17723 let r: u64 = transmute(vtstd_s64(transmute(a), transmute(b)));
17724 assert_eq!(r, e);
17725 }
17726
17727 #[simd_test(enable = "neon")]
17728 unsafe fn test_vtstd_u64() {
17729 let a: u64 = 0;
17730 let b: u64 = 0;
17731 let e: u64 = 0;
17732 let r: u64 = transmute(vtstd_u64(transmute(a), transmute(b)));
17733 assert_eq!(r, e);
17734 }
17735
17736 #[simd_test(enable = "neon")]
17737 unsafe fn test_vuqadds_s32() {
17738 let a: i32 = 1;
17739 let b: u32 = 1;
17740 let e: i32 = 2;
17741 let r: i32 = transmute(vuqadds_s32(transmute(a), transmute(b)));
17742 assert_eq!(r, e);
17743 }
17744
17745 #[simd_test(enable = "neon")]
17746 unsafe fn test_vuqaddd_s64() {
17747 let a: i64 = 1;
17748 let b: u64 = 1;
17749 let e: i64 = 2;
17750 let r: i64 = transmute(vuqaddd_s64(transmute(a), transmute(b)));
17751 assert_eq!(r, e);
17752 }
17753
17754 #[simd_test(enable = "neon")]
17755 unsafe fn test_vuqaddb_s8() {
17756 let a: i8 = 1;
17757 let b: u8 = 2;
17758 let e: i8 = 3;
17759 let r: i8 = transmute(vuqaddb_s8(transmute(a), transmute(b)));
17760 assert_eq!(r, e);
17761 }
17762
17763 #[simd_test(enable = "neon")]
17764 unsafe fn test_vuqaddh_s16() {
17765 let a: i16 = 1;
17766 let b: u16 = 2;
17767 let e: i16 = 3;
17768 let r: i16 = transmute(vuqaddh_s16(transmute(a), transmute(b)));
17769 assert_eq!(r, e);
17770 }
17771
17772 #[simd_test(enable = "neon")]
17773 unsafe fn test_vabs_f64() {
17774 let a: f64 = -0.1;
17775 let e: f64 = 0.1;
17776 let r: f64 = transmute(vabs_f64(transmute(a)));
17777 assert_eq!(r, e);
17778 }
17779
17780 #[simd_test(enable = "neon")]
17781 unsafe fn test_vabsq_f64() {
17782 let a: f64x2 = f64x2::new(-0.1, -2.2);
17783 let e: f64x2 = f64x2::new(0.1, 2.2);
17784 let r: f64x2 = transmute(vabsq_f64(transmute(a)));
17785 assert_eq!(r, e);
17786 }
17787
17788 #[simd_test(enable = "neon")]
17789 unsafe fn test_vcgt_s64() {
17790 let a: i64x1 = i64x1::new(1);
17791 let b: i64x1 = i64x1::new(0);
17792 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17793 let r: u64x1 = transmute(vcgt_s64(transmute(a), transmute(b)));
17794 assert_eq!(r, e);
17795 }
17796
17797 #[simd_test(enable = "neon")]
17798 unsafe fn test_vcgtq_s64() {
17799 let a: i64x2 = i64x2::new(1, 2);
17800 let b: i64x2 = i64x2::new(0, 1);
17801 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
17802 let r: u64x2 = transmute(vcgtq_s64(transmute(a), transmute(b)));
17803 assert_eq!(r, e);
17804 }
17805
17806 #[simd_test(enable = "neon")]
17807 unsafe fn test_vcgt_u64() {
17808 let a: u64x1 = u64x1::new(1);
17809 let b: u64x1 = u64x1::new(0);
17810 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17811 let r: u64x1 = transmute(vcgt_u64(transmute(a), transmute(b)));
17812 assert_eq!(r, e);
17813 }
17814
17815 #[simd_test(enable = "neon")]
17816 unsafe fn test_vcgtq_u64() {
17817 let a: u64x2 = u64x2::new(1, 2);
17818 let b: u64x2 = u64x2::new(0, 1);
17819 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
17820 let r: u64x2 = transmute(vcgtq_u64(transmute(a), transmute(b)));
17821 assert_eq!(r, e);
17822 }
17823
17824 #[simd_test(enable = "neon")]
17825 unsafe fn test_vcgt_f64() {
17826 let a: f64 = 1.2;
17827 let b: f64 = 0.1;
17828 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17829 let r: u64x1 = transmute(vcgt_f64(transmute(a), transmute(b)));
17830 assert_eq!(r, e);
17831 }
17832
17833 #[simd_test(enable = "neon")]
17834 unsafe fn test_vcgtq_f64() {
17835 let a: f64x2 = f64x2::new(1.2, 2.3);
17836 let b: f64x2 = f64x2::new(0.1, 1.2);
17837 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
17838 let r: u64x2 = transmute(vcgtq_f64(transmute(a), transmute(b)));
17839 assert_eq!(r, e);
17840 }
17841
17842 #[simd_test(enable = "neon")]
17843 unsafe fn test_vcgtd_s64() {
17844 let a: i64 = 1;
17845 let b: i64 = 2;
17846 let e: u64 = 0;
17847 let r: u64 = transmute(vcgtd_s64(transmute(a), transmute(b)));
17848 assert_eq!(r, e);
17849 }
17850
17851 #[simd_test(enable = "neon")]
17852 unsafe fn test_vcgtd_u64() {
17853 let a: u64 = 1;
17854 let b: u64 = 2;
17855 let e: u64 = 0;
17856 let r: u64 = transmute(vcgtd_u64(transmute(a), transmute(b)));
17857 assert_eq!(r, e);
17858 }
17859
17860 #[simd_test(enable = "neon")]
17861 unsafe fn test_vcgts_f32() {
17862 let a: f32 = 1.;
17863 let b: f32 = 2.;
17864 let e: u32 = 0;
17865 let r: u32 = transmute(vcgts_f32(transmute(a), transmute(b)));
17866 assert_eq!(r, e);
17867 }
17868
17869 #[simd_test(enable = "neon")]
17870 unsafe fn test_vcgtd_f64() {
17871 let a: f64 = 1.;
17872 let b: f64 = 2.;
17873 let e: u64 = 0;
17874 let r: u64 = transmute(vcgtd_f64(transmute(a), transmute(b)));
17875 assert_eq!(r, e);
17876 }
17877
17878 #[simd_test(enable = "neon")]
17879 unsafe fn test_vclt_s64() {
17880 let a: i64x1 = i64x1::new(0);
17881 let b: i64x1 = i64x1::new(1);
17882 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17883 let r: u64x1 = transmute(vclt_s64(transmute(a), transmute(b)));
17884 assert_eq!(r, e);
17885 }
17886
17887 #[simd_test(enable = "neon")]
17888 unsafe fn test_vcltq_s64() {
17889 let a: i64x2 = i64x2::new(0, 1);
17890 let b: i64x2 = i64x2::new(1, 2);
17891 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
17892 let r: u64x2 = transmute(vcltq_s64(transmute(a), transmute(b)));
17893 assert_eq!(r, e);
17894 }
17895
17896 #[simd_test(enable = "neon")]
17897 unsafe fn test_vclt_u64() {
17898 let a: u64x1 = u64x1::new(0);
17899 let b: u64x1 = u64x1::new(1);
17900 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17901 let r: u64x1 = transmute(vclt_u64(transmute(a), transmute(b)));
17902 assert_eq!(r, e);
17903 }
17904
17905 #[simd_test(enable = "neon")]
17906 unsafe fn test_vcltq_u64() {
17907 let a: u64x2 = u64x2::new(0, 1);
17908 let b: u64x2 = u64x2::new(1, 2);
17909 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
17910 let r: u64x2 = transmute(vcltq_u64(transmute(a), transmute(b)));
17911 assert_eq!(r, e);
17912 }
17913
17914 #[simd_test(enable = "neon")]
17915 unsafe fn test_vclt_f64() {
17916 let a: f64 = 0.1;
17917 let b: f64 = 1.2;
17918 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17919 let r: u64x1 = transmute(vclt_f64(transmute(a), transmute(b)));
17920 assert_eq!(r, e);
17921 }
17922
17923 #[simd_test(enable = "neon")]
17924 unsafe fn test_vcltq_f64() {
17925 let a: f64x2 = f64x2::new(0.1, 1.2);
17926 let b: f64x2 = f64x2::new(1.2, 2.3);
17927 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
17928 let r: u64x2 = transmute(vcltq_f64(transmute(a), transmute(b)));
17929 assert_eq!(r, e);
17930 }
17931
17932 #[simd_test(enable = "neon")]
17933 unsafe fn test_vcltd_s64() {
17934 let a: i64 = 2;
17935 let b: i64 = 1;
17936 let e: u64 = 0;
17937 let r: u64 = transmute(vcltd_s64(transmute(a), transmute(b)));
17938 assert_eq!(r, e);
17939 }
17940
17941 #[simd_test(enable = "neon")]
17942 unsafe fn test_vcltd_u64() {
17943 let a: u64 = 2;
17944 let b: u64 = 1;
17945 let e: u64 = 0;
17946 let r: u64 = transmute(vcltd_u64(transmute(a), transmute(b)));
17947 assert_eq!(r, e);
17948 }
17949
17950 #[simd_test(enable = "neon")]
17951 unsafe fn test_vclts_f32() {
17952 let a: f32 = 2.;
17953 let b: f32 = 1.;
17954 let e: u32 = 0;
17955 let r: u32 = transmute(vclts_f32(transmute(a), transmute(b)));
17956 assert_eq!(r, e);
17957 }
17958
17959 #[simd_test(enable = "neon")]
17960 unsafe fn test_vcltd_f64() {
17961 let a: f64 = 2.;
17962 let b: f64 = 1.;
17963 let e: u64 = 0;
17964 let r: u64 = transmute(vcltd_f64(transmute(a), transmute(b)));
17965 assert_eq!(r, e);
17966 }
17967
17968 #[simd_test(enable = "neon")]
17969 unsafe fn test_vcle_s64() {
17970 let a: i64x1 = i64x1::new(0);
17971 let b: i64x1 = i64x1::new(1);
17972 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
17973 let r: u64x1 = transmute(vcle_s64(transmute(a), transmute(b)));
17974 assert_eq!(r, e);
17975 }
17976
17977 #[simd_test(enable = "neon")]
17978 unsafe fn test_vcleq_s64() {
17979 let a: i64x2 = i64x2::new(0, 1);
17980 let b: i64x2 = i64x2::new(1, 2);
17981 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
17982 let r: u64x2 = transmute(vcleq_s64(transmute(a), transmute(b)));
17983 assert_eq!(r, e);
17984 }
17985
17986 #[simd_test(enable = "neon")]
17987 unsafe fn test_vcged_s64() {
17988 let a: i64 = 1;
17989 let b: i64 = 2;
17990 let e: u64 = 0;
17991 let r: u64 = transmute(vcged_s64(transmute(a), transmute(b)));
17992 assert_eq!(r, e);
17993 }
17994
17995 #[simd_test(enable = "neon")]
17996 unsafe fn test_vcged_u64() {
17997 let a: u64 = 1;
17998 let b: u64 = 2;
17999 let e: u64 = 0;
18000 let r: u64 = transmute(vcged_u64(transmute(a), transmute(b)));
18001 assert_eq!(r, e);
18002 }
18003
18004 #[simd_test(enable = "neon")]
18005 unsafe fn test_vcges_f32() {
18006 let a: f32 = 1.;
18007 let b: f32 = 2.;
18008 let e: u32 = 0;
18009 let r: u32 = transmute(vcges_f32(transmute(a), transmute(b)));
18010 assert_eq!(r, e);
18011 }
18012
18013 #[simd_test(enable = "neon")]
18014 unsafe fn test_vcged_f64() {
18015 let a: f64 = 1.;
18016 let b: f64 = 2.;
18017 let e: u64 = 0;
18018 let r: u64 = transmute(vcged_f64(transmute(a), transmute(b)));
18019 assert_eq!(r, e);
18020 }
18021
18022 #[simd_test(enable = "neon")]
18023 unsafe fn test_vcle_u64() {
18024 let a: u64x1 = u64x1::new(0);
18025 let b: u64x1 = u64x1::new(1);
18026 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
18027 let r: u64x1 = transmute(vcle_u64(transmute(a), transmute(b)));
18028 assert_eq!(r, e);
18029 }
18030
18031 #[simd_test(enable = "neon")]
18032 unsafe fn test_vcleq_u64() {
18033 let a: u64x2 = u64x2::new(0, 1);
18034 let b: u64x2 = u64x2::new(1, 2);
18035 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
18036 let r: u64x2 = transmute(vcleq_u64(transmute(a), transmute(b)));
18037 assert_eq!(r, e);
18038 }
18039
18040 #[simd_test(enable = "neon")]
18041 unsafe fn test_vcle_f64() {
18042 let a: f64 = 0.1;
18043 let b: f64 = 1.2;
18044 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
18045 let r: u64x1 = transmute(vcle_f64(transmute(a), transmute(b)));
18046 assert_eq!(r, e);
18047 }
18048
18049 #[simd_test(enable = "neon")]
18050 unsafe fn test_vcleq_f64() {
18051 let a: f64x2 = f64x2::new(0.1, 1.2);
18052 let b: f64x2 = f64x2::new(1.2, 2.3);
18053 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
18054 let r: u64x2 = transmute(vcleq_f64(transmute(a), transmute(b)));
18055 assert_eq!(r, e);
18056 }
18057
18058 #[simd_test(enable = "neon")]
18059 unsafe fn test_vcled_s64() {
18060 let a: i64 = 2;
18061 let b: i64 = 1;
18062 let e: u64 = 0;
18063 let r: u64 = transmute(vcled_s64(transmute(a), transmute(b)));
18064 assert_eq!(r, e);
18065 }
18066
18067 #[simd_test(enable = "neon")]
18068 unsafe fn test_vcled_u64() {
18069 let a: u64 = 2;
18070 let b: u64 = 1;
18071 let e: u64 = 0;
18072 let r: u64 = transmute(vcled_u64(transmute(a), transmute(b)));
18073 assert_eq!(r, e);
18074 }
18075
18076 #[simd_test(enable = "neon")]
18077 unsafe fn test_vcles_f32() {
18078 let a: f32 = 2.;
18079 let b: f32 = 1.;
18080 let e: u32 = 0;
18081 let r: u32 = transmute(vcles_f32(transmute(a), transmute(b)));
18082 assert_eq!(r, e);
18083 }
18084
18085 #[simd_test(enable = "neon")]
18086 unsafe fn test_vcled_f64() {
18087 let a: f64 = 2.;
18088 let b: f64 = 1.;
18089 let e: u64 = 0;
18090 let r: u64 = transmute(vcled_f64(transmute(a), transmute(b)));
18091 assert_eq!(r, e);
18092 }
18093
18094 #[simd_test(enable = "neon")]
18095 unsafe fn test_vcge_s64() {
18096 let a: i64x1 = i64x1::new(1);
18097 let b: i64x1 = i64x1::new(0);
18098 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
18099 let r: u64x1 = transmute(vcge_s64(transmute(a), transmute(b)));
18100 assert_eq!(r, e);
18101 }
18102
18103 #[simd_test(enable = "neon")]
18104 unsafe fn test_vcgeq_s64() {
18105 let a: i64x2 = i64x2::new(1, 2);
18106 let b: i64x2 = i64x2::new(0, 1);
18107 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
18108 let r: u64x2 = transmute(vcgeq_s64(transmute(a), transmute(b)));
18109 assert_eq!(r, e);
18110 }
18111
18112 #[simd_test(enable = "neon")]
18113 unsafe fn test_vcge_u64() {
18114 let a: u64x1 = u64x1::new(1);
18115 let b: u64x1 = u64x1::new(0);
18116 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
18117 let r: u64x1 = transmute(vcge_u64(transmute(a), transmute(b)));
18118 assert_eq!(r, e);
18119 }
18120
18121 #[simd_test(enable = "neon")]
18122 unsafe fn test_vcgeq_u64() {
18123 let a: u64x2 = u64x2::new(1, 2);
18124 let b: u64x2 = u64x2::new(0, 1);
18125 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
18126 let r: u64x2 = transmute(vcgeq_u64(transmute(a), transmute(b)));
18127 assert_eq!(r, e);
18128 }
18129
18130 #[simd_test(enable = "neon")]
18131 unsafe fn test_vcge_f64() {
18132 let a: f64 = 1.2;
18133 let b: f64 = 0.1;
18134 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
18135 let r: u64x1 = transmute(vcge_f64(transmute(a), transmute(b)));
18136 assert_eq!(r, e);
18137 }
18138
18139 #[simd_test(enable = "neon")]
18140 unsafe fn test_vcgeq_f64() {
18141 let a: f64x2 = f64x2::new(1.2, 2.3);
18142 let b: f64x2 = f64x2::new(0.1, 1.2);
18143 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
18144 let r: u64x2 = transmute(vcgeq_f64(transmute(a), transmute(b)));
18145 assert_eq!(r, e);
18146 }
18147
18148 #[simd_test(enable = "neon")]
18149 unsafe fn test_vcgez_s8() {
18150 let a: i8x8 = i8x8::new(-128, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);
18151 let e: u8x8 = u8x8::new(0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
18152 let r: u8x8 = transmute(vcgez_s8(transmute(a)));
18153 assert_eq!(r, e);
18154 }
18155
18156 #[simd_test(enable = "neon")]
18157 unsafe fn test_vcgezq_s8() {
18158 let a: i8x16 = i8x16::new(-128, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x7F);
18159 let e: u8x16 = u8x16::new(0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
18160 let r: u8x16 = transmute(vcgezq_s8(transmute(a)));
18161 assert_eq!(r, e);
18162 }
18163
18164 #[simd_test(enable = "neon")]
18165 unsafe fn test_vcgez_s16() {
18166 let a: i16x4 = i16x4::new(-32768, -1, 0x00, 0x01);
18167 let e: u16x4 = u16x4::new(0, 0, 0xFF_FF, 0xFF_FF);
18168 let r: u16x4 = transmute(vcgez_s16(transmute(a)));
18169 assert_eq!(r, e);
18170 }
18171
18172 #[simd_test(enable = "neon")]
18173 unsafe fn test_vcgezq_s16() {
18174 let a: i16x8 = i16x8::new(-32768, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);
18175 let e: u16x8 = u16x8::new(0, 0, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
18176 let r: u16x8 = transmute(vcgezq_s16(transmute(a)));
18177 assert_eq!(r, e);
18178 }
18179
18180 #[simd_test(enable = "neon")]
18181 unsafe fn test_vcgez_s32() {
18182 let a: i32x2 = i32x2::new(-2147483648, -1);
18183 let e: u32x2 = u32x2::new(0, 0);
18184 let r: u32x2 = transmute(vcgez_s32(transmute(a)));
18185 assert_eq!(r, e);
18186 }
18187
18188 #[simd_test(enable = "neon")]
18189 unsafe fn test_vcgezq_s32() {
18190 let a: i32x4 = i32x4::new(-2147483648, -1, 0x00, 0x01);
18191 let e: u32x4 = u32x4::new(0, 0, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
18192 let r: u32x4 = transmute(vcgezq_s32(transmute(a)));
18193 assert_eq!(r, e);
18194 }
18195
18196 #[simd_test(enable = "neon")]
18197 unsafe fn test_vcgez_s64() {
18198 let a: i64x1 = i64x1::new(-9223372036854775808);
18199 let e: u64x1 = u64x1::new(0);
18200 let r: u64x1 = transmute(vcgez_s64(transmute(a)));
18201 assert_eq!(r, e);
18202 }
18203
18204 #[simd_test(enable = "neon")]
18205 unsafe fn test_vcgezq_s64() {
18206 let a: i64x2 = i64x2::new(-9223372036854775808, -1);
18207 let e: u64x2 = u64x2::new(0, 0);
18208 let r: u64x2 = transmute(vcgezq_s64(transmute(a)));
18209 assert_eq!(r, e);
18210 }
18211
18212 #[simd_test(enable = "neon")]
18213 unsafe fn test_vcgez_f32() {
18214 let a: f32x2 = f32x2::new(-1.2, 0.0);
18215 let e: u32x2 = u32x2::new(0, 0xFF_FF_FF_FF);
18216 let r: u32x2 = transmute(vcgez_f32(transmute(a)));
18217 assert_eq!(r, e);
18218 }
18219
18220 #[simd_test(enable = "neon")]
18221 unsafe fn test_vcgezq_f32() {
18222 let a: f32x4 = f32x4::new(-1.2, 0.0, 1.2, 2.3);
18223 let e: u32x4 = u32x4::new(0, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
18224 let r: u32x4 = transmute(vcgezq_f32(transmute(a)));
18225 assert_eq!(r, e);
18226 }
18227
18228 #[simd_test(enable = "neon")]
18229 unsafe fn test_vcgez_f64() {
18230 let a: f64 = -1.2;
18231 let e: u64x1 = u64x1::new(0);
18232 let r: u64x1 = transmute(vcgez_f64(transmute(a)));
18233 assert_eq!(r, e);
18234 }
18235
18236 #[simd_test(enable = "neon")]
18237 unsafe fn test_vcgezq_f64() {
18238 let a: f64x2 = f64x2::new(-1.2, 0.0);
18239 let e: u64x2 = u64x2::new(0, 0xFF_FF_FF_FF_FF_FF_FF_FF);
18240 let r: u64x2 = transmute(vcgezq_f64(transmute(a)));
18241 assert_eq!(r, e);
18242 }
18243
18244 #[simd_test(enable = "neon")]
18245 unsafe fn test_vcgezd_s64() {
18246 let a: i64 = -1;
18247 let e: u64 = 0;
18248 let r: u64 = transmute(vcgezd_s64(transmute(a)));
18249 assert_eq!(r, e);
18250 }
18251
18252 #[simd_test(enable = "neon")]
18253 unsafe fn test_vcgezs_f32() {
18254 let a: f32 = -1.;
18255 let e: u32 = 0;
18256 let r: u32 = transmute(vcgezs_f32(transmute(a)));
18257 assert_eq!(r, e);
18258 }
18259
18260 #[simd_test(enable = "neon")]
18261 unsafe fn test_vcgezd_f64() {
18262 let a: f64 = -1.;
18263 let e: u64 = 0;
18264 let r: u64 = transmute(vcgezd_f64(transmute(a)));
18265 assert_eq!(r, e);
18266 }
18267
18268 #[simd_test(enable = "neon")]
18269 unsafe fn test_vcgtz_s8() {
18270 let a: i8x8 = i8x8::new(-128, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);
18271 let e: u8x8 = u8x8::new(0, 0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
18272 let r: u8x8 = transmute(vcgtz_s8(transmute(a)));
18273 assert_eq!(r, e);
18274 }
18275
18276 #[simd_test(enable = "neon")]
18277 unsafe fn test_vcgtzq_s8() {
18278 let a: i8x16 = i8x16::new(-128, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x7F);
18279 let e: u8x16 = u8x16::new(0, 0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
18280 let r: u8x16 = transmute(vcgtzq_s8(transmute(a)));
18281 assert_eq!(r, e);
18282 }
18283
18284 #[simd_test(enable = "neon")]
18285 unsafe fn test_vcgtz_s16() {
18286 let a: i16x4 = i16x4::new(-32768, -1, 0x00, 0x01);
18287 let e: u16x4 = u16x4::new(0, 0, 0, 0xFF_FF);
18288 let r: u16x4 = transmute(vcgtz_s16(transmute(a)));
18289 assert_eq!(r, e);
18290 }
18291
18292 #[simd_test(enable = "neon")]
18293 unsafe fn test_vcgtzq_s16() {
18294 let a: i16x8 = i16x8::new(-32768, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);
18295 let e: u16x8 = u16x8::new(0, 0, 0, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
18296 let r: u16x8 = transmute(vcgtzq_s16(transmute(a)));
18297 assert_eq!(r, e);
18298 }
18299
18300 #[simd_test(enable = "neon")]
18301 unsafe fn test_vcgtz_s32() {
18302 let a: i32x2 = i32x2::new(-2147483648, -1);
18303 let e: u32x2 = u32x2::new(0, 0);
18304 let r: u32x2 = transmute(vcgtz_s32(transmute(a)));
18305 assert_eq!(r, e);
18306 }
18307
18308 #[simd_test(enable = "neon")]
18309 unsafe fn test_vcgtzq_s32() {
18310 let a: i32x4 = i32x4::new(-2147483648, -1, 0x00, 0x01);
18311 let e: u32x4 = u32x4::new(0, 0, 0, 0xFF_FF_FF_FF);
18312 let r: u32x4 = transmute(vcgtzq_s32(transmute(a)));
18313 assert_eq!(r, e);
18314 }
18315
18316 #[simd_test(enable = "neon")]
18317 unsafe fn test_vcgtz_s64() {
18318 let a: i64x1 = i64x1::new(-9223372036854775808);
18319 let e: u64x1 = u64x1::new(0);
18320 let r: u64x1 = transmute(vcgtz_s64(transmute(a)));
18321 assert_eq!(r, e);
18322 }
18323
18324 #[simd_test(enable = "neon")]
18325 unsafe fn test_vcgtzq_s64() {
18326 let a: i64x2 = i64x2::new(-9223372036854775808, -1);
18327 let e: u64x2 = u64x2::new(0, 0);
18328 let r: u64x2 = transmute(vcgtzq_s64(transmute(a)));
18329 assert_eq!(r, e);
18330 }
18331
18332 #[simd_test(enable = "neon")]
18333 unsafe fn test_vcgtz_f32() {
18334 let a: f32x2 = f32x2::new(-1.2, 0.0);
18335 let e: u32x2 = u32x2::new(0, 0);
18336 let r: u32x2 = transmute(vcgtz_f32(transmute(a)));
18337 assert_eq!(r, e);
18338 }
18339
18340 #[simd_test(enable = "neon")]
18341 unsafe fn test_vcgtzq_f32() {
18342 let a: f32x4 = f32x4::new(-1.2, 0.0, 1.2, 2.3);
18343 let e: u32x4 = u32x4::new(0, 0, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
18344 let r: u32x4 = transmute(vcgtzq_f32(transmute(a)));
18345 assert_eq!(r, e);
18346 }
18347
18348 #[simd_test(enable = "neon")]
18349 unsafe fn test_vcgtz_f64() {
18350 let a: f64 = -1.2;
18351 let e: u64x1 = u64x1::new(0);
18352 let r: u64x1 = transmute(vcgtz_f64(transmute(a)));
18353 assert_eq!(r, e);
18354 }
18355
18356 #[simd_test(enable = "neon")]
18357 unsafe fn test_vcgtzq_f64() {
18358 let a: f64x2 = f64x2::new(-1.2, 0.0);
18359 let e: u64x2 = u64x2::new(0, 0);
18360 let r: u64x2 = transmute(vcgtzq_f64(transmute(a)));
18361 assert_eq!(r, e);
18362 }
18363
18364 #[simd_test(enable = "neon")]
18365 unsafe fn test_vcgtzd_s64() {
18366 let a: i64 = -1;
18367 let e: u64 = 0;
18368 let r: u64 = transmute(vcgtzd_s64(transmute(a)));
18369 assert_eq!(r, e);
18370 }
18371
18372 #[simd_test(enable = "neon")]
18373 unsafe fn test_vcgtzs_f32() {
18374 let a: f32 = -1.;
18375 let e: u32 = 0;
18376 let r: u32 = transmute(vcgtzs_f32(transmute(a)));
18377 assert_eq!(r, e);
18378 }
18379
18380 #[simd_test(enable = "neon")]
18381 unsafe fn test_vcgtzd_f64() {
18382 let a: f64 = -1.;
18383 let e: u64 = 0;
18384 let r: u64 = transmute(vcgtzd_f64(transmute(a)));
18385 assert_eq!(r, e);
18386 }
18387
18388 #[simd_test(enable = "neon")]
18389 unsafe fn test_vclez_s8() {
18390 let a: i8x8 = i8x8::new(-128, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);
18391 let e: u8x8 = u8x8::new(0xFF, 0xFF, 0xFF, 0, 0, 0, 0, 0);
18392 let r: u8x8 = transmute(vclez_s8(transmute(a)));
18393 assert_eq!(r, e);
18394 }
18395
18396 #[simd_test(enable = "neon")]
18397 unsafe fn test_vclezq_s8() {
18398 let a: i8x16 = i8x16::new(-128, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x7F);
18399 let e: u8x16 = u8x16::new(0xFF, 0xFF, 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
18400 let r: u8x16 = transmute(vclezq_s8(transmute(a)));
18401 assert_eq!(r, e);
18402 }
18403
18404 #[simd_test(enable = "neon")]
18405 unsafe fn test_vclez_s16() {
18406 let a: i16x4 = i16x4::new(-32768, -1, 0x00, 0x01);
18407 let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0);
18408 let r: u16x4 = transmute(vclez_s16(transmute(a)));
18409 assert_eq!(r, e);
18410 }
18411
18412 #[simd_test(enable = "neon")]
18413 unsafe fn test_vclezq_s16() {
18414 let a: i16x8 = i16x8::new(-32768, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);
18415 let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0, 0, 0, 0, 0);
18416 let r: u16x8 = transmute(vclezq_s16(transmute(a)));
18417 assert_eq!(r, e);
18418 }
18419
18420 #[simd_test(enable = "neon")]
18421 unsafe fn test_vclez_s32() {
18422 let a: i32x2 = i32x2::new(-2147483648, -1);
18423 let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
18424 let r: u32x2 = transmute(vclez_s32(transmute(a)));
18425 assert_eq!(r, e);
18426 }
18427
18428 #[simd_test(enable = "neon")]
18429 unsafe fn test_vclezq_s32() {
18430 let a: i32x4 = i32x4::new(-2147483648, -1, 0x00, 0x01);
18431 let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0);
18432 let r: u32x4 = transmute(vclezq_s32(transmute(a)));
18433 assert_eq!(r, e);
18434 }
18435
18436 #[simd_test(enable = "neon")]
18437 unsafe fn test_vclez_s64() {
18438 let a: i64x1 = i64x1::new(-9223372036854775808);
18439 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
18440 let r: u64x1 = transmute(vclez_s64(transmute(a)));
18441 assert_eq!(r, e);
18442 }
18443
18444 #[simd_test(enable = "neon")]
18445 unsafe fn test_vclezq_s64() {
18446 let a: i64x2 = i64x2::new(-9223372036854775808, -1);
18447 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
18448 let r: u64x2 = transmute(vclezq_s64(transmute(a)));
18449 assert_eq!(r, e);
18450 }
18451
18452 #[simd_test(enable = "neon")]
18453 unsafe fn test_vclez_f32() {
18454 let a: f32x2 = f32x2::new(-1.2, 0.0);
18455 let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
18456 let r: u32x2 = transmute(vclez_f32(transmute(a)));
18457 assert_eq!(r, e);
18458 }
18459
18460 #[simd_test(enable = "neon")]
18461 unsafe fn test_vclezq_f32() {
18462 let a: f32x4 = f32x4::new(-1.2, 0.0, 1.2, 2.3);
18463 let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0, 0);
18464 let r: u32x4 = transmute(vclezq_f32(transmute(a)));
18465 assert_eq!(r, e);
18466 }
18467
18468 #[simd_test(enable = "neon")]
18469 unsafe fn test_vclez_f64() {
18470 let a: f64 = -1.2;
18471 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
18472 let r: u64x1 = transmute(vclez_f64(transmute(a)));
18473 assert_eq!(r, e);
18474 }
18475
18476 #[simd_test(enable = "neon")]
18477 unsafe fn test_vclezq_f64() {
18478 let a: f64x2 = f64x2::new(-1.2, 0.0);
18479 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
18480 let r: u64x2 = transmute(vclezq_f64(transmute(a)));
18481 assert_eq!(r, e);
18482 }
18483
18484 #[simd_test(enable = "neon")]
18485 unsafe fn test_vclezd_s64() {
18486 let a: i64 = 2;
18487 let e: u64 = 0;
18488 let r: u64 = transmute(vclezd_s64(transmute(a)));
18489 assert_eq!(r, e);
18490 }
18491
18492 #[simd_test(enable = "neon")]
18493 unsafe fn test_vclezs_f32() {
18494 let a: f32 = 2.;
18495 let e: u32 = 0;
18496 let r: u32 = transmute(vclezs_f32(transmute(a)));
18497 assert_eq!(r, e);
18498 }
18499
18500 #[simd_test(enable = "neon")]
18501 unsafe fn test_vclezd_f64() {
18502 let a: f64 = 2.;
18503 let e: u64 = 0;
18504 let r: u64 = transmute(vclezd_f64(transmute(a)));
18505 assert_eq!(r, e);
18506 }
18507
18508 #[simd_test(enable = "neon")]
18509 unsafe fn test_vcltz_s8() {
18510 let a: i8x8 = i8x8::new(-128, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);
18511 let e: u8x8 = u8x8::new(0xFF, 0xFF, 0, 0, 0, 0, 0, 0);
18512 let r: u8x8 = transmute(vcltz_s8(transmute(a)));
18513 assert_eq!(r, e);
18514 }
18515
18516 #[simd_test(enable = "neon")]
18517 unsafe fn test_vcltzq_s8() {
18518 let a: i8x16 = i8x16::new(-128, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x7F);
18519 let e: u8x16 = u8x16::new(0xFF, 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
18520 let r: u8x16 = transmute(vcltzq_s8(transmute(a)));
18521 assert_eq!(r, e);
18522 }
18523
18524 #[simd_test(enable = "neon")]
18525 unsafe fn test_vcltz_s16() {
18526 let a: i16x4 = i16x4::new(-32768, -1, 0x00, 0x01);
18527 let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0, 0);
18528 let r: u16x4 = transmute(vcltz_s16(transmute(a)));
18529 assert_eq!(r, e);
18530 }
18531
18532 #[simd_test(enable = "neon")]
18533 unsafe fn test_vcltzq_s16() {
18534 let a: i16x8 = i16x8::new(-32768, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);
18535 let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0, 0, 0, 0, 0, 0);
18536 let r: u16x8 = transmute(vcltzq_s16(transmute(a)));
18537 assert_eq!(r, e);
18538 }
18539
18540 #[simd_test(enable = "neon")]
18541 unsafe fn test_vcltz_s32() {
18542 let a: i32x2 = i32x2::new(-2147483648, -1);
18543 let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
18544 let r: u32x2 = transmute(vcltz_s32(transmute(a)));
18545 assert_eq!(r, e);
18546 }
18547
18548 #[simd_test(enable = "neon")]
18549 unsafe fn test_vcltzq_s32() {
18550 let a: i32x4 = i32x4::new(-2147483648, -1, 0x00, 0x01);
18551 let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0, 0);
18552 let r: u32x4 = transmute(vcltzq_s32(transmute(a)));
18553 assert_eq!(r, e);
18554 }
18555
18556 #[simd_test(enable = "neon")]
18557 unsafe fn test_vcltz_s64() {
18558 let a: i64x1 = i64x1::new(-9223372036854775808);
18559 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
18560 let r: u64x1 = transmute(vcltz_s64(transmute(a)));
18561 assert_eq!(r, e);
18562 }
18563
18564 #[simd_test(enable = "neon")]
18565 unsafe fn test_vcltzq_s64() {
18566 let a: i64x2 = i64x2::new(-9223372036854775808, -1);
18567 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
18568 let r: u64x2 = transmute(vcltzq_s64(transmute(a)));
18569 assert_eq!(r, e);
18570 }
18571
18572 #[simd_test(enable = "neon")]
18573 unsafe fn test_vcltz_f32() {
18574 let a: f32x2 = f32x2::new(-1.2, 0.0);
18575 let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0);
18576 let r: u32x2 = transmute(vcltz_f32(transmute(a)));
18577 assert_eq!(r, e);
18578 }
18579
18580 #[simd_test(enable = "neon")]
18581 unsafe fn test_vcltzq_f32() {
18582 let a: f32x4 = f32x4::new(-1.2, 0.0, 1.2, 2.3);
18583 let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0, 0, 0);
18584 let r: u32x4 = transmute(vcltzq_f32(transmute(a)));
18585 assert_eq!(r, e);
18586 }
18587
18588 #[simd_test(enable = "neon")]
18589 unsafe fn test_vcltz_f64() {
18590 let a: f64 = -1.2;
18591 let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
18592 let r: u64x1 = transmute(vcltz_f64(transmute(a)));
18593 assert_eq!(r, e);
18594 }
18595
18596 #[simd_test(enable = "neon")]
18597 unsafe fn test_vcltzq_f64() {
18598 let a: f64x2 = f64x2::new(-1.2, 0.0);
18599 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0);
18600 let r: u64x2 = transmute(vcltzq_f64(transmute(a)));
18601 assert_eq!(r, e);
18602 }
18603
18604 #[simd_test(enable = "neon")]
18605 unsafe fn test_vcltzd_s64() {
18606 let a: i64 = 2;
18607 let e: u64 = 0;
18608 let r: u64 = transmute(vcltzd_s64(transmute(a)));
18609 assert_eq!(r, e);
18610 }
18611
18612 #[simd_test(enable = "neon")]
18613 unsafe fn test_vcltzs_f32() {
18614 let a: f32 = 2.;
18615 let e: u32 = 0;
18616 let r: u32 = transmute(vcltzs_f32(transmute(a)));
18617 assert_eq!(r, e);
18618 }
18619
18620 #[simd_test(enable = "neon")]
18621 unsafe fn test_vcltzd_f64() {
18622 let a: f64 = 2.;
18623 let e: u64 = 0;
18624 let r: u64 = transmute(vcltzd_f64(transmute(a)));
18625 assert_eq!(r, e);
18626 }
18627
18628 #[simd_test(enable = "neon")]
18629 unsafe fn test_vcagt_f64() {
18630 let a: f64 = -1.2;
18631 let b: f64 = -1.1;
18632 let e: u64x1 = u64x1::new(!0);
18633 let r: u64x1 = transmute(vcagt_f64(transmute(a), transmute(b)));
18634 assert_eq!(r, e);
18635 }
18636
18637 #[simd_test(enable = "neon")]
18638 unsafe fn test_vcagtq_f64() {
18639 let a: f64x2 = f64x2::new(-1.2, 0.0);
18640 let b: f64x2 = f64x2::new(-1.1, 0.0);
18641 let e: u64x2 = u64x2::new(!0, 0);
18642 let r: u64x2 = transmute(vcagtq_f64(transmute(a), transmute(b)));
18643 assert_eq!(r, e);
18644 }
18645
18646 #[simd_test(enable = "neon")]
18647 unsafe fn test_vcagts_f32() {
18648 let a: f32 = -1.2;
18649 let b: f32 = -1.1;
18650 let e: u32 = !0;
18651 let r: u32 = transmute(vcagts_f32(transmute(a), transmute(b)));
18652 assert_eq!(r, e);
18653 }
18654
18655 #[simd_test(enable = "neon")]
18656 unsafe fn test_vcagtd_f64() {
18657 let a: f64 = -1.2;
18658 let b: f64 = -1.1;
18659 let e: u64 = !0;
18660 let r: u64 = transmute(vcagtd_f64(transmute(a), transmute(b)));
18661 assert_eq!(r, e);
18662 }
18663
18664 #[simd_test(enable = "neon")]
18665 unsafe fn test_vcage_f64() {
18666 let a: f64 = -1.2;
18667 let b: f64 = -1.1;
18668 let e: u64x1 = u64x1::new(!0);
18669 let r: u64x1 = transmute(vcage_f64(transmute(a), transmute(b)));
18670 assert_eq!(r, e);
18671 }
18672
18673 #[simd_test(enable = "neon")]
18674 unsafe fn test_vcageq_f64() {
18675 let a: f64x2 = f64x2::new(-1.2, 0.0);
18676 let b: f64x2 = f64x2::new(-1.1, 0.0);
18677 let e: u64x2 = u64x2::new(!0, 0xFF_FF_FF_FF_FF_FF_FF_FF);
18678 let r: u64x2 = transmute(vcageq_f64(transmute(a), transmute(b)));
18679 assert_eq!(r, e);
18680 }
18681
18682 #[simd_test(enable = "neon")]
18683 unsafe fn test_vcages_f32() {
18684 let a: f32 = -1.2;
18685 let b: f32 = -1.1;
18686 let e: u32 = !0;
18687 let r: u32 = transmute(vcages_f32(transmute(a), transmute(b)));
18688 assert_eq!(r, e);
18689 }
18690
18691 #[simd_test(enable = "neon")]
18692 unsafe fn test_vcaged_f64() {
18693 let a: f64 = -1.2;
18694 let b: f64 = -1.1;
18695 let e: u64 = !0;
18696 let r: u64 = transmute(vcaged_f64(transmute(a), transmute(b)));
18697 assert_eq!(r, e);
18698 }
18699
18700 #[simd_test(enable = "neon")]
18701 unsafe fn test_vcalt_f64() {
18702 let a: f64 = -1.2;
18703 let b: f64 = -1.1;
18704 let e: u64x1 = u64x1::new(0);
18705 let r: u64x1 = transmute(vcalt_f64(transmute(a), transmute(b)));
18706 assert_eq!(r, e);
18707 }
18708
18709 #[simd_test(enable = "neon")]
18710 unsafe fn test_vcaltq_f64() {
18711 let a: f64x2 = f64x2::new(-1.2, 0.0);
18712 let b: f64x2 = f64x2::new(-1.1, 0.0);
18713 let e: u64x2 = u64x2::new(0, 0);
18714 let r: u64x2 = transmute(vcaltq_f64(transmute(a), transmute(b)));
18715 assert_eq!(r, e);
18716 }
18717
18718 #[simd_test(enable = "neon")]
18719 unsafe fn test_vcalts_f32() {
18720 let a: f32 = -1.2;
18721 let b: f32 = -1.1;
18722 let e: u32 = 0;
18723 let r: u32 = transmute(vcalts_f32(transmute(a), transmute(b)));
18724 assert_eq!(r, e);
18725 }
18726
18727 #[simd_test(enable = "neon")]
18728 unsafe fn test_vcaltd_f64() {
18729 let a: f64 = -1.2;
18730 let b: f64 = -1.1;
18731 let e: u64 = 0;
18732 let r: u64 = transmute(vcaltd_f64(transmute(a), transmute(b)));
18733 assert_eq!(r, e);
18734 }
18735
18736 #[simd_test(enable = "neon")]
18737 unsafe fn test_vcale_f64() {
18738 let a: f64 = -1.2;
18739 let b: f64 = -1.1;
18740 let e: u64x1 = u64x1::new(0);
18741 let r: u64x1 = transmute(vcale_f64(transmute(a), transmute(b)));
18742 assert_eq!(r, e);
18743 }
18744
18745 #[simd_test(enable = "neon")]
18746 unsafe fn test_vcaleq_f64() {
18747 let a: f64x2 = f64x2::new(-1.2, 0.0);
18748 let b: f64x2 = f64x2::new(-1.1, 0.0);
18749 let e: u64x2 = u64x2::new(0, 0xFF_FF_FF_FF_FF_FF_FF_FF);
18750 let r: u64x2 = transmute(vcaleq_f64(transmute(a), transmute(b)));
18751 assert_eq!(r, e);
18752 }
18753
18754 #[simd_test(enable = "neon")]
18755 unsafe fn test_vcales_f32() {
18756 let a: f32 = -1.2;
18757 let b: f32 = -1.1;
18758 let e: u32 = 0;
18759 let r: u32 = transmute(vcales_f32(transmute(a), transmute(b)));
18760 assert_eq!(r, e);
18761 }
18762
18763 #[simd_test(enable = "neon")]
18764 unsafe fn test_vcaled_f64() {
18765 let a: f64 = -1.2;
18766 let b: f64 = -1.1;
18767 let e: u64 = 0;
18768 let r: u64 = transmute(vcaled_f64(transmute(a), transmute(b)));
18769 assert_eq!(r, e);
18770 }
18771
18772 #[simd_test(enable = "neon")]
18773 unsafe fn test_vcopy_lane_s8() {
18774 let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
18775 let b: i8x8 = i8x8::new(0, 0x7F, 0, 0, 0, 0, 0, 0);
18776 let e: i8x8 = i8x8::new(0x7F, 2, 3, 4, 5, 6, 7, 8);
18777 let r: i8x8 = transmute(vcopy_lane_s8::<0, 1>(transmute(a), transmute(b)));
18778 assert_eq!(r, e);
18779 }
18780
18781 #[simd_test(enable = "neon")]
18782 unsafe fn test_vcopyq_laneq_s8() {
18783 let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
18784 let b: i8x16 = i8x16::new(0, 0x7F, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
18785 let e: i8x16 = i8x16::new(0x7F, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
18786 let r: i8x16 = transmute(vcopyq_laneq_s8::<0, 1>(transmute(a), transmute(b)));
18787 assert_eq!(r, e);
18788 }
18789
18790 #[simd_test(enable = "neon")]
18791 unsafe fn test_vcopy_lane_s16() {
18792 let a: i16x4 = i16x4::new(1, 2, 3, 4);
18793 let b: i16x4 = i16x4::new(0, 0x7F_FF, 0, 0);
18794 let e: i16x4 = i16x4::new(0x7F_FF, 2, 3, 4);
18795 let r: i16x4 = transmute(vcopy_lane_s16::<0, 1>(transmute(a), transmute(b)));
18796 assert_eq!(r, e);
18797 }
18798
18799 #[simd_test(enable = "neon")]
18800 unsafe fn test_vcopyq_laneq_s16() {
18801 let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
18802 let b: i16x8 = i16x8::new(0, 0x7F_FF, 0, 0, 0, 0, 0, 0);
18803 let e: i16x8 = i16x8::new(0x7F_FF, 2, 3, 4, 5, 6, 7, 8);
18804 let r: i16x8 = transmute(vcopyq_laneq_s16::<0, 1>(transmute(a), transmute(b)));
18805 assert_eq!(r, e);
18806 }
18807
18808 #[simd_test(enable = "neon")]
18809 unsafe fn test_vcopy_lane_s32() {
18810 let a: i32x2 = i32x2::new(1, 2);
18811 let b: i32x2 = i32x2::new(0, 0x7F_FF_FF_FF);
18812 let e: i32x2 = i32x2::new(0x7F_FF_FF_FF, 2);
18813 let r: i32x2 = transmute(vcopy_lane_s32::<0, 1>(transmute(a), transmute(b)));
18814 assert_eq!(r, e);
18815 }
18816
18817 #[simd_test(enable = "neon")]
18818 unsafe fn test_vcopyq_laneq_s32() {
18819 let a: i32x4 = i32x4::new(1, 2, 3, 4);
18820 let b: i32x4 = i32x4::new(0, 0x7F_FF_FF_FF, 0, 0);
18821 let e: i32x4 = i32x4::new(0x7F_FF_FF_FF, 2, 3, 4);
18822 let r: i32x4 = transmute(vcopyq_laneq_s32::<0, 1>(transmute(a), transmute(b)));
18823 assert_eq!(r, e);
18824 }
18825
18826 #[simd_test(enable = "neon")]
18827 unsafe fn test_vcopyq_laneq_s64() {
18828 let a: i64x2 = i64x2::new(1, 2);
18829 let b: i64x2 = i64x2::new(0, 0x7F_FF_FF_FF_FF_FF_FF_FF);
18830 let e: i64x2 = i64x2::new(0x7F_FF_FF_FF_FF_FF_FF_FF, 2);
18831 let r: i64x2 = transmute(vcopyq_laneq_s64::<0, 1>(transmute(a), transmute(b)));
18832 assert_eq!(r, e);
18833 }
18834
18835 #[simd_test(enable = "neon")]
18836 unsafe fn test_vcopy_lane_u8() {
18837 let a: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
18838 let b: u8x8 = u8x8::new(0, 0xFF, 0, 0, 0, 0, 0, 0);
18839 let e: u8x8 = u8x8::new(0xFF, 2, 3, 4, 5, 6, 7, 8);
18840 let r: u8x8 = transmute(vcopy_lane_u8::<0, 1>(transmute(a), transmute(b)));
18841 assert_eq!(r, e);
18842 }
18843
18844 #[simd_test(enable = "neon")]
18845 unsafe fn test_vcopyq_laneq_u8() {
18846 let a: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
18847 let b: u8x16 = u8x16::new(0, 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
18848 let e: u8x16 = u8x16::new(0xFF, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
18849 let r: u8x16 = transmute(vcopyq_laneq_u8::<0, 1>(transmute(a), transmute(b)));
18850 assert_eq!(r, e);
18851 }
18852
18853 #[simd_test(enable = "neon")]
18854 unsafe fn test_vcopy_lane_u16() {
18855 let a: u16x4 = u16x4::new(1, 2, 3, 4);
18856 let b: u16x4 = u16x4::new(0, 0xFF_FF, 0, 0);
18857 let e: u16x4 = u16x4::new(0xFF_FF, 2, 3, 4);
18858 let r: u16x4 = transmute(vcopy_lane_u16::<0, 1>(transmute(a), transmute(b)));
18859 assert_eq!(r, e);
18860 }
18861
18862 #[simd_test(enable = "neon")]
18863 unsafe fn test_vcopyq_laneq_u16() {
18864 let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
18865 let b: u16x8 = u16x8::new(0, 0xFF_FF, 0, 0, 0, 0, 0, 0);
18866 let e: u16x8 = u16x8::new(0xFF_FF, 2, 3, 4, 5, 6, 7, 8);
18867 let r: u16x8 = transmute(vcopyq_laneq_u16::<0, 1>(transmute(a), transmute(b)));
18868 assert_eq!(r, e);
18869 }
18870
18871 #[simd_test(enable = "neon")]
18872 unsafe fn test_vcopy_lane_u32() {
18873 let a: u32x2 = u32x2::new(1, 2);
18874 let b: u32x2 = u32x2::new(0, 0xFF_FF_FF_FF);
18875 let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 2);
18876 let r: u32x2 = transmute(vcopy_lane_u32::<0, 1>(transmute(a), transmute(b)));
18877 assert_eq!(r, e);
18878 }
18879
18880 #[simd_test(enable = "neon")]
18881 unsafe fn test_vcopyq_laneq_u32() {
18882 let a: u32x4 = u32x4::new(1, 2, 3, 4);
18883 let b: u32x4 = u32x4::new(0, 0xFF_FF_FF_FF, 0, 0);
18884 let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 2, 3, 4);
18885 let r: u32x4 = transmute(vcopyq_laneq_u32::<0, 1>(transmute(a), transmute(b)));
18886 assert_eq!(r, e);
18887 }
18888
18889 #[simd_test(enable = "neon")]
18890 unsafe fn test_vcopyq_laneq_u64() {
18891 let a: u64x2 = u64x2::new(1, 2);
18892 let b: u64x2 = u64x2::new(0, 0xFF_FF_FF_FF_FF_FF_FF_FF);
18893 let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 2);
18894 let r: u64x2 = transmute(vcopyq_laneq_u64::<0, 1>(transmute(a), transmute(b)));
18895 assert_eq!(r, e);
18896 }
18897
18898 #[simd_test(enable = "neon")]
18899 unsafe fn test_vcopy_lane_p8() {
18900 let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
18901 let b: i8x8 = i8x8::new(0, 0x7F, 0, 0, 0, 0, 0, 0);
18902 let e: i8x8 = i8x8::new(0x7F, 2, 3, 4, 5, 6, 7, 8);
18903 let r: i8x8 = transmute(vcopy_lane_p8::<0, 1>(transmute(a), transmute(b)));
18904 assert_eq!(r, e);
18905 }
18906
18907 #[simd_test(enable = "neon")]
18908 unsafe fn test_vcopyq_laneq_p8() {
18909 let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
18910 let b: i8x16 = i8x16::new(0, 0x7F, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
18911 let e: i8x16 = i8x16::new(0x7F, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
18912 let r: i8x16 = transmute(vcopyq_laneq_p8::<0, 1>(transmute(a), transmute(b)));
18913 assert_eq!(r, e);
18914 }
18915
18916 #[simd_test(enable = "neon")]
18917 unsafe fn test_vcopy_lane_p16() {
18918 let a: i16x4 = i16x4::new(1, 2, 3, 4);
18919 let b: i16x4 = i16x4::new(0, 0x7F_FF, 0, 0);
18920 let e: i16x4 = i16x4::new(0x7F_FF, 2, 3, 4);
18921 let r: i16x4 = transmute(vcopy_lane_p16::<0, 1>(transmute(a), transmute(b)));
18922 assert_eq!(r, e);
18923 }
18924
18925 #[simd_test(enable = "neon")]
18926 unsafe fn test_vcopyq_laneq_p16() {
18927 let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
18928 let b: i16x8 = i16x8::new(0, 0x7F_FF, 0, 0, 0, 0, 0, 0);
18929 let e: i16x8 = i16x8::new(0x7F_FF, 2, 3, 4, 5, 6, 7, 8);
18930 let r: i16x8 = transmute(vcopyq_laneq_p16::<0, 1>(transmute(a), transmute(b)));
18931 assert_eq!(r, e);
18932 }
18933
18934 #[simd_test(enable = "neon")]
18935 unsafe fn test_vcopyq_laneq_p64() {
18936 let a: i64x2 = i64x2::new(1, 2);
18937 let b: i64x2 = i64x2::new(0, 0x7F_FF_FF_FF_FF_FF_FF_FF);
18938 let e: i64x2 = i64x2::new(0x7F_FF_FF_FF_FF_FF_FF_FF, 2);
18939 let r: i64x2 = transmute(vcopyq_laneq_p64::<0, 1>(transmute(a), transmute(b)));
18940 assert_eq!(r, e);
18941 }
18942
18943 #[simd_test(enable = "neon")]
18944 unsafe fn test_vcopy_lane_f32() {
18945 let a: f32x2 = f32x2::new(1., 2.);
18946 let b: f32x2 = f32x2::new(0., 0.5);
18947 let e: f32x2 = f32x2::new(0.5, 2.);
18948 let r: f32x2 = transmute(vcopy_lane_f32::<0, 1>(transmute(a), transmute(b)));
18949 assert_eq!(r, e);
18950 }
18951
18952 #[simd_test(enable = "neon")]
18953 unsafe fn test_vcopyq_laneq_f32() {
18954 let a: f32x4 = f32x4::new(1., 2., 3., 4.);
18955 let b: f32x4 = f32x4::new(0., 0.5, 0., 0.);
18956 let e: f32x4 = f32x4::new(0.5, 2., 3., 4.);
18957 let r: f32x4 = transmute(vcopyq_laneq_f32::<0, 1>(transmute(a), transmute(b)));
18958 assert_eq!(r, e);
18959 }
18960
18961 #[simd_test(enable = "neon")]
18962 unsafe fn test_vcopyq_laneq_f64() {
18963 let a: f64x2 = f64x2::new(1., 2.);
18964 let b: f64x2 = f64x2::new(0., 0.5);
18965 let e: f64x2 = f64x2::new(0.5, 2.);
18966 let r: f64x2 = transmute(vcopyq_laneq_f64::<0, 1>(transmute(a), transmute(b)));
18967 assert_eq!(r, e);
18968 }
18969
18970 #[simd_test(enable = "neon")]
18971 unsafe fn test_vcopy_laneq_s8() {
18972 let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
18973 let b: i8x16 = i8x16::new(0, 0x7F, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
18974 let e: i8x8 = i8x8::new(0x7F, 2, 3, 4, 5, 6, 7, 8);
18975 let r: i8x8 = transmute(vcopy_laneq_s8::<0, 1>(transmute(a), transmute(b)));
18976 assert_eq!(r, e);
18977 }
18978
18979 #[simd_test(enable = "neon")]
18980 unsafe fn test_vcopy_laneq_s16() {
18981 let a: i16x4 = i16x4::new(1, 2, 3, 4);
18982 let b: i16x8 = i16x8::new(0, 0x7F_FF, 0, 0, 0, 0, 0, 0);
18983 let e: i16x4 = i16x4::new(0x7F_FF, 2, 3, 4);
18984 let r: i16x4 = transmute(vcopy_laneq_s16::<0, 1>(transmute(a), transmute(b)));
18985 assert_eq!(r, e);
18986 }
18987
18988 #[simd_test(enable = "neon")]
18989 unsafe fn test_vcopy_laneq_s32() {
18990 let a: i32x2 = i32x2::new(1, 2);
18991 let b: i32x4 = i32x4::new(0, 0x7F_FF_FF_FF, 0, 0);
18992 let e: i32x2 = i32x2::new(0x7F_FF_FF_FF, 2);
18993 let r: i32x2 = transmute(vcopy_laneq_s32::<0, 1>(transmute(a), transmute(b)));
18994 assert_eq!(r, e);
18995 }
18996
18997 #[simd_test(enable = "neon")]
18998 unsafe fn test_vcopy_laneq_u8() {
18999 let a: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
19000 let b: u8x16 = u8x16::new(0, 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
19001 let e: u8x8 = u8x8::new(0xFF, 2, 3, 4, 5, 6, 7, 8);
19002 let r: u8x8 = transmute(vcopy_laneq_u8::<0, 1>(transmute(a), transmute(b)));
19003 assert_eq!(r, e);
19004 }
19005
19006 #[simd_test(enable = "neon")]
19007 unsafe fn test_vcopy_laneq_u16() {
19008 let a: u16x4 = u16x4::new(1, 2, 3, 4);
19009 let b: u16x8 = u16x8::new(0, 0xFF_FF, 0, 0, 0, 0, 0, 0);
19010 let e: u16x4 = u16x4::new(0xFF_FF, 2, 3, 4);
19011 let r: u16x4 = transmute(vcopy_laneq_u16::<0, 1>(transmute(a), transmute(b)));
19012 assert_eq!(r, e);
19013 }
19014
19015 #[simd_test(enable = "neon")]
19016 unsafe fn test_vcopy_laneq_u32() {
19017 let a: u32x2 = u32x2::new(1, 2);
19018 let b: u32x4 = u32x4::new(0, 0xFF_FF_FF_FF, 0, 0);
19019 let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 2);
19020 let r: u32x2 = transmute(vcopy_laneq_u32::<0, 1>(transmute(a), transmute(b)));
19021 assert_eq!(r, e);
19022 }
19023
19024 #[simd_test(enable = "neon")]
19025 unsafe fn test_vcopy_laneq_p8() {
19026 let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
19027 let b: i8x16 = i8x16::new(0, 0x7F, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
19028 let e: i8x8 = i8x8::new(0x7F, 2, 3, 4, 5, 6, 7, 8);
19029 let r: i8x8 = transmute(vcopy_laneq_p8::<0, 1>(transmute(a), transmute(b)));
19030 assert_eq!(r, e);
19031 }
19032
19033 #[simd_test(enable = "neon")]
19034 unsafe fn test_vcopy_laneq_p16() {
19035 let a: i16x4 = i16x4::new(1, 2, 3, 4);
19036 let b: i16x8 = i16x8::new(0, 0x7F_FF, 0, 0, 0, 0, 0, 0);
19037 let e: i16x4 = i16x4::new(0x7F_FF, 2, 3, 4);
19038 let r: i16x4 = transmute(vcopy_laneq_p16::<0, 1>(transmute(a), transmute(b)));
19039 assert_eq!(r, e);
19040 }
19041
19042 #[simd_test(enable = "neon")]
19043 unsafe fn test_vcopy_laneq_f32() {
19044 let a: f32x2 = f32x2::new(1., 2.);
19045 let b: f32x4 = f32x4::new(0., 0.5, 0., 0.);
19046 let e: f32x2 = f32x2::new(0.5, 2.);
19047 let r: f32x2 = transmute(vcopy_laneq_f32::<0, 1>(transmute(a), transmute(b)));
19048 assert_eq!(r, e);
19049 }
19050
19051 #[simd_test(enable = "neon")]
19052 unsafe fn test_vcopyq_lane_s8() {
19053 let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
19054 let b: i8x8 = i8x8::new(0, 0x7F, 0, 0, 0, 0, 0, 0);
19055 let e: i8x16 = i8x16::new(0x7F, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
19056 let r: i8x16 = transmute(vcopyq_lane_s8::<0, 1>(transmute(a), transmute(b)));
19057 assert_eq!(r, e);
19058 }
19059
19060 #[simd_test(enable = "neon")]
19061 unsafe fn test_vcopyq_lane_s16() {
19062 let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
19063 let b: i16x4 = i16x4::new(0, 0x7F_FF, 0, 0);
19064 let e: i16x8 = i16x8::new(0x7F_FF, 2, 3, 4, 5, 6, 7, 8);
19065 let r: i16x8 = transmute(vcopyq_lane_s16::<0, 1>(transmute(a), transmute(b)));
19066 assert_eq!(r, e);
19067 }
19068
19069 #[simd_test(enable = "neon")]
19070 unsafe fn test_vcopyq_lane_s32() {
19071 let a: i32x4 = i32x4::new(1, 2, 3, 4);
19072 let b: i32x2 = i32x2::new(0, 0x7F_FF_FF_FF);
19073 let e: i32x4 = i32x4::new(0x7F_FF_FF_FF, 2, 3, 4);
19074 let r: i32x4 = transmute(vcopyq_lane_s32::<0, 1>(transmute(a), transmute(b)));
19075 assert_eq!(r, e);
19076 }
19077
19078 #[simd_test(enable = "neon")]
19079 unsafe fn test_vcopyq_lane_u8() {
19080 let a: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
19081 let b: u8x8 = u8x8::new(0, 0xFF, 0, 0, 0, 0, 0, 0);
19082 let e: u8x16 = u8x16::new(0xFF, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
19083 let r: u8x16 = transmute(vcopyq_lane_u8::<0, 1>(transmute(a), transmute(b)));
19084 assert_eq!(r, e);
19085 }
19086
19087 #[simd_test(enable = "neon")]
19088 unsafe fn test_vcopyq_lane_u16() {
19089 let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
19090 let b: u16x4 = u16x4::new(0, 0xFF_FF, 0, 0);
19091 let e: u16x8 = u16x8::new(0xFF_FF, 2, 3, 4, 5, 6, 7, 8);
19092 let r: u16x8 = transmute(vcopyq_lane_u16::<0, 1>(transmute(a), transmute(b)));
19093 assert_eq!(r, e);
19094 }
19095
19096 #[simd_test(enable = "neon")]
19097 unsafe fn test_vcopyq_lane_u32() {
19098 let a: u32x4 = u32x4::new(1, 2, 3, 4);
19099 let b: u32x2 = u32x2::new(0, 0xFF_FF_FF_FF);
19100 let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 2, 3, 4);
19101 let r: u32x4 = transmute(vcopyq_lane_u32::<0, 1>(transmute(a), transmute(b)));
19102 assert_eq!(r, e);
19103 }
19104
19105 #[simd_test(enable = "neon")]
19106 unsafe fn test_vcopyq_lane_p8() {
19107 let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
19108 let b: i8x8 = i8x8::new(0, 0x7F, 0, 0, 0, 0, 0, 0);
19109 let e: i8x16 = i8x16::new(0x7F, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
19110 let r: i8x16 = transmute(vcopyq_lane_p8::<0, 1>(transmute(a), transmute(b)));
19111 assert_eq!(r, e);
19112 }
19113
19114 #[simd_test(enable = "neon")]
19115 unsafe fn test_vcopyq_lane_p16() {
19116 let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
19117 let b: i16x4 = i16x4::new(0, 0x7F_FF, 0, 0);
19118 let e: i16x8 = i16x8::new(0x7F_FF, 2, 3, 4, 5, 6, 7, 8);
19119 let r: i16x8 = transmute(vcopyq_lane_p16::<0, 1>(transmute(a), transmute(b)));
19120 assert_eq!(r, e);
19121 }
19122
19123 #[simd_test(enable = "neon")]
19124 unsafe fn test_vcopyq_lane_s64() {
19125 let a: i64x2 = i64x2::new(1, 2);
19126 let b: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
19127 let e: i64x2 = i64x2::new(1, 0x7F_FF_FF_FF_FF_FF_FF_FF);
19128 let r: i64x2 = transmute(vcopyq_lane_s64::<1, 0>(transmute(a), transmute(b)));
19129 assert_eq!(r, e);
19130 }
19131
19132 #[simd_test(enable = "neon")]
19133 unsafe fn test_vcopyq_lane_u64() {
19134 let a: u64x2 = u64x2::new(1, 2);
19135 let b: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
19136 let e: u64x2 = u64x2::new(1, 0xFF_FF_FF_FF_FF_FF_FF_FF);
19137 let r: u64x2 = transmute(vcopyq_lane_u64::<1, 0>(transmute(a), transmute(b)));
19138 assert_eq!(r, e);
19139 }
19140
19141 #[simd_test(enable = "neon")]
19142 unsafe fn test_vcopyq_lane_p64() {
19143 let a: i64x2 = i64x2::new(1, 2);
19144 let b: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
19145 let e: i64x2 = i64x2::new(1, 0x7F_FF_FF_FF_FF_FF_FF_FF);
19146 let r: i64x2 = transmute(vcopyq_lane_p64::<1, 0>(transmute(a), transmute(b)));
19147 assert_eq!(r, e);
19148 }
19149
19150 #[simd_test(enable = "neon")]
19151 unsafe fn test_vcopyq_lane_f32() {
19152 let a: f32x4 = f32x4::new(1., 2., 3., 4.);
19153 let b: f32x2 = f32x2::new(0.5, 0.);
19154 let e: f32x4 = f32x4::new(1., 0.5, 3., 4.);
19155 let r: f32x4 = transmute(vcopyq_lane_f32::<1, 0>(transmute(a), transmute(b)));
19156 assert_eq!(r, e);
19157 }
19158
19159 #[simd_test(enable = "neon")]
19160 unsafe fn test_vcopyq_lane_f64() {
19161 let a: f64x2 = f64x2::new(1., 2.);
19162 let b: f64 = 0.5;
19163 let e: f64x2 = f64x2::new(1., 0.5);
19164 let r: f64x2 = transmute(vcopyq_lane_f64::<1, 0>(transmute(a), transmute(b)));
19165 assert_eq!(r, e);
19166 }
19167
19168 #[simd_test(enable = "neon")]
19169 unsafe fn test_vcreate_f64() {
19170 let a: u64 = 0;
19171 let e: f64 = 0.;
19172 let r: f64 = transmute(vcreate_f64(transmute(a)));
19173 assert_eq!(r, e);
19174 }
19175
19176 #[simd_test(enable = "neon")]
19177 unsafe fn test_vcvt_f64_s64() {
19178 let a: i64x1 = i64x1::new(1);
19179 let e: f64 = 1.;
19180 let r: f64 = transmute(vcvt_f64_s64(transmute(a)));
19181 assert_eq!(r, e);
19182 }
19183
19184 #[simd_test(enable = "neon")]
19185 unsafe fn test_vcvtq_f64_s64() {
19186 let a: i64x2 = i64x2::new(1, 2);
19187 let e: f64x2 = f64x2::new(1., 2.);
19188 let r: f64x2 = transmute(vcvtq_f64_s64(transmute(a)));
19189 assert_eq!(r, e);
19190 }
19191
19192 #[simd_test(enable = "neon")]
19193 unsafe fn test_vcvt_f64_u64() {
19194 let a: u64x1 = u64x1::new(1);
19195 let e: f64 = 1.;
19196 let r: f64 = transmute(vcvt_f64_u64(transmute(a)));
19197 assert_eq!(r, e);
19198 }
19199 
19200 #[simd_test(enable = "neon")]
19201 unsafe fn test_vcvtq_f64_u64() {
19202 let a: u64x2 = u64x2::new(1, 2);
19203 let e: f64x2 = f64x2::new(1., 2.);
19204 let r: f64x2 = transmute(vcvtq_f64_u64(transmute(a)));
19205 assert_eq!(r, e);
19206 }
19207
19208 #[simd_test(enable = "neon")]
19209 unsafe fn test_vcvt_f64_f32() {
19210 let a: f32x2 = f32x2::new(-1.2, 1.2);
19211 let e: f64x2 = f64x2::new(-1.2f32 as f64, 1.2f32 as f64);
19212 let r: f64x2 = transmute(vcvt_f64_f32(transmute(a)));
19213 assert_eq!(r, e);
19214 }
19215 
19216 #[simd_test(enable = "neon")]
19217 unsafe fn test_vcvt_high_f64_f32() {
19218 let a: f32x4 = f32x4::new(-1.2, 1.2, 2.3, 3.4);
19219 let e: f64x2 = f64x2::new(2.3f32 as f64, 3.4f32 as f64);
19220 let r: f64x2 = transmute(vcvt_high_f64_f32(transmute(a)));
19221 assert_eq!(r, e);
19222 }
19223
19224 #[simd_test(enable = "neon")]
19225 unsafe fn test_vcvt_f32_f64() {
19226 let a: f64x2 = f64x2::new(-1.2, 1.2);
19227 let e: f32x2 = f32x2::new(-1.2f64 as f32, 1.2f64 as f32);
19228 let r: f32x2 = transmute(vcvt_f32_f64(transmute(a)));
19229 assert_eq!(r, e);
19230 }
19231 
19232 #[simd_test(enable = "neon")]
19233 unsafe fn test_vcvt_high_f32_f64() {
19234 let a: f32x2 = f32x2::new(-1.2, 1.2);
19235 let b: f64x2 = f64x2::new(-2.3, 3.4);
19236 let e: f32x4 = f32x4::new(-1.2, 1.2, -2.3f64 as f32, 3.4f64 as f32);
19237 let r: f32x4 = transmute(vcvt_high_f32_f64(transmute(a), transmute(b)));
19238 assert_eq!(r, e);
19239 }
19240
19241 #[simd_test(enable = "neon")]
19242 unsafe fn test_vcvtx_f32_f64() {
19243 let a: f64x2 = f64x2::new(-1.0, 2.0);
19244 let e: f32x2 = f32x2::new(-1.0, 2.0);
19245 let r: f32x2 = transmute(vcvtx_f32_f64(transmute(a)));
19246 assert_eq!(r, e);
19247 }
19248 
19249 #[simd_test(enable = "neon")]
19250 unsafe fn test_vcvtxd_f32_f64() {
19251 let a: f64 = -1.0;
19252 let e: f32 = -1.0;
19253 let r: f32 = transmute(vcvtxd_f32_f64(transmute(a)));
19254 assert_eq!(r, e);
19255 }
19256
19257 #[simd_test(enable = "neon")]
19258 unsafe fn test_vcvtx_high_f32_f64() {
19259 let a: f32x2 = f32x2::new(-1.0, 2.0);
19260 let b: f64x2 = f64x2::new(-3.0, 4.0);
19261 let e: f32x4 = f32x4::new(-1.0, 2.0, -3.0, 4.0);
19262 let r: f32x4 = transmute(vcvtx_high_f32_f64(transmute(a), transmute(b)));
19263 assert_eq!(r, e);
19264 }
19265 
19266 #[simd_test(enable = "neon")]
19267 unsafe fn test_vcvt_n_f64_s64() {
19268 let a: i64x1 = i64x1::new(1);
19269 let e: f64 = 0.25;
19270 let r: f64 = transmute(vcvt_n_f64_s64::<2>(transmute(a)));
19271 assert_eq!(r, e);
19272 }
19273
19274 #[simd_test(enable = "neon")]
19275 unsafe fn test_vcvtq_n_f64_s64() {
19276 let a: i64x2 = i64x2::new(1, 2);
19277 let e: f64x2 = f64x2::new(0.25, 0.5);
19278 let r: f64x2 = transmute(vcvtq_n_f64_s64::<2>(transmute(a)));
19279 assert_eq!(r, e);
19280 }
19281 
19282 #[simd_test(enable = "neon")]
19283 unsafe fn test_vcvts_n_f32_s32() {
19284 let a: i32 = 1;
19285 let e: f32 = 0.25;
19286 let r: f32 = transmute(vcvts_n_f32_s32::<2>(transmute(a)));
19287 assert_eq!(r, e);
19288 }
19289
19290 #[simd_test(enable = "neon")]
19291 unsafe fn test_vcvtd_n_f64_s64() {
19292 let a: i64 = 1;
19293 let e: f64 = 0.25;
19294 let r: f64 = transmute(vcvtd_n_f64_s64::<2>(transmute(a)));
19295 assert_eq!(r, e);
19296 }
19297
19298 #[simd_test(enable = "neon")]
19299 unsafe fn test_vcvt_n_f64_u64() {
19300 let a: u64x1 = u64x1::new(1);
19301 let e: f64 = 0.25;
19302 let r: f64 = transmute(vcvt_n_f64_u64::<2>(transmute(a)));
19303 assert_eq!(r, e);
19304 }
19305
19306 #[simd_test(enable = "neon")]
19307 unsafe fn test_vcvtq_n_f64_u64() {
19308 let a: u64x2 = u64x2::new(1, 2);
19309 let e: f64x2 = f64x2::new(0.25, 0.5);
19310 let r: f64x2 = transmute(vcvtq_n_f64_u64::<2>(transmute(a)));
19311 assert_eq!(r, e);
19312 }
19313
19314 #[simd_test(enable = "neon")]
19315 unsafe fn test_vcvts_n_f32_u32() {
19316 let a: u32 = 1;
19317 let e: f32 = 0.25;
19318 let r: f32 = transmute(vcvts_n_f32_u32::<2>(transmute(a)));
19319 assert_eq!(r, e);
19320 }
19321
19322 #[simd_test(enable = "neon")]
19323 unsafe fn test_vcvtd_n_f64_u64() {
19324 let a: u64 = 1;
19325 let e: f64 = 0.25;
19326 let r: f64 = transmute(vcvtd_n_f64_u64::<2>(transmute(a)));
19327 assert_eq!(r, e);
19328 }
19329
19330 #[simd_test(enable = "neon")]
19331 unsafe fn test_vcvt_n_s64_f64() {
19332 let a: f64 = 0.25;
19333 let e: i64x1 = i64x1::new(1);
19334 let r: i64x1 = transmute(vcvt_n_s64_f64::<2>(transmute(a)));
19335 assert_eq!(r, e);
19336 }
19337
19338 #[simd_test(enable = "neon")]
19339 unsafe fn test_vcvtq_n_s64_f64() {
19340 let a: f64x2 = f64x2::new(0.25, 0.5);
19341 let e: i64x2 = i64x2::new(1, 2);
19342 let r: i64x2 = transmute(vcvtq_n_s64_f64::<2>(transmute(a)));
19343 assert_eq!(r, e);
19344 }
19345
19346 #[simd_test(enable = "neon")]
19347 unsafe fn test_vcvts_n_s32_f32() {
19348 let a: f32 = 0.25;
19349 let e: i32 = 1;
19350 let r: i32 = transmute(vcvts_n_s32_f32::<2>(transmute(a)));
19351 assert_eq!(r, e);
19352 }
19353
19354 #[simd_test(enable = "neon")]
19355 unsafe fn test_vcvtd_n_s64_f64() {
19356 let a: f64 = 0.25;
19357 let e: i64 = 1;
19358 let r: i64 = transmute(vcvtd_n_s64_f64::<2>(transmute(a)));
19359 assert_eq!(r, e);
19360 }
19361
19362 #[simd_test(enable = "neon")]
19363 unsafe fn test_vcvt_n_u64_f64() {
19364 let a: f64 = 0.25;
19365 let e: u64x1 = u64x1::new(1);
19366 let r: u64x1 = transmute(vcvt_n_u64_f64::<2>(transmute(a)));
19367 assert_eq!(r, e);
19368 }
19369
19370 #[simd_test(enable = "neon")]
19371 unsafe fn test_vcvtq_n_u64_f64() {
19372 let a: f64x2 = f64x2::new(0.25, 0.5);
19373 let e: u64x2 = u64x2::new(1, 2);
19374 let r: u64x2 = transmute(vcvtq_n_u64_f64::<2>(transmute(a)));
19375 assert_eq!(r, e);
19376 }
19377
19378 #[simd_test(enable = "neon")]
19379 unsafe fn test_vcvts_n_u32_f32() {
19380 let a: f32 = 0.25;
19381 let e: u32 = 1;
19382 let r: u32 = transmute(vcvts_n_u32_f32::<2>(transmute(a)));
19383 assert_eq!(r, e);
19384 }
19385
19386 #[simd_test(enable = "neon")]
19387 unsafe fn test_vcvtd_n_u64_f64() {
19388 let a: f64 = 0.25;
19389 let e: u64 = 1;
19390 let r: u64 = transmute(vcvtd_n_u64_f64::<2>(transmute(a)));
19391 assert_eq!(r, e);
19392 }
19393
19394 #[simd_test(enable = "neon")]
19395 unsafe fn test_vcvts_f32_s32() {
19396 let a: i32 = 1;
19397 let e: f32 = 1.;
19398 let r: f32 = transmute(vcvts_f32_s32(transmute(a)));
19399 assert_eq!(r, e);
19400 }
19401
19402 #[simd_test(enable = "neon")]
19403 unsafe fn test_vcvtd_f64_s64() {
19404 let a: i64 = 1;
19405 let e: f64 = 1.;
19406 let r: f64 = transmute(vcvtd_f64_s64(transmute(a)));
19407 assert_eq!(r, e);
19408 }
19409
19410 #[simd_test(enable = "neon")]
19411 unsafe fn test_vcvts_f32_u32() {
19412 let a: u32 = 1;
19413 let e: f32 = 1.;
19414 let r: f32 = transmute(vcvts_f32_u32(transmute(a)));
19415 assert_eq!(r, e);
19416 }
19417
19418 #[simd_test(enable = "neon")]
19419 unsafe fn test_vcvtd_f64_u64() {
19420 let a: u64 = 1;
19421 let e: f64 = 1.;
19422 let r: f64 = transmute(vcvtd_f64_u64(transmute(a)));
19423 assert_eq!(r, e);
19424 }
19425
19426 #[simd_test(enable = "neon")]
19427 unsafe fn test_vcvts_s32_f32() {
19428 let a: f32 = 1.;
19429 let e: i32 = 1;
19430 let r: i32 = transmute(vcvts_s32_f32(transmute(a)));
19431 assert_eq!(r, e);
19432 }
19433
19434 #[simd_test(enable = "neon")]
19435 unsafe fn test_vcvtd_s64_f64() {
19436 let a: f64 = 1.;
19437 let e: i64 = 1;
19438 let r: i64 = transmute(vcvtd_s64_f64(transmute(a)));
19439 assert_eq!(r, e);
19440 }
19441
19442 #[simd_test(enable = "neon")]
19443 unsafe fn test_vcvts_u32_f32() {
19444 let a: f32 = 1.;
19445 let e: u32 = 1;
19446 let r: u32 = transmute(vcvts_u32_f32(transmute(a)));
19447 assert_eq!(r, e);
19448 }
19449
19450 #[simd_test(enable = "neon")]
19451 unsafe fn test_vcvtd_u64_f64() {
19452 let a: f64 = 1.;
19453 let e: u64 = 1;
19454 let r: u64 = transmute(vcvtd_u64_f64(transmute(a)));
19455 assert_eq!(r, e);
19456 }
19457
19458 #[simd_test(enable = "neon")]
19459 unsafe fn test_vcvt_s64_f64() {
19460 let a: f64 = -1.1;
19461 let e: i64x1 = i64x1::new(-1);
19462 let r: i64x1 = transmute(vcvt_s64_f64(transmute(a)));
19463 assert_eq!(r, e);
19464 }
19465
19466 #[simd_test(enable = "neon")]
19467 unsafe fn test_vcvtq_s64_f64() {
19468 let a: f64x2 = f64x2::new(-1.1, 2.1);
19469 let e: i64x2 = i64x2::new(-1, 2);
19470 let r: i64x2 = transmute(vcvtq_s64_f64(transmute(a)));
19471 assert_eq!(r, e);
19472 }
19473
19474 #[simd_test(enable = "neon")]
19475 unsafe fn test_vcvt_u64_f64() {
19476 let a: f64 = 1.1;
19477 let e: u64x1 = u64x1::new(1);
19478 let r: u64x1 = transmute(vcvt_u64_f64(transmute(a)));
19479 assert_eq!(r, e);
19480 }
19481
19482 #[simd_test(enable = "neon")]
19483 unsafe fn test_vcvtq_u64_f64() {
19484 let a: f64x2 = f64x2::new(1.1, 2.1);
19485 let e: u64x2 = u64x2::new(1, 2);
19486 let r: u64x2 = transmute(vcvtq_u64_f64(transmute(a)));
19487 assert_eq!(r, e);
19488 }
19489
19490 #[simd_test(enable = "neon")]
19491 unsafe fn test_vcvta_s32_f32() {
19492 let a: f32x2 = f32x2::new(-1.1, 2.1);
19493 let e: i32x2 = i32x2::new(-1, 2);
19494 let r: i32x2 = transmute(vcvta_s32_f32(transmute(a)));
19495 assert_eq!(r, e);
19496 }
19497
19498 #[simd_test(enable = "neon")]
19499 unsafe fn test_vcvtaq_s32_f32() {
19500 let a: f32x4 = f32x4::new(-1.1, 2.1, -2.9, 3.9);
19501 let e: i32x4 = i32x4::new(-1, 2, -3, 4);
19502 let r: i32x4 = transmute(vcvtaq_s32_f32(transmute(a)));
19503 assert_eq!(r, e);
19504 }
19505
19506 #[simd_test(enable = "neon")]
19507 unsafe fn test_vcvta_s64_f64() {
19508 let a: f64 = -1.1;
19509 let e: i64x1 = i64x1::new(-1);
19510 let r: i64x1 = transmute(vcvta_s64_f64(transmute(a)));
19511 assert_eq!(r, e);
19512 }
19513
19514 #[simd_test(enable = "neon")]
19515 unsafe fn test_vcvtaq_s64_f64() {
19516 let a: f64x2 = f64x2::new(-1.1, 2.1);
19517 let e: i64x2 = i64x2::new(-1, 2);
19518 let r: i64x2 = transmute(vcvtaq_s64_f64(transmute(a)));
19519 assert_eq!(r, e);
19520 }
19521
19522 #[simd_test(enable = "neon")]
19523 unsafe fn test_vcvtas_s32_f32() {
19524 let a: f32 = 2.9;
19525 let e: i32 = 3;
19526 let r: i32 = transmute(vcvtas_s32_f32(transmute(a)));
19527 assert_eq!(r, e);
19528 }
19529
19530 #[simd_test(enable = "neon")]
19531 unsafe fn test_vcvtad_s64_f64() {
19532 let a: f64 = 2.9;
19533 let e: i64 = 3;
19534 let r: i64 = transmute(vcvtad_s64_f64(transmute(a)));
19535 assert_eq!(r, e);
19536 }
19537
19538 #[simd_test(enable = "neon")]
19539 unsafe fn test_vcvtas_u32_f32() {
19540 let a: f32 = 2.9;
19541 let e: u32 = 3;
19542 let r: u32 = transmute(vcvtas_u32_f32(transmute(a)));
19543 assert_eq!(r, e);
19544 }
19545
19546 #[simd_test(enable = "neon")]
19547 unsafe fn test_vcvtad_u64_f64() {
19548 let a: f64 = 2.9;
19549 let e: u64 = 3;
19550 let r: u64 = transmute(vcvtad_u64_f64(transmute(a)));
19551 assert_eq!(r, e);
19552 }
19553
19554 #[simd_test(enable = "neon")]
19555 unsafe fn test_vcvtn_s32_f32() {
19556 let a: f32x2 = f32x2::new(-1.5, 2.1);
19557 let e: i32x2 = i32x2::new(-2, 2);
19558 let r: i32x2 = transmute(vcvtn_s32_f32(transmute(a)));
19559 assert_eq!(r, e);
19560 }
19561
19562 #[simd_test(enable = "neon")]
19563 unsafe fn test_vcvtnq_s32_f32() {
19564 let a: f32x4 = f32x4::new(-1.5, 2.1, -2.9, 3.9);
19565 let e: i32x4 = i32x4::new(-2, 2, -3, 4);
19566 let r: i32x4 = transmute(vcvtnq_s32_f32(transmute(a)));
19567 assert_eq!(r, e);
19568 }
19569
19570 #[simd_test(enable = "neon")]
19571 unsafe fn test_vcvtn_s64_f64() {
19572 let a: f64 = -1.5;
19573 let e: i64x1 = i64x1::new(-2);
19574 let r: i64x1 = transmute(vcvtn_s64_f64(transmute(a)));
19575 assert_eq!(r, e);
19576 }
19577
19578 #[simd_test(enable = "neon")]
19579 unsafe fn test_vcvtnq_s64_f64() {
19580 let a: f64x2 = f64x2::new(-1.5, 2.1);
19581 let e: i64x2 = i64x2::new(-2, 2);
19582 let r: i64x2 = transmute(vcvtnq_s64_f64(transmute(a)));
19583 assert_eq!(r, e);
19584 }
19585
19586 #[simd_test(enable = "neon")]
19587 unsafe fn test_vcvtns_s32_f32() {
19588 let a: f32 = -1.5;
19589 let e: i32 = -2;
19590 let r: i32 = transmute(vcvtns_s32_f32(transmute(a)));
19591 assert_eq!(r, e);
19592 }
19593
19594 #[simd_test(enable = "neon")]
19595 unsafe fn test_vcvtnd_s64_f64() {
19596 let a: f64 = -1.5;
19597 let e: i64 = -2;
19598 let r: i64 = transmute(vcvtnd_s64_f64(transmute(a)));
19599 assert_eq!(r, e);
19600 }
19601
19602 #[simd_test(enable = "neon")]
19603 unsafe fn test_vcvtm_s32_f32() {
19604 let a: f32x2 = f32x2::new(-1.1, 2.1);
19605 let e: i32x2 = i32x2::new(-2, 2);
19606 let r: i32x2 = transmute(vcvtm_s32_f32(transmute(a)));
19607 assert_eq!(r, e);
19608 }
19609
19610 #[simd_test(enable = "neon")]
19611 unsafe fn test_vcvtmq_s32_f32() {
19612 let a: f32x4 = f32x4::new(-1.1, 2.1, -2.9, 3.9);
19613 let e: i32x4 = i32x4::new(-2, 2, -3, 3);
19614 let r: i32x4 = transmute(vcvtmq_s32_f32(transmute(a)));
19615 assert_eq!(r, e);
19616 }
19617
19618 #[simd_test(enable = "neon")]
19619 unsafe fn test_vcvtm_s64_f64() {
19620 let a: f64 = -1.1;
19621 let e: i64x1 = i64x1::new(-2);
19622 let r: i64x1 = transmute(vcvtm_s64_f64(transmute(a)));
19623 assert_eq!(r, e);
19624 }
19625
19626 #[simd_test(enable = "neon")]
19627 unsafe fn test_vcvtmq_s64_f64() {
19628 let a: f64x2 = f64x2::new(-1.1, 2.1);
19629 let e: i64x2 = i64x2::new(-2, 2);
19630 let r: i64x2 = transmute(vcvtmq_s64_f64(transmute(a)));
19631 assert_eq!(r, e);
19632 }
19633
19634 #[simd_test(enable = "neon")]
19635 unsafe fn test_vcvtms_s32_f32() {
19636 let a: f32 = -1.1;
19637 let e: i32 = -2;
19638 let r: i32 = transmute(vcvtms_s32_f32(transmute(a)));
19639 assert_eq!(r, e);
19640 }
19641
19642 #[simd_test(enable = "neon")]
19643 unsafe fn test_vcvtmd_s64_f64() {
19644 let a: f64 = -1.1;
19645 let e: i64 = -2;
19646 let r: i64 = transmute(vcvtmd_s64_f64(transmute(a)));
19647 assert_eq!(r, e);
19648 }
19649
19650 #[simd_test(enable = "neon")]
19651 unsafe fn test_vcvtp_s32_f32() {
19652 let a: f32x2 = f32x2::new(-1.1, 2.1);
19653 let e: i32x2 = i32x2::new(-1, 3);
19654 let r: i32x2 = transmute(vcvtp_s32_f32(transmute(a)));
19655 assert_eq!(r, e);
19656 }
19657
19658 #[simd_test(enable = "neon")]
19659 unsafe fn test_vcvtpq_s32_f32() {
19660 let a: f32x4 = f32x4::new(-1.1, 2.1, -2.9, 3.9);
19661 let e: i32x4 = i32x4::new(-1, 3, -2, 4);
19662 let r: i32x4 = transmute(vcvtpq_s32_f32(transmute(a)));
19663 assert_eq!(r, e);
19664 }
19665
19666 #[simd_test(enable = "neon")]
19667 unsafe fn test_vcvtp_s64_f64() {
19668 let a: f64 = -1.1;
19669 let e: i64x1 = i64x1::new(-1);
19670 let r: i64x1 = transmute(vcvtp_s64_f64(transmute(a)));
19671 assert_eq!(r, e);
19672 }
19673
19674 #[simd_test(enable = "neon")]
19675 unsafe fn test_vcvtpq_s64_f64() {
19676 let a: f64x2 = f64x2::new(-1.1, 2.1);
19677 let e: i64x2 = i64x2::new(-1, 3);
19678 let r: i64x2 = transmute(vcvtpq_s64_f64(transmute(a)));
19679 assert_eq!(r, e);
19680 }
19681
19682 #[simd_test(enable = "neon")]
19683 unsafe fn test_vcvtps_s32_f32() {
19684 let a: f32 = -1.1;
19685 let e: i32 = -1;
19686 let r: i32 = transmute(vcvtps_s32_f32(transmute(a)));
19687 assert_eq!(r, e);
19688 }
19689
19690 #[simd_test(enable = "neon")]
19691 unsafe fn test_vcvtpd_s64_f64() {
19692 let a: f64 = -1.1;
19693 let e: i64 = -1;
19694 let r: i64 = transmute(vcvtpd_s64_f64(transmute(a)));
19695 assert_eq!(r, e);
19696 }
19697
19698 #[simd_test(enable = "neon")]
19699 unsafe fn test_vcvta_u32_f32() {
19700 let a: f32x2 = f32x2::new(1.1, 2.1);
19701 let e: u32x2 = u32x2::new(1, 2);
19702 let r: u32x2 = transmute(vcvta_u32_f32(transmute(a)));
19703 assert_eq!(r, e);
19704 }
19705
19706 #[simd_test(enable = "neon")]
19707 unsafe fn test_vcvtaq_u32_f32() {
19708 let a: f32x4 = f32x4::new(1.1, 2.1, 2.9, 3.9);
19709 let e: u32x4 = u32x4::new(1, 2, 3, 4);
19710 let r: u32x4 = transmute(vcvtaq_u32_f32(transmute(a)));
19711 assert_eq!(r, e);
19712 }
19713
19714 #[simd_test(enable = "neon")]
19715 unsafe fn test_vcvta_u64_f64() {
19716 let a: f64 = 1.1;
19717 let e: u64x1 = u64x1::new(1);
19718 let r: u64x1 = transmute(vcvta_u64_f64(transmute(a)));
19719 assert_eq!(r, e);
19720 }
19721
19722 #[simd_test(enable = "neon")]
19723 unsafe fn test_vcvtaq_u64_f64() {
19724 let a: f64x2 = f64x2::new(1.1, 2.1);
19725 let e: u64x2 = u64x2::new(1, 2);
19726 let r: u64x2 = transmute(vcvtaq_u64_f64(transmute(a)));
19727 assert_eq!(r, e);
19728 }
19729
19730 #[simd_test(enable = "neon")]
19731 unsafe fn test_vcvtn_u32_f32() {
19732 let a: f32x2 = f32x2::new(1.5, 2.1);
19733 let e: u32x2 = u32x2::new(2, 2);
19734 let r: u32x2 = transmute(vcvtn_u32_f32(transmute(a)));
19735 assert_eq!(r, e);
19736 }
19737
19738 #[simd_test(enable = "neon")]
19739 unsafe fn test_vcvtnq_u32_f32() {
19740 let a: f32x4 = f32x4::new(1.5, 2.1, 2.9, 3.9);
19741 let e: u32x4 = u32x4::new(2, 2, 3, 4);
19742 let r: u32x4 = transmute(vcvtnq_u32_f32(transmute(a)));
19743 assert_eq!(r, e);
19744 }
19745
19746 #[simd_test(enable = "neon")]
19747 unsafe fn test_vcvtn_u64_f64() {
19748 let a: f64 = 1.5;
19749 let e: u64x1 = u64x1::new(2);
19750 let r: u64x1 = transmute(vcvtn_u64_f64(transmute(a)));
19751 assert_eq!(r, e);
19752 }
19753
19754 #[simd_test(enable = "neon")]
19755 unsafe fn test_vcvtnq_u64_f64() {
19756 let a: f64x2 = f64x2::new(1.5, 2.1);
19757 let e: u64x2 = u64x2::new(2, 2);
19758 let r: u64x2 = transmute(vcvtnq_u64_f64(transmute(a)));
19759 assert_eq!(r, e);
19760 }
19761
19762 #[simd_test(enable = "neon")]
19763 unsafe fn test_vcvtns_u32_f32() {
19764 let a: f32 = 1.5;
19765 let e: u32 = 2;
19766 let r: u32 = transmute(vcvtns_u32_f32(transmute(a)));
19767 assert_eq!(r, e);
19768 }
19769
19770 #[simd_test(enable = "neon")]
19771 unsafe fn test_vcvtnd_u64_f64() {
19772 let a: f64 = 1.5;
19773 let e: u64 = 2;
19774 let r: u64 = transmute(vcvtnd_u64_f64(transmute(a)));
19775 assert_eq!(r, e);
19776 }
19777
19778 #[simd_test(enable = "neon")]
19779 unsafe fn test_vcvtm_u32_f32() {
19780 let a: f32x2 = f32x2::new(1.1, 2.1);
19781 let e: u32x2 = u32x2::new(1, 2);
19782 let r: u32x2 = transmute(vcvtm_u32_f32(transmute(a)));
19783 assert_eq!(r, e);
19784 }
19785
19786 #[simd_test(enable = "neon")]
19787 unsafe fn test_vcvtmq_u32_f32() {
19788 let a: f32x4 = f32x4::new(1.1, 2.1, 2.9, 3.9);
19789 let e: u32x4 = u32x4::new(1, 2, 2, 3);
19790 let r: u32x4 = transmute(vcvtmq_u32_f32(transmute(a)));
19791 assert_eq!(r, e);
19792 }
19793
19794 #[simd_test(enable = "neon")]
19795 unsafe fn test_vcvtm_u64_f64() {
19796 let a: f64 = 1.1;
19797 let e: u64x1 = u64x1::new(1);
19798 let r: u64x1 = transmute(vcvtm_u64_f64(transmute(a)));
19799 assert_eq!(r, e);
19800 }
19801
19802 #[simd_test(enable = "neon")]
19803 unsafe fn test_vcvtmq_u64_f64() {
19804 let a: f64x2 = f64x2::new(1.1, 2.1);
19805 let e: u64x2 = u64x2::new(1, 2);
19806 let r: u64x2 = transmute(vcvtmq_u64_f64(transmute(a)));
19807 assert_eq!(r, e);
19808 }
19809
19810 #[simd_test(enable = "neon")]
19811 unsafe fn test_vcvtms_u32_f32() {
19812 let a: f32 = 1.1;
19813 let e: u32 = 1;
19814 let r: u32 = transmute(vcvtms_u32_f32(transmute(a)));
19815 assert_eq!(r, e);
19816 }
19817
19818 #[simd_test(enable = "neon")]
19819 unsafe fn test_vcvtmd_u64_f64() {
19820 let a: f64 = 1.1;
19821 let e: u64 = 1;
19822 let r: u64 = transmute(vcvtmd_u64_f64(transmute(a)));
19823 assert_eq!(r, e);
19824 }
19825
19826 #[simd_test(enable = "neon")]
19827 unsafe fn test_vcvtp_u32_f32() {
19828 let a: f32x2 = f32x2::new(1.1, 2.1);
19829 let e: u32x2 = u32x2::new(2, 3);
19830 let r: u32x2 = transmute(vcvtp_u32_f32(transmute(a)));
19831 assert_eq!(r, e);
19832 }
19833
19834 #[simd_test(enable = "neon")]
19835 unsafe fn test_vcvtpq_u32_f32() {
19836 let a: f32x4 = f32x4::new(1.1, 2.1, 2.9, 3.9);
19837 let e: u32x4 = u32x4::new(2, 3, 3, 4);
19838 let r: u32x4 = transmute(vcvtpq_u32_f32(transmute(a)));
19839 assert_eq!(r, e);
19840 }
19841
19842 #[simd_test(enable = "neon")]
19843 unsafe fn test_vcvtp_u64_f64() {
19844 let a: f64 = 1.1;
19845 let e: u64x1 = u64x1::new(2);
19846 let r: u64x1 = transmute(vcvtp_u64_f64(transmute(a)));
19847 assert_eq!(r, e);
19848 }
19849
19850 #[simd_test(enable = "neon")]
19851 unsafe fn test_vcvtpq_u64_f64() {
19852 let a: f64x2 = f64x2::new(1.1, 2.1);
19853 let e: u64x2 = u64x2::new(2, 3);
19854 let r: u64x2 = transmute(vcvtpq_u64_f64(transmute(a)));
19855 assert_eq!(r, e);
19856 }
19857
19858 #[simd_test(enable = "neon")]
19859 unsafe fn test_vcvtps_u32_f32() {
19860 let a: f32 = 1.1;
19861 let e: u32 = 2;
19862 let r: u32 = transmute(vcvtps_u32_f32(transmute(a)));
19863 assert_eq!(r, e);
19864 }
19865
19866 #[simd_test(enable = "neon")]
19867 unsafe fn test_vcvtpd_u64_f64() {
19868 let a: f64 = 1.1;
19869 let e: u64 = 2;
19870 let r: u64 = transmute(vcvtpd_u64_f64(transmute(a)));
19871 assert_eq!(r, e);
19872 }
19873
19874 #[simd_test(enable = "neon")]
19875 unsafe fn test_vdupq_laneq_p64() {
19876 let a: i64x2 = i64x2::new(1, 1);
19877 let e: i64x2 = i64x2::new(1, 1);
19878 let r: i64x2 = transmute(vdupq_laneq_p64::<1>(transmute(a)));
19879 assert_eq!(r, e);
19880 }
19881
19882 #[simd_test(enable = "neon")]
19883 unsafe fn test_vdupq_lane_p64() {
19884 let a: i64x1 = i64x1::new(1);
19885 let e: i64x2 = i64x2::new(1, 1);
19886 let r: i64x2 = transmute(vdupq_lane_p64::<0>(transmute(a)));
19887 assert_eq!(r, e);
19888 }
19889
19890 #[simd_test(enable = "neon")]
19891 unsafe fn test_vdupq_laneq_f64() {
19892 let a: f64x2 = f64x2::new(1., 1.);
19893 let e: f64x2 = f64x2::new(1., 1.);
19894 let r: f64x2 = transmute(vdupq_laneq_f64::<1>(transmute(a)));
19895 assert_eq!(r, e);
19896 }
19897
19898 #[simd_test(enable = "neon")]
19899 unsafe fn test_vdupq_lane_f64() {
19900 let a: f64 = 1.;
19901 let e: f64x2 = f64x2::new(1., 1.);
19902 let r: f64x2 = transmute(vdupq_lane_f64::<0>(transmute(a)));
19903 assert_eq!(r, e);
19904 }
19905
19906 #[simd_test(enable = "neon")]
19907 unsafe fn test_vdup_lane_p64() {
19908 let a: i64x1 = i64x1::new(0);
19909 let e: i64x1 = i64x1::new(0);
19910 let r: i64x1 = transmute(vdup_lane_p64::<0>(transmute(a)));
19911 assert_eq!(r, e);
19912 }
19913
19914 #[simd_test(enable = "neon")]
19915 unsafe fn test_vdup_lane_f64() {
19916 let a: f64 = 0.;
19917 let e: f64 = 0.;
19918 let r: f64 = transmute(vdup_lane_f64::<0>(transmute(a)));
19919 assert_eq!(r, e);
19920 }
19921
19922 #[simd_test(enable = "neon")]
19923 unsafe fn test_vdup_laneq_p64() {
19924 let a: i64x2 = i64x2::new(0, 1);
19925 let e: i64x1 = i64x1::new(1);
19926 let r: i64x1 = transmute(vdup_laneq_p64::<1>(transmute(a)));
19927 assert_eq!(r, e);
19928 }
19929
19930 #[simd_test(enable = "neon")]
19931 unsafe fn test_vdup_laneq_f64() {
19932 let a: f64x2 = f64x2::new(0., 1.);
19933 let e: f64 = 1.;
19934 let r: f64 = transmute(vdup_laneq_f64::<1>(transmute(a)));
19935 assert_eq!(r, e);
19936 }
19937
19938 #[simd_test(enable = "neon")]
19939 unsafe fn test_vdupb_lane_s8() {
19940 let a: i8x8 = i8x8::new(1, 1, 1, 4, 1, 6, 7, 8);
19941 let e: i8 = 1;
19942 let r: i8 = transmute(vdupb_lane_s8::<4>(transmute(a)));
19943 assert_eq!(r, e);
19944 }
19945
19946 #[simd_test(enable = "neon")]
19947 unsafe fn test_vdupb_laneq_s8() {
19948 let a: i8x16 = i8x16::new(1, 1, 1, 4, 1, 6, 7, 8, 1, 10, 11, 12, 13, 14, 15, 16);
19949 let e: i8 = 1;
19950 let r: i8 = transmute(vdupb_laneq_s8::<8>(transmute(a)));
19951 assert_eq!(r, e);
19952 }
19953
19954 #[simd_test(enable = "neon")]
19955 unsafe fn test_vduph_lane_s16() {
19956 let a: i16x4 = i16x4::new(1, 1, 1, 4);
19957 let e: i16 = 1;
19958 let r: i16 = transmute(vduph_lane_s16::<2>(transmute(a)));
19959 assert_eq!(r, e);
19960 }
19961
19962 #[simd_test(enable = "neon")]
19963 unsafe fn test_vduph_laneq_s16() {
19964 let a: i16x8 = i16x8::new(1, 1, 1, 4, 1, 6, 7, 8);
19965 let e: i16 = 1;
19966 let r: i16 = transmute(vduph_laneq_s16::<4>(transmute(a)));
19967 assert_eq!(r, e);
19968 }
19969
19970 #[simd_test(enable = "neon")]
19971 unsafe fn test_vdups_lane_s32() {
19972 let a: i32x2 = i32x2::new(1, 1);
19973 let e: i32 = 1;
19974 let r: i32 = transmute(vdups_lane_s32::<1>(transmute(a)));
19975 assert_eq!(r, e);
19976 }
19977
19978 #[simd_test(enable = "neon")]
19979 unsafe fn test_vdups_laneq_s32() {
19980 let a: i32x4 = i32x4::new(1, 1, 1, 4);
19981 let e: i32 = 1;
19982 let r: i32 = transmute(vdups_laneq_s32::<2>(transmute(a)));
19983 assert_eq!(r, e);
19984 }
19985
19986 #[simd_test(enable = "neon")]
19987 unsafe fn test_vdupd_lane_s64() {
19988 let a: i64x1 = i64x1::new(1);
19989 let e: i64 = 1;
19990 let r: i64 = transmute(vdupd_lane_s64::<0>(transmute(a)));
19991 assert_eq!(r, e);
19992 }
19993
19994 #[simd_test(enable = "neon")]
19995 unsafe fn test_vdupd_laneq_s64() {
19996 let a: i64x2 = i64x2::new(1, 1);
19997 let e: i64 = 1;
19998 let r: i64 = transmute(vdupd_laneq_s64::<1>(transmute(a)));
19999 assert_eq!(r, e);
20000 }
20001
20002 #[simd_test(enable = "neon")]
20003 unsafe fn test_vdupb_lane_u8() {
20004 let a: u8x8 = u8x8::new(1, 1, 1, 4, 1, 6, 7, 8);
20005 let e: u8 = 1;
20006 let r: u8 = transmute(vdupb_lane_u8::<4>(transmute(a)));
20007 assert_eq!(r, e);
20008 }
20009
20010 #[simd_test(enable = "neon")]
20011 unsafe fn test_vdupb_laneq_u8() {
20012 let a: u8x16 = u8x16::new(1, 1, 1, 4, 1, 6, 7, 8, 1, 10, 11, 12, 13, 14, 15, 16);
20013 let e: u8 = 1;
20014 let r: u8 = transmute(vdupb_laneq_u8::<8>(transmute(a)));
20015 assert_eq!(r, e);
20016 }
20017
20018 #[simd_test(enable = "neon")]
20019 unsafe fn test_vduph_lane_u16() {
20020 let a: u16x4 = u16x4::new(1, 1, 1, 4);
20021 let e: u16 = 1;
20022 let r: u16 = transmute(vduph_lane_u16::<2>(transmute(a)));
20023 assert_eq!(r, e);
20024 }
20025
20026 #[simd_test(enable = "neon")]
20027 unsafe fn test_vduph_laneq_u16() {
20028 let a: u16x8 = u16x8::new(1, 1, 1, 4, 1, 6, 7, 8);
20029 let e: u16 = 1;
20030 let r: u16 = transmute(vduph_laneq_u16::<4>(transmute(a)));
20031 assert_eq!(r, e);
20032 }
20033
20034 #[simd_test(enable = "neon")]
20035 unsafe fn test_vdups_lane_u32() {
20036 let a: u32x2 = u32x2::new(1, 1);
20037 let e: u32 = 1;
20038 let r: u32 = transmute(vdups_lane_u32::<1>(transmute(a)));
20039 assert_eq!(r, e);
20040 }
20041
20042 #[simd_test(enable = "neon")]
20043 unsafe fn test_vdups_laneq_u32() {
20044 let a: u32x4 = u32x4::new(1, 1, 1, 4);
20045 let e: u32 = 1;
20046 let r: u32 = transmute(vdups_laneq_u32::<2>(transmute(a)));
20047 assert_eq!(r, e);
20048 }
20049
20050 #[simd_test(enable = "neon")]
20051 unsafe fn test_vdupd_lane_u64() {
20052 let a: u64x1 = u64x1::new(1);
20053 let e: u64 = 1;
20054 let r: u64 = transmute(vdupd_lane_u64::<0>(transmute(a)));
20055 assert_eq!(r, e);
20056 }
20057
20058 #[simd_test(enable = "neon")]
20059 unsafe fn test_vdupd_laneq_u64() {
20060 let a: u64x2 = u64x2::new(1, 1);
20061 let e: u64 = 1;
20062 let r: u64 = transmute(vdupd_laneq_u64::<1>(transmute(a)));
20063 assert_eq!(r, e);
20064 }
20065
20066 #[simd_test(enable = "neon")]
20067 unsafe fn test_vdupb_lane_p8() {
20068 let a: i8x8 = i8x8::new(1, 1, 1, 4, 1, 6, 7, 8);
20069 let e: p8 = 1;
20070 let r: p8 = transmute(vdupb_lane_p8::<4>(transmute(a)));
20071 assert_eq!(r, e);
20072 }
20073
20074 #[simd_test(enable = "neon")]
20075 unsafe fn test_vdupb_laneq_p8() {
20076 let a: i8x16 = i8x16::new(1, 1, 1, 4, 1, 6, 7, 8, 1, 10, 11, 12, 13, 14, 15, 16);
20077 let e: p8 = 1;
20078 let r: p8 = transmute(vdupb_laneq_p8::<8>(transmute(a)));
20079 assert_eq!(r, e);
20080 }
20081
20082 #[simd_test(enable = "neon")]
20083 unsafe fn test_vduph_lane_p16() {
20084 let a: i16x4 = i16x4::new(1, 1, 1, 4);
20085 let e: p16 = 1;
20086 let r: p16 = transmute(vduph_lane_p16::<2>(transmute(a)));
20087 assert_eq!(r, e);
20088 }
20089
20090 #[simd_test(enable = "neon")]
20091 unsafe fn test_vduph_laneq_p16() {
20092 let a: i16x8 = i16x8::new(1, 1, 1, 4, 1, 6, 7, 8);
20093 let e: p16 = 1;
20094 let r: p16 = transmute(vduph_laneq_p16::<4>(transmute(a)));
20095 assert_eq!(r, e);
20096 }
20097
20098 #[simd_test(enable = "neon")]
20099 unsafe fn test_vdups_lane_f32() {
20100 let a: f32x2 = f32x2::new(1., 1.);
20101 let e: f32 = 1.;
20102 let r: f32 = transmute(vdups_lane_f32::<1>(transmute(a)));
20103 assert_eq!(r, e);
20104 }
20105
20106 #[simd_test(enable = "neon")]
20107 unsafe fn test_vdups_laneq_f32() {
20108 let a: f32x4 = f32x4::new(1., 1., 1., 4.);
20109 let e: f32 = 1.;
20110 let r: f32 = transmute(vdups_laneq_f32::<2>(transmute(a)));
20111 assert_eq!(r, e);
20112 }
20113
20114 #[simd_test(enable = "neon")]
20115 unsafe fn test_vdupd_lane_f64() {
20116 let a: f64 = 1.;
20117 let e: f64 = 1.;
20118 let r: f64 = transmute(vdupd_lane_f64::<0>(transmute(a)));
20119 assert_eq!(r, e);
20120 }
20121
20122 #[simd_test(enable = "neon")]
20123 unsafe fn test_vdupd_laneq_f64() {
20124 let a: f64x2 = f64x2::new(1., 1.);
20125 let e: f64 = 1.;
20126 let r: f64 = transmute(vdupd_laneq_f64::<1>(transmute(a)));
20127 assert_eq!(r, e);
20128 }
20129
20130 #[simd_test(enable = "neon")]
20131 unsafe fn test_vextq_p64() {
20132 let a: i64x2 = i64x2::new(1, 1);
20133 let b: i64x2 = i64x2::new(2, 2);
20134 let e: i64x2 = i64x2::new(1, 2);
20135 let r: i64x2 = transmute(vextq_p64::<1>(transmute(a), transmute(b)));
20136 assert_eq!(r, e);
20137 }
20138
20139 #[simd_test(enable = "neon")]
20140 unsafe fn test_vextq_f64() {
20141 let a: f64x2 = f64x2::new(1., 1.);
20142 let b: f64x2 = f64x2::new(2., 2.);
20143 let e: f64x2 = f64x2::new(1., 2.);
20144 let r: f64x2 = transmute(vextq_f64::<1>(transmute(a), transmute(b)));
20145 assert_eq!(r, e);
20146 }
20147
20148 #[simd_test(enable = "neon")]
20149 unsafe fn test_vmla_f64() {
20150 let a: f64 = 0.;
20151 let b: f64 = 2.;
20152 let c: f64 = 3.;
20153 let e: f64 = 6.;
20154 let r: f64 = transmute(vmla_f64(transmute(a), transmute(b), transmute(c)));
20155 assert_eq!(r, e);
20156 }
20157
20158 #[simd_test(enable = "neon")]
20159 unsafe fn test_vmlaq_f64() {
20160 let a: f64x2 = f64x2::new(0., 1.);
20161 let b: f64x2 = f64x2::new(2., 2.);
20162 let c: f64x2 = f64x2::new(3., 3.);
20163 let e: f64x2 = f64x2::new(6., 7.);
20164 let r: f64x2 = transmute(vmlaq_f64(transmute(a), transmute(b), transmute(c)));
20165 assert_eq!(r, e);
20166 }
20167
20168 #[simd_test(enable = "neon")]
20169 unsafe fn test_vmlal_high_s8() {
20170 let a: i16x8 = i16x8::new(8, 7, 6, 5, 4, 3, 2, 1);
20171 let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
20172 let c: i8x16 = i8x16::new(3, 3, 0, 1, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7);
20173 let e: i16x8 = i16x8::new(8, 9, 10, 11, 12, 13, 14, 15);
20174 let r: i16x8 = transmute(vmlal_high_s8(transmute(a), transmute(b), transmute(c)));
20175 assert_eq!(r, e);
20176 }
20177
20178 #[simd_test(enable = "neon")]
20179 unsafe fn test_vmlal_high_s16() {
20180 let a: i32x4 = i32x4::new(8, 7, 6, 5);
20181 let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
20182 let c: i16x8 = i16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
20183 let e: i32x4 = i32x4::new(8, 9, 10, 11);
20184 let r: i32x4 = transmute(vmlal_high_s16(transmute(a), transmute(b), transmute(c)));
20185 assert_eq!(r, e);
20186 }
20187
20188 #[simd_test(enable = "neon")]
20189 unsafe fn test_vmlal_high_s32() {
20190 let a: i64x2 = i64x2::new(8, 7);
20191 let b: i32x4 = i32x4::new(2, 2, 2, 2);
20192 let c: i32x4 = i32x4::new(3, 3, 0, 1);
20193 let e: i64x2 = i64x2::new(8, 9);
20194 let r: i64x2 = transmute(vmlal_high_s32(transmute(a), transmute(b), transmute(c)));
20195 assert_eq!(r, e);
20196 }
20197
20198 #[simd_test(enable = "neon")]
20199 unsafe fn test_vmlal_high_u8() {
20200 let a: u16x8 = u16x8::new(8, 7, 6, 5, 4, 3, 2, 1);
20201 let b: u8x16 = u8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
20202 let c: u8x16 = u8x16::new(3, 3, 0, 1, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7);
20203 let e: u16x8 = u16x8::new(8, 9, 10, 11, 12, 13, 14, 15);
20204 let r: u16x8 = transmute(vmlal_high_u8(transmute(a), transmute(b), transmute(c)));
20205 assert_eq!(r, e);
20206 }
20207
20208 #[simd_test(enable = "neon")]
20209 unsafe fn test_vmlal_high_u16() {
20210 let a: u32x4 = u32x4::new(8, 7, 6, 5);
20211 let b: u16x8 = u16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
20212 let c: u16x8 = u16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
20213 let e: u32x4 = u32x4::new(8, 9, 10, 11);
20214 let r: u32x4 = transmute(vmlal_high_u16(transmute(a), transmute(b), transmute(c)));
20215 assert_eq!(r, e);
20216 }
20217
20218 #[simd_test(enable = "neon")]
20219 unsafe fn test_vmlal_high_u32() {
20220 let a: u64x2 = u64x2::new(8, 7);
20221 let b: u32x4 = u32x4::new(2, 2, 2, 2);
20222 let c: u32x4 = u32x4::new(3, 3, 0, 1);
20223 let e: u64x2 = u64x2::new(8, 9);
20224 let r: u64x2 = transmute(vmlal_high_u32(transmute(a), transmute(b), transmute(c)));
20225 assert_eq!(r, e);
20226 }
20227
20228 #[simd_test(enable = "neon")]
20229 unsafe fn test_vmlal_high_n_s16() {
20230 let a: i32x4 = i32x4::new(8, 7, 6, 5);
20231 let b: i16x8 = i16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
20232 let c: i16 = 2;
20233 let e: i32x4 = i32x4::new(8, 9, 10, 11);
20234 let r: i32x4 = transmute(vmlal_high_n_s16(transmute(a), transmute(b), transmute(c)));
20235 assert_eq!(r, e);
20236 }
20237
20238 #[simd_test(enable = "neon")]
20239 unsafe fn test_vmlal_high_n_s32() {
20240 let a: i64x2 = i64x2::new(8, 7);
20241 let b: i32x4 = i32x4::new(3, 3, 0, 1);
20242 let c: i32 = 2;
20243 let e: i64x2 = i64x2::new(8, 9);
20244 let r: i64x2 = transmute(vmlal_high_n_s32(transmute(a), transmute(b), transmute(c)));
20245 assert_eq!(r, e);
20246 }
20247
20248 #[simd_test(enable = "neon")]
20249 unsafe fn test_vmlal_high_n_u16() {
20250 let a: u32x4 = u32x4::new(8, 7, 6, 5);
20251 let b: u16x8 = u16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
20252 let c: u16 = 2;
20253 let e: u32x4 = u32x4::new(8, 9, 10, 11);
20254 let r: u32x4 = transmute(vmlal_high_n_u16(transmute(a), transmute(b), transmute(c)));
20255 assert_eq!(r, e);
20256 }
20257
20258 #[simd_test(enable = "neon")]
20259 unsafe fn test_vmlal_high_n_u32() {
20260 let a: u64x2 = u64x2::new(8, 7);
20261 let b: u32x4 = u32x4::new(3, 3, 0, 1);
20262 let c: u32 = 2;
20263 let e: u64x2 = u64x2::new(8, 9);
20264 let r: u64x2 = transmute(vmlal_high_n_u32(transmute(a), transmute(b), transmute(c)));
20265 assert_eq!(r, e);
20266 }
20267
20268 #[simd_test(enable = "neon")]
20269 unsafe fn test_vmlal_high_lane_s16() {
20270 let a: i32x4 = i32x4::new(8, 7, 6, 5);
20271 let b: i16x8 = i16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
20272 let c: i16x4 = i16x4::new(0, 2, 0, 0);
20273 let e: i32x4 = i32x4::new(8, 9, 10, 11);
20274 let r: i32x4 = transmute(vmlal_high_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
20275 assert_eq!(r, e);
20276 }
20277
20278 #[simd_test(enable = "neon")]
20279 unsafe fn test_vmlal_high_laneq_s16() {
20280 let a: i32x4 = i32x4::new(8, 7, 6, 5);
20281 let b: i16x8 = i16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
20282 let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
20283 let e: i32x4 = i32x4::new(8, 9, 10, 11);
20284 let r: i32x4 = transmute(vmlal_high_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
20285 assert_eq!(r, e);
20286 }
20287
20288 #[simd_test(enable = "neon")]
20289 unsafe fn test_vmlal_high_lane_s32() {
20290 let a: i64x2 = i64x2::new(8, 7);
20291 let b: i32x4 = i32x4::new(3, 3, 0, 1);
20292 let c: i32x2 = i32x2::new(0, 2);
20293 let e: i64x2 = i64x2::new(8, 9);
20294 let r: i64x2 = transmute(vmlal_high_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
20295 assert_eq!(r, e);
20296 }
20297
20298 #[simd_test(enable = "neon")]
20299 unsafe fn test_vmlal_high_laneq_s32() {
20300 let a: i64x2 = i64x2::new(8, 7);
20301 let b: i32x4 = i32x4::new(3, 3, 0, 1);
20302 let c: i32x4 = i32x4::new(0, 2, 0, 0);
20303 let e: i64x2 = i64x2::new(8, 9);
20304 let r: i64x2 = transmute(vmlal_high_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
20305 assert_eq!(r, e);
20306 }
20307
20308 #[simd_test(enable = "neon")]
20309 unsafe fn test_vmlal_high_lane_u16() {
20310 let a: u32x4 = u32x4::new(8, 7, 6, 5);
20311 let b: u16x8 = u16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
20312 let c: u16x4 = u16x4::new(0, 2, 0, 0);
20313 let e: u32x4 = u32x4::new(8, 9, 10, 11);
20314 let r: u32x4 = transmute(vmlal_high_lane_u16::<1>(transmute(a), transmute(b), transmute(c)));
20315 assert_eq!(r, e);
20316 }
20317
20318 #[simd_test(enable = "neon")]
20319 unsafe fn test_vmlal_high_laneq_u16() {
20320 let a: u32x4 = u32x4::new(8, 7, 6, 5);
20321 let b: u16x8 = u16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
20322 let c: u16x8 = u16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
20323 let e: u32x4 = u32x4::new(8, 9, 10, 11);
20324 let r: u32x4 = transmute(vmlal_high_laneq_u16::<1>(transmute(a), transmute(b), transmute(c)));
20325 assert_eq!(r, e);
20326 }
20327
20328 #[simd_test(enable = "neon")]
20329 unsafe fn test_vmlal_high_lane_u32() {
20330 let a: u64x2 = u64x2::new(8, 7);
20331 let b: u32x4 = u32x4::new(3, 3, 0, 1);
20332 let c: u32x2 = u32x2::new(0, 2);
20333 let e: u64x2 = u64x2::new(8, 9);
20334 let r: u64x2 = transmute(vmlal_high_lane_u32::<1>(transmute(a), transmute(b), transmute(c)));
20335 assert_eq!(r, e);
20336 }
20337
20338 #[simd_test(enable = "neon")]
20339 unsafe fn test_vmlal_high_laneq_u32() {
20340 let a: u64x2 = u64x2::new(8, 7);
20341 let b: u32x4 = u32x4::new(3, 3, 0, 1);
20342 let c: u32x4 = u32x4::new(0, 2, 0, 0);
20343 let e: u64x2 = u64x2::new(8, 9);
20344 let r: u64x2 = transmute(vmlal_high_laneq_u32::<1>(transmute(a), transmute(b), transmute(c)));
20345 assert_eq!(r, e);
20346 }
20347
20348 #[simd_test(enable = "neon")]
20349 unsafe fn test_vmls_f64() {
20350 let a: f64 = 6.;
20351 let b: f64 = 2.;
20352 let c: f64 = 3.;
20353 let e: f64 = 0.;
20354 let r: f64 = transmute(vmls_f64(transmute(a), transmute(b), transmute(c)));
20355 assert_eq!(r, e);
20356 }
20357
20358 #[simd_test(enable = "neon")]
20359 unsafe fn test_vmlsq_f64() {
20360 let a: f64x2 = f64x2::new(6., 7.);
20361 let b: f64x2 = f64x2::new(2., 2.);
20362 let c: f64x2 = f64x2::new(3., 3.);
20363 let e: f64x2 = f64x2::new(0., 1.);
20364 let r: f64x2 = transmute(vmlsq_f64(transmute(a), transmute(b), transmute(c)));
20365 assert_eq!(r, e);
20366 }
20367
20368 #[simd_test(enable = "neon")]
20369 unsafe fn test_vmlsl_high_s8() {
20370 let a: i16x8 = i16x8::new(14, 15, 16, 17, 18, 19, 20, 21);
20371 let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
20372 let c: i8x16 = i8x16::new(3, 3, 0, 1, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7);
20373 let e: i16x8 = i16x8::new(14, 13, 12, 11, 10, 9, 8, 7);
20374 let r: i16x8 = transmute(vmlsl_high_s8(transmute(a), transmute(b), transmute(c)));
20375 assert_eq!(r, e);
20376 }
20377
20378 #[simd_test(enable = "neon")]
20379 unsafe fn test_vmlsl_high_s16() {
20380 let a: i32x4 = i32x4::new(14, 15, 16, 17);
20381 let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
20382 let c: i16x8 = i16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
20383 let e: i32x4 = i32x4::new(14, 13, 12, 11);
20384 let r: i32x4 = transmute(vmlsl_high_s16(transmute(a), transmute(b), transmute(c)));
20385 assert_eq!(r, e);
20386 }
20387
20388 #[simd_test(enable = "neon")]
20389 unsafe fn test_vmlsl_high_s32() {
20390 let a: i64x2 = i64x2::new(14, 15);
20391 let b: i32x4 = i32x4::new(2, 2, 2, 2);
20392 let c: i32x4 = i32x4::new(3, 3, 0, 1);
20393 let e: i64x2 = i64x2::new(14, 13);
20394 let r: i64x2 = transmute(vmlsl_high_s32(transmute(a), transmute(b), transmute(c)));
20395 assert_eq!(r, e);
20396 }
20397
20398 #[simd_test(enable = "neon")]
20399 unsafe fn test_vmlsl_high_u8() {
20400 let a: u16x8 = u16x8::new(14, 15, 16, 17, 18, 19, 20, 21);
20401 let b: u8x16 = u8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
20402 let c: u8x16 = u8x16::new(3, 3, 0, 1, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7);
20403 let e: u16x8 = u16x8::new(14, 13, 12, 11, 10, 9, 8, 7);
20404 let r: u16x8 = transmute(vmlsl_high_u8(transmute(a), transmute(b), transmute(c)));
20405 assert_eq!(r, e);
20406 }
20407
20408 #[simd_test(enable = "neon")]
20409 unsafe fn test_vmlsl_high_u16() {
20410 let a: u32x4 = u32x4::new(14, 15, 16, 17);
20411 let b: u16x8 = u16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
20412 let c: u16x8 = u16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
20413 let e: u32x4 = u32x4::new(14, 13, 12, 11);
20414 let r: u32x4 = transmute(vmlsl_high_u16(transmute(a), transmute(b), transmute(c)));
20415 assert_eq!(r, e);
20416 }
20417
20418 #[simd_test(enable = "neon")]
20419 unsafe fn test_vmlsl_high_u32() {
20420 let a: u64x2 = u64x2::new(14, 15);
20421 let b: u32x4 = u32x4::new(2, 2, 2, 2);
20422 let c: u32x4 = u32x4::new(3, 3, 0, 1);
20423 let e: u64x2 = u64x2::new(14, 13);
20424 let r: u64x2 = transmute(vmlsl_high_u32(transmute(a), transmute(b), transmute(c)));
20425 assert_eq!(r, e);
20426 }
20427
20428 #[simd_test(enable = "neon")]
20429 unsafe fn test_vmlsl_high_n_s16() {
20430 let a: i32x4 = i32x4::new(14, 15, 16, 17);
20431 let b: i16x8 = i16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
20432 let c: i16 = 2;
20433 let e: i32x4 = i32x4::new(14, 13, 12, 11);
20434 let r: i32x4 = transmute(vmlsl_high_n_s16(transmute(a), transmute(b), transmute(c)));
20435 assert_eq!(r, e);
20436 }
20437
20438 #[simd_test(enable = "neon")]
20439 unsafe fn test_vmlsl_high_n_s32() {
20440 let a: i64x2 = i64x2::new(14, 15);
20441 let b: i32x4 = i32x4::new(3, 3, 0, 1);
20442 let c: i32 = 2;
20443 let e: i64x2 = i64x2::new(14, 13);
20444 let r: i64x2 = transmute(vmlsl_high_n_s32(transmute(a), transmute(b), transmute(c)));
20445 assert_eq!(r, e);
20446 }
20447
20448 #[simd_test(enable = "neon")]
20449 unsafe fn test_vmlsl_high_n_u16() {
20450 let a: u32x4 = u32x4::new(14, 15, 16, 17);
20451 let b: u16x8 = u16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
20452 let c: u16 = 2;
20453 let e: u32x4 = u32x4::new(14, 13, 12, 11);
20454 let r: u32x4 = transmute(vmlsl_high_n_u16(transmute(a), transmute(b), transmute(c)));
20455 assert_eq!(r, e);
20456 }
20457
20458 #[simd_test(enable = "neon")]
20459 unsafe fn test_vmlsl_high_n_u32() {
20460 let a: u64x2 = u64x2::new(14, 15);
20461 let b: u32x4 = u32x4::new(3, 3, 0, 1);
20462 let c: u32 = 2;
20463 let e: u64x2 = u64x2::new(14, 13);
20464 let r: u64x2 = transmute(vmlsl_high_n_u32(transmute(a), transmute(b), transmute(c)));
20465 assert_eq!(r, e);
20466 }
20467
20468 #[simd_test(enable = "neon")]
20469 unsafe fn test_vmlsl_high_lane_s16() {
20470 let a: i32x4 = i32x4::new(14, 15, 16, 17);
20471 let b: i16x8 = i16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
20472 let c: i16x4 = i16x4::new(0, 2, 0, 0);
20473 let e: i32x4 = i32x4::new(14, 13, 12, 11);
20474 let r: i32x4 = transmute(vmlsl_high_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
20475 assert_eq!(r, e);
20476 }
20477
20478 #[simd_test(enable = "neon")]
20479 unsafe fn test_vmlsl_high_laneq_s16() {
20480 let a: i32x4 = i32x4::new(14, 15, 16, 17);
20481 let b: i16x8 = i16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
20482 let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
20483 let e: i32x4 = i32x4::new(14, 13, 12, 11);
20484 let r: i32x4 = transmute(vmlsl_high_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
20485 assert_eq!(r, e);
20486 }
20487
20488 #[simd_test(enable = "neon")]
20489 unsafe fn test_vmlsl_high_lane_s32() {
20490 let a: i64x2 = i64x2::new(14, 15);
20491 let b: i32x4 = i32x4::new(3, 3, 0, 1);
20492 let c: i32x2 = i32x2::new(0, 2);
20493 let e: i64x2 = i64x2::new(14, 13);
20494 let r: i64x2 = transmute(vmlsl_high_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
20495 assert_eq!(r, e);
20496 }
20497
20498 #[simd_test(enable = "neon")]
20499 unsafe fn test_vmlsl_high_laneq_s32() {
20500 let a: i64x2 = i64x2::new(14, 15);
20501 let b: i32x4 = i32x4::new(3, 3, 0, 1);
20502 let c: i32x4 = i32x4::new(0, 2, 0, 0);
20503 let e: i64x2 = i64x2::new(14, 13);
20504 let r: i64x2 = transmute(vmlsl_high_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
20505 assert_eq!(r, e);
20506 }
20507
20508 #[simd_test(enable = "neon")]
20509 unsafe fn test_vmlsl_high_lane_u16() {
20510 let a: u32x4 = u32x4::new(14, 15, 16, 17);
20511 let b: u16x8 = u16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
20512 let c: u16x4 = u16x4::new(0, 2, 0, 0);
20513 let e: u32x4 = u32x4::new(14, 13, 12, 11);
20514 let r: u32x4 = transmute(vmlsl_high_lane_u16::<1>(transmute(a), transmute(b), transmute(c)));
20515 assert_eq!(r, e);
20516 }
20517
20518 #[simd_test(enable = "neon")]
20519 unsafe fn test_vmlsl_high_laneq_u16() {
20520 let a: u32x4 = u32x4::new(14, 15, 16, 17);
20521 let b: u16x8 = u16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
20522 let c: u16x8 = u16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
20523 let e: u32x4 = u32x4::new(14, 13, 12, 11);
20524 let r: u32x4 = transmute(vmlsl_high_laneq_u16::<1>(transmute(a), transmute(b), transmute(c)));
20525 assert_eq!(r, e);
20526 }
20527
20528 #[simd_test(enable = "neon")]
20529 unsafe fn test_vmlsl_high_lane_u32() {
20530 let a: u64x2 = u64x2::new(14, 15);
20531 let b: u32x4 = u32x4::new(3, 3, 0, 1);
20532 let c: u32x2 = u32x2::new(0, 2);
20533 let e: u64x2 = u64x2::new(14, 13);
20534 let r: u64x2 = transmute(vmlsl_high_lane_u32::<1>(transmute(a), transmute(b), transmute(c)));
20535 assert_eq!(r, e);
20536 }
20537
20538 #[simd_test(enable = "neon")]
20539 unsafe fn test_vmlsl_high_laneq_u32() {
20540 let a: u64x2 = u64x2::new(14, 15);
20541 let b: u32x4 = u32x4::new(3, 3, 0, 1);
20542 let c: u32x4 = u32x4::new(0, 2, 0, 0);
20543 let e: u64x2 = u64x2::new(14, 13);
20544 let r: u64x2 = transmute(vmlsl_high_laneq_u32::<1>(transmute(a), transmute(b), transmute(c)));
20545 assert_eq!(r, e);
20546 }
20547
20548 #[simd_test(enable = "neon")]
20549 unsafe fn test_vmovn_high_s16() {
20550 let a: i8x8 = i8x8::new(0, 1, 2, 3, 2, 3, 4, 5);
20551 let b: i16x8 = i16x8::new(2, 3, 4, 5, 12, 13, 14, 15);
20552 let e: i8x16 = i8x16::new(0, 1, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 12, 13, 14, 15);
20553 let r: i8x16 = transmute(vmovn_high_s16(transmute(a), transmute(b)));
20554 assert_eq!(r, e);
20555 }
20556
20557 #[simd_test(enable = "neon")]
20558 unsafe fn test_vmovn_high_s32() {
20559 let a: i16x4 = i16x4::new(0, 1, 2, 3);
20560 let b: i32x4 = i32x4::new(2, 3, 4, 5);
20561 let e: i16x8 = i16x8::new(0, 1, 2, 3, 2, 3, 4, 5);
20562 let r: i16x8 = transmute(vmovn_high_s32(transmute(a), transmute(b)));
20563 assert_eq!(r, e);
20564 }
20565
20566 #[simd_test(enable = "neon")]
20567 unsafe fn test_vmovn_high_s64() {
20568 let a: i32x2 = i32x2::new(0, 1);
20569 let b: i64x2 = i64x2::new(2, 3);
20570 let e: i32x4 = i32x4::new(0, 1, 2, 3);
20571 let r: i32x4 = transmute(vmovn_high_s64(transmute(a), transmute(b)));
20572 assert_eq!(r, e);
20573 }
20574
20575 #[simd_test(enable = "neon")]
20576 unsafe fn test_vmovn_high_u16() {
20577 let a: u8x8 = u8x8::new(0, 1, 2, 3, 2, 3, 4, 5);
20578 let b: u16x8 = u16x8::new(2, 3, 4, 5, 12, 13, 14, 15);
20579 let e: u8x16 = u8x16::new(0, 1, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 12, 13, 14, 15);
20580 let r: u8x16 = transmute(vmovn_high_u16(transmute(a), transmute(b)));
20581 assert_eq!(r, e);
20582 }
20583
20584 #[simd_test(enable = "neon")]
20585 unsafe fn test_vmovn_high_u32() {
20586 let a: u16x4 = u16x4::new(0, 1, 2, 3);
20587 let b: u32x4 = u32x4::new(2, 3, 4, 5);
20588 let e: u16x8 = u16x8::new(0, 1, 2, 3, 2, 3, 4, 5);
20589 let r: u16x8 = transmute(vmovn_high_u32(transmute(a), transmute(b)));
20590 assert_eq!(r, e);
20591 }
20592
20593 #[simd_test(enable = "neon")]
20594 unsafe fn test_vmovn_high_u64() {
20595 let a: u32x2 = u32x2::new(0, 1);
20596 let b: u64x2 = u64x2::new(2, 3);
20597 let e: u32x4 = u32x4::new(0, 1, 2, 3);
20598 let r: u32x4 = transmute(vmovn_high_u64(transmute(a), transmute(b)));
20599 assert_eq!(r, e);
20600 }
20601
20602 #[simd_test(enable = "neon")]
20603 unsafe fn test_vneg_s64() {
20604 let a: i64x1 = i64x1::new(0);
20605 let e: i64x1 = i64x1::new(0);
20606 let r: i64x1 = transmute(vneg_s64(transmute(a)));
20607 assert_eq!(r, e);
20608 }
20609
20610 #[simd_test(enable = "neon")]
20611 unsafe fn test_vnegq_s64() {
20612 let a: i64x2 = i64x2::new(0, 1);
20613 let e: i64x2 = i64x2::new(0, -1);
20614 let r: i64x2 = transmute(vnegq_s64(transmute(a)));
20615 assert_eq!(r, e);
20616 }
20617
20618 #[simd_test(enable = "neon")]
20619 unsafe fn test_vnegd_s64() {
20620 let a: i64 = 1;
20621 let e: i64 = -1;
20622 let r: i64 = transmute(vnegd_s64(transmute(a)));
20623 assert_eq!(r, e);
20624 }
20625
20626 #[simd_test(enable = "neon")]
20627 unsafe fn test_vneg_f64() {
20628 let a: f64 = 0.;
20629 let e: f64 = 0.;
20630 let r: f64 = transmute(vneg_f64(transmute(a)));
20631 assert_eq!(r, e);
20632 }
20633
20634 #[simd_test(enable = "neon")]
20635 unsafe fn test_vnegq_f64() {
20636 let a: f64x2 = f64x2::new(0., 1.);
20637 let e: f64x2 = f64x2::new(0., -1.);
20638 let r: f64x2 = transmute(vnegq_f64(transmute(a)));
20639 assert_eq!(r, e);
20640 }
20641
20642 #[simd_test(enable = "neon")]
20643 unsafe fn test_vqneg_s64() {
20644 let a: i64x1 = i64x1::new(-9223372036854775808);
20645 let e: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
20646 let r: i64x1 = transmute(vqneg_s64(transmute(a)));
20647 assert_eq!(r, e);
20648 }
20649
20650 #[simd_test(enable = "neon")]
20651 unsafe fn test_vqnegq_s64() {
20652 let a: i64x2 = i64x2::new(-9223372036854775808, 0);
20653 let e: i64x2 = i64x2::new(0x7F_FF_FF_FF_FF_FF_FF_FF, 0);
20654 let r: i64x2 = transmute(vqnegq_s64(transmute(a)));
20655 assert_eq!(r, e);
20656 }
20657
20658 #[simd_test(enable = "neon")]
20659 unsafe fn test_vqnegb_s8() {
20660 let a: i8 = 1;
20661 let e: i8 = -1;
20662 let r: i8 = transmute(vqnegb_s8(transmute(a)));
20663 assert_eq!(r, e);
20664 }
20665
20666 #[simd_test(enable = "neon")]
20667 unsafe fn test_vqnegh_s16() {
20668 let a: i16 = 1;
20669 let e: i16 = -1;
20670 let r: i16 = transmute(vqnegh_s16(transmute(a)));
20671 assert_eq!(r, e);
20672 }
20673
20674 #[simd_test(enable = "neon")]
20675 unsafe fn test_vqnegs_s32() {
20676 let a: i32 = 1;
20677 let e: i32 = -1;
20678 let r: i32 = transmute(vqnegs_s32(transmute(a)));
20679 assert_eq!(r, e);
20680 }
20681
20682 #[simd_test(enable = "neon")]
20683 unsafe fn test_vqnegd_s64() {
20684 let a: i64 = 1;
20685 let e: i64 = -1;
20686 let r: i64 = transmute(vqnegd_s64(transmute(a)));
20687 assert_eq!(r, e);
20688 }
20689
20690 #[simd_test(enable = "neon")]
20691 unsafe fn test_vqsubb_s8() {
20692 let a: i8 = 42;
20693 let b: i8 = 1;
20694 let e: i8 = 41;
20695 let r: i8 = transmute(vqsubb_s8(transmute(a), transmute(b)));
20696 assert_eq!(r, e);
20697 }
20698
20699 #[simd_test(enable = "neon")]
20700 unsafe fn test_vqsubh_s16() {
20701 let a: i16 = 42;
20702 let b: i16 = 1;
20703 let e: i16 = 41;
20704 let r: i16 = transmute(vqsubh_s16(transmute(a), transmute(b)));
20705 assert_eq!(r, e);
20706 }
20707
20708 #[simd_test(enable = "neon")]
20709 unsafe fn test_vqsubb_u8() {
20710 let a: u8 = 42;
20711 let b: u8 = 1;
20712 let e: u8 = 41;
20713 let r: u8 = transmute(vqsubb_u8(transmute(a), transmute(b)));
20714 assert_eq!(r, e);
20715 }
20716
20717 #[simd_test(enable = "neon")]
20718 unsafe fn test_vqsubh_u16() {
20719 let a: u16 = 42;
20720 let b: u16 = 1;
20721 let e: u16 = 41;
20722 let r: u16 = transmute(vqsubh_u16(transmute(a), transmute(b)));
20723 assert_eq!(r, e);
20724 }
20725
20726 #[simd_test(enable = "neon")]
20727 unsafe fn test_vqsubs_u32() {
20728 let a: u32 = 42;
20729 let b: u32 = 1;
20730 let e: u32 = 41;
20731 let r: u32 = transmute(vqsubs_u32(transmute(a), transmute(b)));
20732 assert_eq!(r, e);
20733 }
20734
20735 #[simd_test(enable = "neon")]
20736 unsafe fn test_vqsubd_u64() {
20737 let a: u64 = 42;
20738 let b: u64 = 1;
20739 let e: u64 = 41;
20740 let r: u64 = transmute(vqsubd_u64(transmute(a), transmute(b)));
20741 assert_eq!(r, e);
20742 }
20743
20744 #[simd_test(enable = "neon")]
20745 unsafe fn test_vqsubs_s32() {
20746 let a: i32 = 42;
20747 let b: i32 = 1;
20748 let e: i32 = 41;
20749 let r: i32 = transmute(vqsubs_s32(transmute(a), transmute(b)));
20750 assert_eq!(r, e);
20751 }
20752
20753 #[simd_test(enable = "neon")]
20754 unsafe fn test_vqsubd_s64() {
20755 let a: i64 = 42;
20756 let b: i64 = 1;
20757 let e: i64 = 41;
20758 let r: i64 = transmute(vqsubd_s64(transmute(a), transmute(b)));
20759 assert_eq!(r, e);
20760 }
20761
20762 #[simd_test(enable = "neon")]
20763 unsafe fn test_vrbit_s8() {
20764 let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
20765 let e: i8x8 = i8x8::new(0, 64, 32, 96, 16, 80, 48, 112);
20766 let r: i8x8 = transmute(vrbit_s8(transmute(a)));
20767 assert_eq!(r, e);
20768 }
20769
20770 #[simd_test(enable = "neon")]
20771 unsafe fn test_vrbitq_s8() {
20772 let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
20773 let e: i8x16 = i8x16::new(0, 64, 32, 96, 16, 80, 48, 112, 8, 72, 40, 104, 24, 88, 56, 120);
20774 let r: i8x16 = transmute(vrbitq_s8(transmute(a)));
20775 assert_eq!(r, e);
20776 }
20777
20778 #[simd_test(enable = "neon")]
20779 unsafe fn test_vrbit_u8() {
20780 let a: u8x8 = u8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
20781 let e: u8x8 = u8x8::new(0, 64, 32, 96, 16, 80, 48, 112);
20782 let r: u8x8 = transmute(vrbit_u8(transmute(a)));
20783 assert_eq!(r, e);
20784 }
20785
20786 #[simd_test(enable = "neon")]
20787 unsafe fn test_vrbitq_u8() {
20788 let a: u8x16 = u8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
20789 let e: u8x16 = u8x16::new(0, 64, 32, 96, 16, 80, 48, 112, 8, 72, 40, 104, 24, 88, 56, 120);
20790 let r: u8x16 = transmute(vrbitq_u8(transmute(a)));
20791 assert_eq!(r, e);
20792 }
20793
20794 #[simd_test(enable = "neon")]
20795 unsafe fn test_vrbit_p8() {
20796 let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
20797 let e: i8x8 = i8x8::new(0, 64, 32, 96, 16, 80, 48, 112);
20798 let r: i8x8 = transmute(vrbit_p8(transmute(a)));
20799 assert_eq!(r, e);
20800 }
20801
20802 #[simd_test(enable = "neon")]
20803 unsafe fn test_vrbitq_p8() {
20804 let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
20805 let e: i8x16 = i8x16::new(0, 64, 32, 96, 16, 80, 48, 112, 8, 72, 40, 104, 24, 88, 56, 120);
20806 let r: i8x16 = transmute(vrbitq_p8(transmute(a)));
20807 assert_eq!(r, e);
20808 }
20809
20810 #[simd_test(enable = "neon")]
20811 unsafe fn test_vrndx_f32() {
20812 let a: f32x2 = f32x2::new(-1.5, 0.5);
20813 let e: f32x2 = f32x2::new(-2.0, 0.0);
20814 let r: f32x2 = transmute(vrndx_f32(transmute(a)));
20815 assert_eq!(r, e);
20816 }
20817
20818 #[simd_test(enable = "neon")]
20819 unsafe fn test_vrndxq_f32() {
20820 let a: f32x4 = f32x4::new(-1.5, 0.5, 1.5, 2.5);
20821 let e: f32x4 = f32x4::new(-2.0, 0.0, 2.0, 2.0);
20822 let r: f32x4 = transmute(vrndxq_f32(transmute(a)));
20823 assert_eq!(r, e);
20824 }
20825
20826 #[simd_test(enable = "neon")]
20827 unsafe fn test_vrndx_f64() {
20828 let a: f64 = -1.5;
20829 let e: f64 = -2.0;
20830 let r: f64 = transmute(vrndx_f64(transmute(a)));
20831 assert_eq!(r, e);
20832 }
20833
20834 #[simd_test(enable = "neon")]
20835 unsafe fn test_vrndxq_f64() {
20836 let a: f64x2 = f64x2::new(-1.5, 0.5);
20837 let e: f64x2 = f64x2::new(-2.0, 0.0);
20838 let r: f64x2 = transmute(vrndxq_f64(transmute(a)));
20839 assert_eq!(r, e);
20840 }
20841
20842 #[simd_test(enable = "neon")]
20843 unsafe fn test_vrnda_f32() {
20844 let a: f32x2 = f32x2::new(-1.5, 0.5);
20845 let e: f32x2 = f32x2::new(-2.0, 1.0);
20846 let r: f32x2 = transmute(vrnda_f32(transmute(a)));
20847 assert_eq!(r, e);
20848 }
20849
20850 #[simd_test(enable = "neon")]
20851 unsafe fn test_vrndaq_f32() {
20852 let a: f32x4 = f32x4::new(-1.5, 0.5, 1.5, 2.5);
20853 let e: f32x4 = f32x4::new(-2.0, 1.0, 2.0, 3.0);
20854 let r: f32x4 = transmute(vrndaq_f32(transmute(a)));
20855 assert_eq!(r, e);
20856 }
20857
20858 #[simd_test(enable = "neon")]
20859 unsafe fn test_vrnda_f64() {
20860 let a: f64 = -1.5;
20861 let e: f64 = -2.0;
20862 let r: f64 = transmute(vrnda_f64(transmute(a)));
20863 assert_eq!(r, e);
20864 }
20865
20866 #[simd_test(enable = "neon")]
20867 unsafe fn test_vrndaq_f64() {
20868 let a: f64x2 = f64x2::new(-1.5, 0.5);
20869 let e: f64x2 = f64x2::new(-2.0, 1.0);
20870 let r: f64x2 = transmute(vrndaq_f64(transmute(a)));
20871 assert_eq!(r, e);
20872 }
20873
20874 #[simd_test(enable = "neon")]
20875 unsafe fn test_vrndn_f64() {
20876 let a: f64 = -1.5;
20877 let e: f64 = -2.0;
20878 let r: f64 = transmute(vrndn_f64(transmute(a)));
20879 assert_eq!(r, e);
20880 }
20881
20882 #[simd_test(enable = "neon")]
20883 unsafe fn test_vrndnq_f64() {
20884 let a: f64x2 = f64x2::new(-1.5, 0.5);
20885 let e: f64x2 = f64x2::new(-2.0, 0.0);
20886 let r: f64x2 = transmute(vrndnq_f64(transmute(a)));
20887 assert_eq!(r, e);
20888 }
20889
20890 #[simd_test(enable = "neon")]
20891 unsafe fn test_vrndns_f32() {
20892 let a: f32 = -1.5;
20893 let e: f32 = -2.0;
20894 let r: f32 = transmute(vrndns_f32(transmute(a)));
20895 assert_eq!(r, e);
20896 }
20897
20898 #[simd_test(enable = "neon")]
20899 unsafe fn test_vrndm_f32() {
20900 let a: f32x2 = f32x2::new(-1.5, 0.5);
20901 let e: f32x2 = f32x2::new(-2.0, 0.0);
20902 let r: f32x2 = transmute(vrndm_f32(transmute(a)));
20903 assert_eq!(r, e);
20904 }
20905
20906 #[simd_test(enable = "neon")]
20907 unsafe fn test_vrndmq_f32() {
20908 let a: f32x4 = f32x4::new(-1.5, 0.5, 1.5, 2.5);
20909 let e: f32x4 = f32x4::new(-2.0, 0.0, 1.0, 2.0);
20910 let r: f32x4 = transmute(vrndmq_f32(transmute(a)));
20911 assert_eq!(r, e);
20912 }
20913
20914 #[simd_test(enable = "neon")]
20915 unsafe fn test_vrndm_f64() {
20916 let a: f64 = -1.5;
20917 let e: f64 = -2.0;
20918 let r: f64 = transmute(vrndm_f64(transmute(a)));
20919 assert_eq!(r, e);
20920 }
20921
20922 #[simd_test(enable = "neon")]
20923 unsafe fn test_vrndmq_f64() {
20924 let a: f64x2 = f64x2::new(-1.5, 0.5);
20925 let e: f64x2 = f64x2::new(-2.0, 0.0);
20926 let r: f64x2 = transmute(vrndmq_f64(transmute(a)));
20927 assert_eq!(r, e);
20928 }
20929
20930 #[simd_test(enable = "neon")]
20931 unsafe fn test_vrndp_f32() {
20932 let a: f32x2 = f32x2::new(-1.5, 0.5);
20933 let e: f32x2 = f32x2::new(-1.0, 1.0);
20934 let r: f32x2 = transmute(vrndp_f32(transmute(a)));
20935 assert_eq!(r, e);
20936 }
20937
20938 #[simd_test(enable = "neon")]
20939 unsafe fn test_vrndpq_f32() {
20940 let a: f32x4 = f32x4::new(-1.5, 0.5, 1.5, 2.5);
20941 let e: f32x4 = f32x4::new(-1.0, 1.0, 2.0, 3.0);
20942 let r: f32x4 = transmute(vrndpq_f32(transmute(a)));
20943 assert_eq!(r, e);
20944 }
20945
20946 #[simd_test(enable = "neon")]
20947 unsafe fn test_vrndp_f64() {
20948 let a: f64 = -1.5;
20949 let e: f64 = -1.0;
20950 let r: f64 = transmute(vrndp_f64(transmute(a)));
20951 assert_eq!(r, e);
20952 }
20953
20954 #[simd_test(enable = "neon")]
20955 unsafe fn test_vrndpq_f64() {
20956 let a: f64x2 = f64x2::new(-1.5, 0.5);
20957 let e: f64x2 = f64x2::new(-1.0, 1.0);
20958 let r: f64x2 = transmute(vrndpq_f64(transmute(a)));
20959 assert_eq!(r, e);
20960 }
20961
20962 #[simd_test(enable = "neon")]
20963 unsafe fn test_vrnd_f32() {
20964 let a: f32x2 = f32x2::new(-1.5, 0.5);
20965 let e: f32x2 = f32x2::new(-1.0, 0.0);
20966 let r: f32x2 = transmute(vrnd_f32(transmute(a)));
20967 assert_eq!(r, e);
20968 }
20969
20970 #[simd_test(enable = "neon")]
20971 unsafe fn test_vrndq_f32() {
20972 let a: f32x4 = f32x4::new(-1.5, 0.5, 1.5, 2.5);
20973 let e: f32x4 = f32x4::new(-1.0, 0.0, 1.0, 2.0);
20974 let r: f32x4 = transmute(vrndq_f32(transmute(a)));
20975 assert_eq!(r, e);
20976 }
20977
20978 #[simd_test(enable = "neon")]
20979 unsafe fn test_vrnd_f64() {
20980 let a: f64 = -1.5;
20981 let e: f64 = -1.0;
20982 let r: f64 = transmute(vrnd_f64(transmute(a)));
20983 assert_eq!(r, e);
20984 }
20985
20986 #[simd_test(enable = "neon")]
20987 unsafe fn test_vrndq_f64() {
20988 let a: f64x2 = f64x2::new(-1.5, 0.5);
20989 let e: f64x2 = f64x2::new(-1.0, 0.0);
20990 let r: f64x2 = transmute(vrndq_f64(transmute(a)));
20991 assert_eq!(r, e);
20992 }
20993
20994 #[simd_test(enable = "neon")]
20995 unsafe fn test_vrndi_f32() {
20996 let a: f32x2 = f32x2::new(-1.5, 0.5);
20997 let e: f32x2 = f32x2::new(-2.0, 0.0);
20998 let r: f32x2 = transmute(vrndi_f32(transmute(a)));
20999 assert_eq!(r, e);
21000 }
21001
21002 #[simd_test(enable = "neon")]
21003 unsafe fn test_vrndiq_f32() {
21004 let a: f32x4 = f32x4::new(-1.5, 0.5, 1.5, 2.5);
21005 let e: f32x4 = f32x4::new(-2.0, 0.0, 2.0, 2.0);
21006 let r: f32x4 = transmute(vrndiq_f32(transmute(a)));
21007 assert_eq!(r, e);
21008 }
21009
21010 #[simd_test(enable = "neon")]
21011 unsafe fn test_vrndi_f64() {
21012 let a: f64 = -1.5;
21013 let e: f64 = -2.0;
21014 let r: f64 = transmute(vrndi_f64(transmute(a)));
21015 assert_eq!(r, e);
21016 }
21017
21018 #[simd_test(enable = "neon")]
21019 unsafe fn test_vrndiq_f64() {
21020 let a: f64x2 = f64x2::new(-1.5, 0.5);
21021 let e: f64x2 = f64x2::new(-2.0, 0.0);
21022 let r: f64x2 = transmute(vrndiq_f64(transmute(a)));
21023 assert_eq!(r, e);
21024 }
21025
21026 #[simd_test(enable = "neon")]
21027 unsafe fn test_vqaddb_s8() {
21028 let a: i8 = 42;
21029 let b: i8 = 1;
21030 let e: i8 = 43;
21031 let r: i8 = transmute(vqaddb_s8(transmute(a), transmute(b)));
21032 assert_eq!(r, e);
21033 }
21034
21035 #[simd_test(enable = "neon")]
21036 unsafe fn test_vqaddh_s16() {
21037 let a: i16 = 42;
21038 let b: i16 = 1;
21039 let e: i16 = 43;
21040 let r: i16 = transmute(vqaddh_s16(transmute(a), transmute(b)));
21041 assert_eq!(r, e);
21042 }
21043
21044 #[simd_test(enable = "neon")]
21045 unsafe fn test_vqaddb_u8() {
21046 let a: u8 = 42;
21047 let b: u8 = 1;
21048 let e: u8 = 43;
21049 let r: u8 = transmute(vqaddb_u8(transmute(a), transmute(b)));
21050 assert_eq!(r, e);
21051 }
21052
21053 #[simd_test(enable = "neon")]
21054 unsafe fn test_vqaddh_u16() {
21055 let a: u16 = 42;
21056 let b: u16 = 1;
21057 let e: u16 = 43;
21058 let r: u16 = transmute(vqaddh_u16(transmute(a), transmute(b)));
21059 assert_eq!(r, e);
21060 }
21061
21062 #[simd_test(enable = "neon")]
21063 unsafe fn test_vqadds_u32() {
21064 let a: u32 = 42;
21065 let b: u32 = 1;
21066 let e: u32 = 43;
21067 let r: u32 = transmute(vqadds_u32(transmute(a), transmute(b)));
21068 assert_eq!(r, e);
21069 }
21070
21071 #[simd_test(enable = "neon")]
21072 unsafe fn test_vqaddd_u64() {
21073 let a: u64 = 42;
21074 let b: u64 = 1;
21075 let e: u64 = 43;
21076 let r: u64 = transmute(vqaddd_u64(transmute(a), transmute(b)));
21077 assert_eq!(r, e);
21078 }
21079
21080 #[simd_test(enable = "neon")]
21081 unsafe fn test_vqadds_s32() {
21082 let a: i32 = 42;
21083 let b: i32 = 1;
21084 let e: i32 = 43;
21085 let r: i32 = transmute(vqadds_s32(transmute(a), transmute(b)));
21086 assert_eq!(r, e);
21087 }
21088
21089 #[simd_test(enable = "neon")]
21090 unsafe fn test_vqaddd_s64() {
21091 let a: i64 = 42;
21092 let b: i64 = 1;
21093 let e: i64 = 43;
21094 let r: i64 = transmute(vqaddd_s64(transmute(a), transmute(b)));
21095 assert_eq!(r, e);
21096 }
21097
21098 #[simd_test(enable = "neon")]
21099 unsafe fn test_vld1_f64_x2() {
21100 let a: [f64; 3] = [0., 1., 2.];
21101 let e: [f64; 2] = [1., 2.];
21102 let r: [f64; 2] = transmute(vld1_f64_x2(a[1..].as_ptr()));
21103 assert_eq!(r, e);
21104 }
21105
21106 #[simd_test(enable = "neon")]
21107 unsafe fn test_vld1q_f64_x2() {
21108 let a: [f64; 5] = [0., 1., 2., 3., 4.];
21109 let e: [f64x2; 2] = [f64x2::new(1., 2.), f64x2::new(3., 4.)];
21110 let r: [f64x2; 2] = transmute(vld1q_f64_x2(a[1..].as_ptr()));
21111 assert_eq!(r, e);
21112 }
21113
21114 #[simd_test(enable = "neon")]
21115 unsafe fn test_vld1_f64_x3() {
21116 let a: [f64; 4] = [0., 1., 2., 3.];
21117 let e: [f64; 3] = [1., 2., 3.];
21118 let r: [f64; 3] = transmute(vld1_f64_x3(a[1..].as_ptr()));
21119 assert_eq!(r, e);
21120 }
21121
21122 #[simd_test(enable = "neon")]
21123 unsafe fn test_vld1q_f64_x3() {
21124 let a: [f64; 7] = [0., 1., 2., 3., 4., 5., 6.];
21125 let e: [f64x2; 3] = [f64x2::new(1., 2.), f64x2::new(3., 4.), f64x2::new(5., 6.)];
21126 let r: [f64x2; 3] = transmute(vld1q_f64_x3(a[1..].as_ptr()));
21127 assert_eq!(r, e);
21128 }
21129
21130 #[simd_test(enable = "neon")]
21131 unsafe fn test_vld1_f64_x4() {
21132 let a: [f64; 5] = [0., 1., 2., 3., 4.];
21133 let e: [f64; 4] = [1., 2., 3., 4.];
21134 let r: [f64; 4] = transmute(vld1_f64_x4(a[1..].as_ptr()));
21135 assert_eq!(r, e);
21136 }
21137
21138 #[simd_test(enable = "neon")]
21139 unsafe fn test_vld1q_f64_x4() {
21140 let a: [f64; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.];
21141 let e: [f64x2; 4] = [f64x2::new(1., 2.), f64x2::new(3., 4.), f64x2::new(5., 6.), f64x2::new(7., 8.)];
21142 let r: [f64x2; 4] = transmute(vld1q_f64_x4(a[1..].as_ptr()));
21143 assert_eq!(r, e);
21144 }
21145
21146 #[simd_test(enable = "neon")]
21147 unsafe fn test_vld2q_s64() {
21148 let a: [i64; 5] = [0, 1, 2, 2, 3];
21149 let e: [i64x2; 2] = [i64x2::new(1, 2), i64x2::new(2, 3)];
21150 let r: [i64x2; 2] = transmute(vld2q_s64(a[1..].as_ptr()));
21151 assert_eq!(r, e);
21152 }
21153
21154 #[simd_test(enable = "neon")]
21155 unsafe fn test_vld2q_u64() {
21156 let a: [u64; 5] = [0, 1, 2, 2, 3];
21157 let e: [u64x2; 2] = [u64x2::new(1, 2), u64x2::new(2, 3)];
21158 let r: [u64x2; 2] = transmute(vld2q_u64(a[1..].as_ptr()));
21159 assert_eq!(r, e);
21160 }
21161
21162 #[simd_test(enable = "neon")]
21163 unsafe fn test_vld2q_p64() {
21164 let a: [u64; 5] = [0, 1, 2, 2, 3];
21165 let e: [i64x2; 2] = [i64x2::new(1, 2), i64x2::new(2, 3)];
21166 let r: [i64x2; 2] = transmute(vld2q_p64(a[1..].as_ptr()));
21167 assert_eq!(r, e);
21168 }
21169
21170 #[simd_test(enable = "neon")]
21171 unsafe fn test_vld2_f64() {
21172 let a: [f64; 3] = [0., 1., 2.];
21173 let e: [f64; 2] = [1., 2.];
21174 let r: [f64; 2] = transmute(vld2_f64(a[1..].as_ptr()));
21175 assert_eq!(r, e);
21176 }
21177
21178 #[simd_test(enable = "neon")]
21179 unsafe fn test_vld2q_f64() {
21180 let a: [f64; 5] = [0., 1., 2., 2., 3.];
21181 let e: [f64x2; 2] = [f64x2::new(1., 2.), f64x2::new(2., 3.)];
21182 let r: [f64x2; 2] = transmute(vld2q_f64(a[1..].as_ptr()));
21183 assert_eq!(r, e);
21184 }
21185
21186 #[simd_test(enable = "neon")]
21187 unsafe fn test_vld2q_dup_s64() {
21188 let a: [i64; 5] = [0, 1, 1, 2, 3];
21189 let e: [i64x2; 2] = [i64x2::new(1, 1), i64x2::new(1, 1)];
21190 let r: [i64x2; 2] = transmute(vld2q_dup_s64(a[1..].as_ptr()));
21191 assert_eq!(r, e);
21192 }
21193
21194 #[simd_test(enable = "neon")]
21195 unsafe fn test_vld2q_dup_u64() {
21196 let a: [u64; 5] = [0, 1, 1, 2, 3];
21197 let e: [u64x2; 2] = [u64x2::new(1, 1), u64x2::new(1, 1)];
21198 let r: [u64x2; 2] = transmute(vld2q_dup_u64(a[1..].as_ptr()));
21199 assert_eq!(r, e);
21200 }
21201
21202 #[simd_test(enable = "neon")]
21203 unsafe fn test_vld2q_dup_p64() {
21204 let a: [u64; 5] = [0, 1, 1, 2, 3];
21205 let e: [i64x2; 2] = [i64x2::new(1, 1), i64x2::new(1, 1)];
21206 let r: [i64x2; 2] = transmute(vld2q_dup_p64(a[1..].as_ptr()));
21207 assert_eq!(r, e);
21208 }
21209
21210 #[simd_test(enable = "neon")]
21211 unsafe fn test_vld2_dup_f64() {
21212 let a: [f64; 3] = [0., 1., 1.];
21213 let e: [f64; 2] = [1., 1.];
21214 let r: [f64; 2] = transmute(vld2_dup_f64(a[1..].as_ptr()));
21215 assert_eq!(r, e);
21216 }
21217
21218 #[simd_test(enable = "neon")]
21219 unsafe fn test_vld2q_dup_f64() {
21220 let a: [f64; 5] = [0., 1., 1., 2., 3.];
21221 let e: [f64x2; 2] = [f64x2::new(1., 1.), f64x2::new(1., 1.)];
21222 let r: [f64x2; 2] = transmute(vld2q_dup_f64(a[1..].as_ptr()));
21223 assert_eq!(r, e);
21224 }
21225
21226 #[simd_test(enable = "neon")]
21227 unsafe fn test_vld2q_lane_s8() {
21228 let a: [i8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
21229 let b: [i8x16; 2] = [i8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)];
21230 let e: [i8x16; 2] = [i8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)];
21231 let r: [i8x16; 2] = transmute(vld2q_lane_s8::<0>(a[1..].as_ptr(), transmute(b)));
21232 assert_eq!(r, e);
21233 }
21234
21235 #[simd_test(enable = "neon")]
21236 unsafe fn test_vld2_lane_s64() {
21237 let a: [i64; 3] = [0, 1, 2];
21238 let b: [i64x1; 2] = [i64x1::new(0), i64x1::new(2)];
21239 let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(2)];
21240 let r: [i64x1; 2] = transmute(vld2_lane_s64::<0>(a[1..].as_ptr(), transmute(b)));
21241 assert_eq!(r, e);
21242 }
21243
21244 #[simd_test(enable = "neon")]
21245 unsafe fn test_vld2q_lane_s64() {
21246 let a: [i64; 5] = [0, 1, 2, 3, 4];
21247 let b: [i64x2; 2] = [i64x2::new(0, 2), i64x2::new(2, 14)];
21248 let e: [i64x2; 2] = [i64x2::new(1, 2), i64x2::new(2, 14)];
21249 let r: [i64x2; 2] = transmute(vld2q_lane_s64::<0>(a[1..].as_ptr(), transmute(b)));
21250 assert_eq!(r, e);
21251 }
21252
21253 #[simd_test(enable = "neon")]
21254 unsafe fn test_vld2_lane_p64() {
21255 let a: [u64; 3] = [0, 1, 2];
21256 let b: [i64x1; 2] = [i64x1::new(0), i64x1::new(2)];
21257 let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(2)];
21258 let r: [i64x1; 2] = transmute(vld2_lane_p64::<0>(a[1..].as_ptr(), transmute(b)));
21259 assert_eq!(r, e);
21260 }
21261
21262 #[simd_test(enable = "neon")]
21263 unsafe fn test_vld2q_lane_p64() {
21264 let a: [u64; 5] = [0, 1, 2, 3, 4];
21265 let b: [i64x2; 2] = [i64x2::new(0, 2), i64x2::new(2, 14)];
21266 let e: [i64x2; 2] = [i64x2::new(1, 2), i64x2::new(2, 14)];
21267 let r: [i64x2; 2] = transmute(vld2q_lane_p64::<0>(a[1..].as_ptr(), transmute(b)));
21268 assert_eq!(r, e);
21269 }
21270
21271 #[simd_test(enable = "neon")]
21272 unsafe fn test_vld2q_lane_u8() {
21273 let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
21274 let b: [u8x16; 2] = [u8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)];
21275 let e: [u8x16; 2] = [u8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)];
21276 let r: [u8x16; 2] = transmute(vld2q_lane_u8::<0>(a[1..].as_ptr(), transmute(b)));
21277 assert_eq!(r, e);
21278 }
21279
21280 #[simd_test(enable = "neon")]
21281 unsafe fn test_vld2_lane_u64() {
21282 let a: [u64; 3] = [0, 1, 2];
21283 let b: [u64x1; 2] = [u64x1::new(0), u64x1::new(2)];
21284 let e: [u64x1; 2] = [u64x1::new(1), u64x1::new(2)];
21285 let r: [u64x1; 2] = transmute(vld2_lane_u64::<0>(a[1..].as_ptr(), transmute(b)));
21286 assert_eq!(r, e);
21287 }
21288
21289 #[simd_test(enable = "neon")]
21290 unsafe fn test_vld2q_lane_u64() {
21291 let a: [u64; 5] = [0, 1, 2, 3, 4];
21292 let b: [u64x2; 2] = [u64x2::new(0, 2), u64x2::new(2, 14)];
21293 let e: [u64x2; 2] = [u64x2::new(1, 2), u64x2::new(2, 14)];
21294 let r: [u64x2; 2] = transmute(vld2q_lane_u64::<0>(a[1..].as_ptr(), transmute(b)));
21295 assert_eq!(r, e);
21296 }
21297
21298 #[simd_test(enable = "neon")]
21299 unsafe fn test_vld2q_lane_p8() {
21300 let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
21301 let b: [i8x16; 2] = [i8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)];
21302 let e: [i8x16; 2] = [i8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)];
21303 let r: [i8x16; 2] = transmute(vld2q_lane_p8::<0>(a[1..].as_ptr(), transmute(b)));
21304 assert_eq!(r, e);
21305 }
21306
21307 #[simd_test(enable = "neon")]
21308 unsafe fn test_vld2_lane_f64() {
21309 let a: [f64; 3] = [0., 1., 2.];
21310 let b: [f64; 2] = [0., 2.];
21311 let e: [f64; 2] = [1., 2.];
21312 let r: [f64; 2] = transmute(vld2_lane_f64::<0>(a[1..].as_ptr(), transmute(b)));
21313 assert_eq!(r, e);
21314 }
21315
21316 #[simd_test(enable = "neon")]
21317 unsafe fn test_vld2q_lane_f64() {
21318 let a: [f64; 5] = [0., 1., 2., 3., 4.];
21319 let b: [f64x2; 2] = [f64x2::new(0., 2.), f64x2::new(2., 14.)];
21320 let e: [f64x2; 2] = [f64x2::new(1., 2.), f64x2::new(2., 14.)];
21321 let r: [f64x2; 2] = transmute(vld2q_lane_f64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21322 assert_eq!(r, e);
21323 }
21324
21325 #[simd_test(enable = "neon")]
3c0e092e
XL
21326 unsafe fn test_vld3q_s64() {
21327 let a: [i64; 7] = [0, 1, 2, 2, 2, 4, 4];
21328 let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(2, 4), i64x2::new(2, 4)];
21329 let r: [i64x2; 3] = transmute(vld3q_s64(a[1..].as_ptr()));
17df50a5
XL
21330 assert_eq!(r, e);
21331 }
21332
21333 #[simd_test(enable = "neon")]
3c0e092e
XL
21334 unsafe fn test_vld3q_u64() {
21335 let a: [u64; 7] = [0, 1, 2, 2, 2, 4, 4];
21336 let e: [u64x2; 3] = [u64x2::new(1, 2), u64x2::new(2, 4), u64x2::new(2, 4)];
21337 let r: [u64x2; 3] = transmute(vld3q_u64(a[1..].as_ptr()));
17df50a5
XL
21338 assert_eq!(r, e);
21339 }
21340
21341 #[simd_test(enable = "neon")]
3c0e092e
XL
21342 unsafe fn test_vld3q_p64() {
21343 let a: [u64; 7] = [0, 1, 2, 2, 2, 4, 4];
21344 let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(2, 4), i64x2::new(2, 4)];
21345 let r: [i64x2; 3] = transmute(vld3q_p64(a[1..].as_ptr()));
17df50a5
XL
21346 assert_eq!(r, e);
21347 }
21348
21349 #[simd_test(enable = "neon")]
3c0e092e
XL
21350 unsafe fn test_vld3_f64() {
21351 let a: [f64; 4] = [0., 1., 2., 2.];
21352 let e: [f64; 3] = [1., 2., 2.];
21353 let r: [f64; 3] = transmute(vld3_f64(a[1..].as_ptr()));
17df50a5
XL
21354 assert_eq!(r, e);
21355 }
21356
21357 #[simd_test(enable = "neon")]
3c0e092e
XL
21358 unsafe fn test_vld3q_f64() {
21359 let a: [f64; 7] = [0., 1., 2., 2., 2., 4., 4.];
21360 let e: [f64x2; 3] = [f64x2::new(1., 2.), f64x2::new(2., 4.), f64x2::new(2., 4.)];
21361 let r: [f64x2; 3] = transmute(vld3q_f64(a[1..].as_ptr()));
17df50a5
XL
21362 assert_eq!(r, e);
21363 }
21364
21365 #[simd_test(enable = "neon")]
3c0e092e
XL
21366 unsafe fn test_vld3q_dup_s64() {
21367 let a: [i64; 7] = [0, 1, 1, 1, 3, 1, 4];
21368 let e: [i64x2; 3] = [i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1)];
21369 let r: [i64x2; 3] = transmute(vld3q_dup_s64(a[1..].as_ptr()));
17df50a5
XL
21370 assert_eq!(r, e);
21371 }
21372
21373 #[simd_test(enable = "neon")]
3c0e092e
XL
21374 unsafe fn test_vld3q_dup_u64() {
21375 let a: [u64; 7] = [0, 1, 1, 1, 3, 1, 4];
21376 let e: [u64x2; 3] = [u64x2::new(1, 1), u64x2::new(1, 1), u64x2::new(1, 1)];
21377 let r: [u64x2; 3] = transmute(vld3q_dup_u64(a[1..].as_ptr()));
17df50a5
XL
21378 assert_eq!(r, e);
21379 }
21380
21381 #[simd_test(enable = "neon")]
3c0e092e
XL
21382 unsafe fn test_vld3q_dup_p64() {
21383 let a: [u64; 7] = [0, 1, 1, 1, 3, 1, 4];
21384 let e: [i64x2; 3] = [i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1)];
21385 let r: [i64x2; 3] = transmute(vld3q_dup_p64(a[1..].as_ptr()));
17df50a5
XL
21386 assert_eq!(r, e);
21387 }
21388
21389 #[simd_test(enable = "neon")]
3c0e092e
XL
21390 unsafe fn test_vld3_dup_f64() {
21391 let a: [f64; 4] = [0., 1., 1., 1.];
21392 let e: [f64; 3] = [1., 1., 1.];
21393 let r: [f64; 3] = transmute(vld3_dup_f64(a[1..].as_ptr()));
17df50a5
XL
21394 assert_eq!(r, e);
21395 }
21396
21397 #[simd_test(enable = "neon")]
3c0e092e
XL
21398 unsafe fn test_vld3q_dup_f64() {
21399 let a: [f64; 7] = [0., 1., 1., 1., 3., 1., 4.];
21400 let e: [f64x2; 3] = [f64x2::new(1., 1.), f64x2::new(1., 1.), f64x2::new(1., 1.)];
21401 let r: [f64x2; 3] = transmute(vld3q_dup_f64(a[1..].as_ptr()));
17df50a5
XL
21402 assert_eq!(r, e);
21403 }
21404
21405 #[simd_test(enable = "neon")]
3c0e092e
XL
21406 unsafe fn test_vld3q_lane_s8() {
21407 let a: [i8; 49] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
21408 let b: [i8x16; 3] = [i8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)];
21409 let e: [i8x16; 3] = [i8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)];
21410 let r: [i8x16; 3] = transmute(vld3q_lane_s8::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21411 assert_eq!(r, e);
21412 }
21413
21414 #[simd_test(enable = "neon")]
3c0e092e
XL
21415 unsafe fn test_vld3_lane_s64() {
21416 let a: [i64; 4] = [0, 1, 2, 2];
21417 let b: [i64x1; 3] = [i64x1::new(0), i64x1::new(2), i64x1::new(2)];
21418 let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(2), i64x1::new(2)];
21419 let r: [i64x1; 3] = transmute(vld3_lane_s64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21420 assert_eq!(r, e);
21421 }
21422
21423 #[simd_test(enable = "neon")]
3c0e092e
XL
21424 unsafe fn test_vld3q_lane_s64() {
21425 let a: [i64; 7] = [0, 1, 2, 2, 4, 5, 6];
21426 let b: [i64x2; 3] = [i64x2::new(0, 2), i64x2::new(2, 14), i64x2::new(2, 16)];
21427 let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(2, 14), i64x2::new(2, 16)];
21428 let r: [i64x2; 3] = transmute(vld3q_lane_s64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21429 assert_eq!(r, e);
21430 }
21431
21432 #[simd_test(enable = "neon")]
3c0e092e
XL
21433 unsafe fn test_vld3_lane_p64() {
21434 let a: [u64; 4] = [0, 1, 2, 2];
21435 let b: [i64x1; 3] = [i64x1::new(0), i64x1::new(2), i64x1::new(2)];
21436 let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(2), i64x1::new(2)];
21437 let r: [i64x1; 3] = transmute(vld3_lane_p64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21438 assert_eq!(r, e);
21439 }
21440
21441 #[simd_test(enable = "neon")]
3c0e092e
XL
21442 unsafe fn test_vld3q_lane_p64() {
21443 let a: [u64; 7] = [0, 1, 2, 2, 4, 5, 6];
21444 let b: [i64x2; 3] = [i64x2::new(0, 2), i64x2::new(2, 14), i64x2::new(2, 16)];
21445 let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(2, 14), i64x2::new(2, 16)];
21446 let r: [i64x2; 3] = transmute(vld3q_lane_p64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21447 assert_eq!(r, e);
21448 }
21449
21450 #[simd_test(enable = "neon")]
3c0e092e
XL
21451 unsafe fn test_vld3q_lane_p8() {
21452 let a: [u8; 49] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
21453 let b: [i8x16; 3] = [i8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)];
21454 let e: [i8x16; 3] = [i8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)];
21455 let r: [i8x16; 3] = transmute(vld3q_lane_p8::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21456 assert_eq!(r, e);
21457 }
21458
21459 #[simd_test(enable = "neon")]
3c0e092e
XL
21460 unsafe fn test_vld3q_lane_u8() {
21461 let a: [u8; 49] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
21462 let b: [u8x16; 3] = [u8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)];
21463 let e: [u8x16; 3] = [u8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), u8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)];
21464 let r: [u8x16; 3] = transmute(vld3q_lane_u8::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21465 assert_eq!(r, e);
21466 }
21467
21468 #[simd_test(enable = "neon")]
3c0e092e
XL
21469 unsafe fn test_vld3_lane_u64() {
21470 let a: [u64; 4] = [0, 1, 2, 2];
21471 let b: [u64x1; 3] = [u64x1::new(0), u64x1::new(2), u64x1::new(2)];
21472 let e: [u64x1; 3] = [u64x1::new(1), u64x1::new(2), u64x1::new(2)];
21473 let r: [u64x1; 3] = transmute(vld3_lane_u64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21474 assert_eq!(r, e);
21475 }
21476
21477 #[simd_test(enable = "neon")]
3c0e092e
XL
21478 unsafe fn test_vld3q_lane_u64() {
21479 let a: [u64; 7] = [0, 1, 2, 2, 4, 5, 6];
21480 let b: [u64x2; 3] = [u64x2::new(0, 2), u64x2::new(2, 14), u64x2::new(2, 16)];
21481 let e: [u64x2; 3] = [u64x2::new(1, 2), u64x2::new(2, 14), u64x2::new(2, 16)];
21482 let r: [u64x2; 3] = transmute(vld3q_lane_u64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21483 assert_eq!(r, e);
21484 }
21485
21486 #[simd_test(enable = "neon")]
3c0e092e
XL
21487 unsafe fn test_vld3_lane_f64() {
21488 let a: [f64; 4] = [0., 1., 2., 2.];
21489 let b: [f64; 3] = [0., 2., 2.];
21490 let e: [f64; 3] = [1., 2., 2.];
21491 let r: [f64; 3] = transmute(vld3_lane_f64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21492 assert_eq!(r, e);
21493 }
21494
21495 #[simd_test(enable = "neon")]
3c0e092e
XL
21496 unsafe fn test_vld3q_lane_f64() {
21497 let a: [f64; 7] = [0., 1., 2., 2., 4., 5., 6.];
21498 let b: [f64x2; 3] = [f64x2::new(0., 2.), f64x2::new(2., 14.), f64x2::new(9., 16.)];
21499 let e: [f64x2; 3] = [f64x2::new(1., 2.), f64x2::new(2., 14.), f64x2::new(2., 16.)];
21500 let r: [f64x2; 3] = transmute(vld3q_lane_f64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21501 assert_eq!(r, e);
21502 }
21503
21504 #[simd_test(enable = "neon")]
3c0e092e
XL
21505 unsafe fn test_vld4q_s64() {
21506 let a: [i64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
21507 let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(2, 6), i64x2::new(2, 6), i64x2::new(6, 8)];
21508 let r: [i64x2; 4] = transmute(vld4q_s64(a[1..].as_ptr()));
17df50a5
XL
21509 assert_eq!(r, e);
21510 }
21511
21512 #[simd_test(enable = "neon")]
3c0e092e
XL
21513 unsafe fn test_vld4q_u64() {
21514 let a: [u64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
21515 let e: [u64x2; 4] = [u64x2::new(1, 2), u64x2::new(2, 6), u64x2::new(2, 6), u64x2::new(6, 8)];
21516 let r: [u64x2; 4] = transmute(vld4q_u64(a[1..].as_ptr()));
17df50a5
XL
21517 assert_eq!(r, e);
21518 }
21519
21520 #[simd_test(enable = "neon")]
3c0e092e
XL
21521 unsafe fn test_vld4q_p64() {
21522 let a: [u64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
21523 let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(2, 6), i64x2::new(2, 6), i64x2::new(6, 8)];
21524 let r: [i64x2; 4] = transmute(vld4q_p64(a[1..].as_ptr()));
17df50a5
XL
21525 assert_eq!(r, e);
21526 }
21527
21528 #[simd_test(enable = "neon")]
3c0e092e
XL
21529 unsafe fn test_vld4_f64() {
21530 let a: [f64; 5] = [0., 1., 2., 2., 6.];
21531 let e: [f64; 4] = [1., 2., 2., 6.];
21532 let r: [f64; 4] = transmute(vld4_f64(a[1..].as_ptr()));
17df50a5
XL
21533 assert_eq!(r, e);
21534 }
21535
21536 #[simd_test(enable = "neon")]
3c0e092e
XL
21537 unsafe fn test_vld4q_f64() {
21538 let a: [f64; 9] = [0., 1., 2., 2., 6., 2., 6., 6., 8.];
21539 let e: [f64x2; 4] = [f64x2::new(1., 2.), f64x2::new(2., 6.), f64x2::new(2., 6.), f64x2::new(6., 8.)];
21540 let r: [f64x2; 4] = transmute(vld4q_f64(a[1..].as_ptr()));
17df50a5
XL
21541 assert_eq!(r, e);
21542 }
21543
21544 #[simd_test(enable = "neon")]
3c0e092e
XL
21545 unsafe fn test_vld4q_dup_s64() {
21546 let a: [i64; 9] = [0, 1, 1, 1, 1, 2, 4, 3, 5];
21547 let e: [i64x2; 4] = [i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1)];
21548 let r: [i64x2; 4] = transmute(vld4q_dup_s64(a[1..].as_ptr()));
17df50a5
XL
21549 assert_eq!(r, e);
21550 }
21551
3c0e092e
XL
21552 #[simd_test(enable = "neon")]
21553 unsafe fn test_vld4q_dup_u64() {
21554 let a: [u64; 9] = [0, 1, 1, 1, 1, 2, 4, 3, 5];
21555 let e: [u64x2; 4] = [u64x2::new(1, 1), u64x2::new(1, 1), u64x2::new(1, 1), u64x2::new(1, 1)];
21556 let r: [u64x2; 4] = transmute(vld4q_dup_u64(a[1..].as_ptr()));
17df50a5
XL
21557 assert_eq!(r, e);
21558 }
21559
21560 #[simd_test(enable = "neon")]
3c0e092e
XL
21561 unsafe fn test_vld4q_dup_p64() {
21562 let a: [u64; 9] = [0, 1, 1, 1, 1, 2, 4, 3, 5];
21563 let e: [i64x2; 4] = [i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1)];
21564 let r: [i64x2; 4] = transmute(vld4q_dup_p64(a[1..].as_ptr()));
17df50a5
XL
21565 assert_eq!(r, e);
21566 }
21567
21568 #[simd_test(enable = "neon")]
3c0e092e
XL
21569 unsafe fn test_vld4_dup_f64() {
21570 let a: [f64; 5] = [0., 1., 1., 1., 1.];
21571 let e: [f64; 4] = [1., 1., 1., 1.];
21572 let r: [f64; 4] = transmute(vld4_dup_f64(a[1..].as_ptr()));
17df50a5
XL
21573 assert_eq!(r, e);
21574 }
21575
21576 #[simd_test(enable = "neon")]
3c0e092e
XL
21577 unsafe fn test_vld4q_dup_f64() {
21578 let a: [f64; 9] = [0., 1., 1., 1., 1., 6., 4., 3., 5.];
21579 let e: [f64x2; 4] = [f64x2::new(1., 1.), f64x2::new(1., 1.), f64x2::new(1., 1.), f64x2::new(1., 1.)];
21580 let r: [f64x2; 4] = transmute(vld4q_dup_f64(a[1..].as_ptr()));
17df50a5
XL
21581 assert_eq!(r, e);
21582 }
21583
21584 #[simd_test(enable = "neon")]
3c0e092e
XL
21585 unsafe fn test_vld4q_lane_s8() {
21586 let a: [i8; 65] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16];
21587 let b: [i8x16; 4] = [i8x16::new(0, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), i8x16::new(1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)];
21588 let e: [i8x16; 4] = [i8x16::new(1, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), i8x16::new(2, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)];
21589 let r: [i8x16; 4] = transmute(vld4q_lane_s8::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21590 assert_eq!(r, e);
21591 }
21592
21593 #[simd_test(enable = "neon")]
3c0e092e
XL
21594 unsafe fn test_vld4_lane_s64() {
21595 let a: [i64; 5] = [0, 1, 2, 2, 2];
21596 let b: [i64x1; 4] = [i64x1::new(0), i64x1::new(2), i64x1::new(2), i64x1::new(2)];
21597 let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(2), i64x1::new(2), i64x1::new(2)];
21598 let r: [i64x1; 4] = transmute(vld4_lane_s64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21599 assert_eq!(r, e);
21600 }
21601
21602 #[simd_test(enable = "neon")]
3c0e092e
XL
21603 unsafe fn test_vld4q_lane_s64() {
21604 let a: [i64; 9] = [0, 1, 2, 2, 2, 5, 6, 7, 8];
21605 let b: [i64x2; 4] = [i64x2::new(0, 2), i64x2::new(2, 2), i64x2::new(2, 16), i64x2::new(2, 18)];
21606 let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(2, 2), i64x2::new(2, 16), i64x2::new(2, 18)];
21607 let r: [i64x2; 4] = transmute(vld4q_lane_s64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21608 assert_eq!(r, e);
21609 }
21610
21611 #[simd_test(enable = "neon")]
3c0e092e
XL
21612 unsafe fn test_vld4_lane_p64() {
21613 let a: [u64; 5] = [0, 1, 2, 2, 2];
21614 let b: [i64x1; 4] = [i64x1::new(0), i64x1::new(2), i64x1::new(2), i64x1::new(2)];
21615 let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(2), i64x1::new(2), i64x1::new(2)];
21616 let r: [i64x1; 4] = transmute(vld4_lane_p64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21617 assert_eq!(r, e);
21618 }
21619
21620 #[simd_test(enable = "neon")]
3c0e092e
XL
21621 unsafe fn test_vld4q_lane_p64() {
21622 let a: [u64; 9] = [0, 1, 2, 2, 2, 5, 6, 7, 8];
21623 let b: [i64x2; 4] = [i64x2::new(0, 2), i64x2::new(2, 2), i64x2::new(2, 16), i64x2::new(2, 18)];
21624 let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(2, 2), i64x2::new(2, 16), i64x2::new(2, 18)];
21625 let r: [i64x2; 4] = transmute(vld4q_lane_p64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21626 assert_eq!(r, e);
21627 }
21628
21629 #[simd_test(enable = "neon")]
3c0e092e
XL
21630 unsafe fn test_vld4q_lane_p8() {
21631 let a: [u8; 65] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16];
21632 let b: [i8x16; 4] = [i8x16::new(0, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), i8x16::new(1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)];
21633 let e: [i8x16; 4] = [i8x16::new(1, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), i8x16::new(2, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)];
21634 let r: [i8x16; 4] = transmute(vld4q_lane_p8::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21635 assert_eq!(r, e);
21636 }
21637
21638 #[simd_test(enable = "neon")]
3c0e092e
XL
21639 unsafe fn test_vld4q_lane_u8() {
21640 let a: [u8; 65] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16];
21641 let b: [u8x16; 4] = [u8x16::new(0, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), u8x16::new(11, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), u8x16::new(1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)];
21642 let e: [u8x16; 4] = [u8x16::new(1, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), u8x16::new(2, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), u8x16::new(2, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)];
21643 let r: [u8x16; 4] = transmute(vld4q_lane_u8::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21644 assert_eq!(r, e);
21645 }
21646
21647 #[simd_test(enable = "neon")]
3c0e092e
XL
21648 unsafe fn test_vld4_lane_u64() {
21649 let a: [u64; 5] = [0, 1, 2, 2, 2];
21650 let b: [u64x1; 4] = [u64x1::new(0), u64x1::new(2), u64x1::new(2), u64x1::new(2)];
21651 let e: [u64x1; 4] = [u64x1::new(1), u64x1::new(2), u64x1::new(2), u64x1::new(2)];
21652 let r: [u64x1; 4] = transmute(vld4_lane_u64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21653 assert_eq!(r, e);
21654 }
21655
21656 #[simd_test(enable = "neon")]
3c0e092e
XL
21657 unsafe fn test_vld4q_lane_u64() {
21658 let a: [u64; 9] = [0, 1, 2, 2, 2, 5, 6, 7, 8];
21659 let b: [u64x2; 4] = [u64x2::new(0, 2), u64x2::new(2, 2), u64x2::new(2, 16), u64x2::new(2, 18)];
21660 let e: [u64x2; 4] = [u64x2::new(1, 2), u64x2::new(2, 2), u64x2::new(2, 16), u64x2::new(2, 18)];
21661 let r: [u64x2; 4] = transmute(vld4q_lane_u64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21662 assert_eq!(r, e);
21663 }
21664
21665 #[simd_test(enable = "neon")]
3c0e092e
XL
21666 unsafe fn test_vld4_lane_f64() {
21667 let a: [f64; 5] = [0., 1., 2., 2., 2.];
21668 let b: [f64; 4] = [0., 2., 2., 2.];
21669 let e: [f64; 4] = [1., 2., 2., 2.];
21670 let r: [f64; 4] = transmute(vld4_lane_f64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21671 assert_eq!(r, e);
21672 }
21673
21674 #[simd_test(enable = "neon")]
3c0e092e
XL
21675 unsafe fn test_vld4q_lane_f64() {
21676 let a: [f64; 9] = [0., 1., 2., 2., 2., 5., 6., 7., 8.];
21677 let b: [f64x2; 4] = [f64x2::new(0., 2.), f64x2::new(2., 2.), f64x2::new(2., 16.), f64x2::new(2., 18.)];
21678 let e: [f64x2; 4] = [f64x2::new(1., 2.), f64x2::new(2., 2.), f64x2::new(2., 16.), f64x2::new(2., 18.)];
21679 let r: [f64x2; 4] = transmute(vld4q_lane_f64::<0>(a[1..].as_ptr(), transmute(b)));
17df50a5
XL
21680 assert_eq!(r, e);
21681 }
21682
21683 #[simd_test(enable = "neon")]
3c0e092e
XL
21684 unsafe fn test_vst1_lane_f64() {
21685 let a: [f64; 2] = [0., 1.];
21686 let e: [f64; 1] = [1.];
21687 let mut r: [f64; 1] = [0f64; 1];
a2a8927a 21688 vst1_lane_f64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21689 assert_eq!(r, e);
21690 }
21691
21692 #[simd_test(enable = "neon")]
3c0e092e
XL
21693 unsafe fn test_vst1q_lane_f64() {
21694 let a: [f64; 3] = [0., 1., 2.];
21695 let e: [f64; 2] = [1., 0.];
21696 let mut r: [f64; 2] = [0f64; 2];
a2a8927a 21697 vst1q_lane_f64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21698 assert_eq!(r, e);
21699 }
21700
21701 #[simd_test(enable = "neon")]
3c0e092e
XL
21702 unsafe fn test_vst1_f64_x2() {
21703 let a: [f64; 3] = [0., 1., 2.];
21704 let e: [f64; 2] = [1., 2.];
21705 let mut r: [f64; 2] = [0f64; 2];
a2a8927a 21706 vst1_f64_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21707 assert_eq!(r, e);
21708 }
21709
21710 #[simd_test(enable = "neon")]
3c0e092e
XL
21711 unsafe fn test_vst1q_f64_x2() {
21712 let a: [f64; 5] = [0., 1., 2., 3., 4.];
21713 let e: [f64; 4] = [1., 2., 3., 4.];
21714 let mut r: [f64; 4] = [0f64; 4];
a2a8927a 21715 vst1q_f64_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21716 assert_eq!(r, e);
21717 }
21718
21719 #[simd_test(enable = "neon")]
3c0e092e
XL
21720 unsafe fn test_vst1_f64_x3() {
21721 let a: [f64; 4] = [0., 1., 2., 3.];
21722 let e: [f64; 3] = [1., 2., 3.];
21723 let mut r: [f64; 3] = [0f64; 3];
a2a8927a 21724 vst1_f64_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21725 assert_eq!(r, e);
21726 }
21727
21728 #[simd_test(enable = "neon")]
3c0e092e
XL
21729 unsafe fn test_vst1q_f64_x3() {
21730 let a: [f64; 7] = [0., 1., 2., 3., 4., 5., 6.];
21731 let e: [f64; 6] = [1., 2., 3., 4., 5., 6.];
21732 let mut r: [f64; 6] = [0f64; 6];
a2a8927a 21733 vst1q_f64_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21734 assert_eq!(r, e);
21735 }
21736
21737 #[simd_test(enable = "neon")]
3c0e092e
XL
21738 unsafe fn test_vst1_f64_x4() {
21739 let a: [f64; 5] = [0., 1., 2., 3., 4.];
21740 let e: [f64; 4] = [1., 2., 3., 4.];
21741 let mut r: [f64; 4] = [0f64; 4];
a2a8927a 21742 vst1_f64_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21743 assert_eq!(r, e);
21744 }
21745
21746 #[simd_test(enable = "neon")]
3c0e092e
XL
21747 unsafe fn test_vst1q_f64_x4() {
21748 let a: [f64; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.];
21749 let e: [f64; 8] = [1., 2., 3., 4., 5., 6., 7., 8.];
21750 let mut r: [f64; 8] = [0f64; 8];
a2a8927a 21751 vst1q_f64_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21752 assert_eq!(r, e);
21753 }
21754
21755 #[simd_test(enable = "neon")]
3c0e092e
XL
21756 unsafe fn test_vst2q_s64() {
21757 let a: [i64; 5] = [0, 1, 2, 2, 3];
21758 let e: [i64; 4] = [1, 2, 2, 3];
21759 let mut r: [i64; 4] = [0i64; 4];
a2a8927a 21760 vst2q_s64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21761 assert_eq!(r, e);
21762 }
21763
21764 #[simd_test(enable = "neon")]
3c0e092e
XL
21765 unsafe fn test_vst2q_u64() {
21766 let a: [u64; 5] = [0, 1, 2, 2, 3];
21767 let e: [u64; 4] = [1, 2, 2, 3];
21768 let mut r: [u64; 4] = [0u64; 4];
a2a8927a 21769 vst2q_u64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21770 assert_eq!(r, e);
21771 }
21772
21773 #[simd_test(enable = "neon")]
3c0e092e
XL
21774 unsafe fn test_vst2q_p64() {
21775 let a: [u64; 5] = [0, 1, 2, 2, 3];
21776 let e: [u64; 4] = [1, 2, 2, 3];
21777 let mut r: [u64; 4] = [0u64; 4];
a2a8927a 21778 vst2q_p64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21779 assert_eq!(r, e);
21780 }
21781
21782 #[simd_test(enable = "neon")]
3c0e092e
XL
21783 unsafe fn test_vst2_f64() {
21784 let a: [f64; 3] = [0., 1., 2.];
21785 let e: [f64; 2] = [1., 2.];
21786 let mut r: [f64; 2] = [0f64; 2];
a2a8927a 21787 vst2_f64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21788 assert_eq!(r, e);
21789 }
21790
21791 #[simd_test(enable = "neon")]
3c0e092e
XL
21792 unsafe fn test_vst2q_f64() {
21793 let a: [f64; 5] = [0., 1., 2., 2., 3.];
21794 let e: [f64; 4] = [1., 2., 2., 3.];
21795 let mut r: [f64; 4] = [0f64; 4];
a2a8927a 21796 vst2q_f64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21797 assert_eq!(r, e);
21798 }
21799
21800 #[simd_test(enable = "neon")]
3c0e092e
XL
21801 unsafe fn test_vst2q_lane_s8() {
21802 let a: [i8; 33] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17];
21803 let e: [i8; 32] = [1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
21804 let mut r: [i8; 32] = [0i8; 32];
a2a8927a 21805 vst2q_lane_s8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21806 assert_eq!(r, e);
21807 }
21808
21809 #[simd_test(enable = "neon")]
3c0e092e
XL
21810 unsafe fn test_vst2_lane_s64() {
21811 let a: [i64; 3] = [0, 1, 2];
21812 let e: [i64; 2] = [1, 2];
21813 let mut r: [i64; 2] = [0i64; 2];
a2a8927a 21814 vst2_lane_s64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21815 assert_eq!(r, e);
21816 }
21817
21818 #[simd_test(enable = "neon")]
3c0e092e
XL
21819 unsafe fn test_vst2q_lane_s64() {
21820 let a: [i64; 5] = [0, 1, 2, 2, 3];
21821 let e: [i64; 4] = [1, 2, 0, 0];
21822 let mut r: [i64; 4] = [0i64; 4];
a2a8927a 21823 vst2q_lane_s64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21824 assert_eq!(r, e);
21825 }
21826
21827 #[simd_test(enable = "neon")]
3c0e092e
XL
21828 unsafe fn test_vst2q_lane_u8() {
21829 let a: [u8; 33] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17];
21830 let e: [u8; 32] = [1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
21831 let mut r: [u8; 32] = [0u8; 32];
a2a8927a 21832 vst2q_lane_u8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21833 assert_eq!(r, e);
21834 }
21835
21836 #[simd_test(enable = "neon")]
3c0e092e
XL
21837 unsafe fn test_vst2_lane_u64() {
21838 let a: [u64; 3] = [0, 1, 2];
21839 let e: [u64; 2] = [1, 2];
21840 let mut r: [u64; 2] = [0u64; 2];
a2a8927a 21841 vst2_lane_u64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21842 assert_eq!(r, e);
21843 }
21844
21845 #[simd_test(enable = "neon")]
3c0e092e
XL
21846 unsafe fn test_vst2q_lane_u64() {
21847 let a: [u64; 5] = [0, 1, 2, 2, 3];
21848 let e: [u64; 4] = [1, 2, 0, 0];
21849 let mut r: [u64; 4] = [0u64; 4];
a2a8927a 21850 vst2q_lane_u64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21851 assert_eq!(r, e);
21852 }
21853
21854 #[simd_test(enable = "neon")]
3c0e092e
XL
21855 unsafe fn test_vst2q_lane_p8() {
21856 let a: [u8; 33] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17];
21857 let e: [u8; 32] = [1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
21858 let mut r: [u8; 32] = [0u8; 32];
a2a8927a 21859 vst2q_lane_p8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21860 assert_eq!(r, e);
21861 }
21862
21863 #[simd_test(enable = "neon")]
3c0e092e
XL
21864 unsafe fn test_vst2_lane_p64() {
21865 let a: [u64; 3] = [0, 1, 2];
21866 let e: [u64; 2] = [1, 2];
21867 let mut r: [u64; 2] = [0u64; 2];
a2a8927a 21868 vst2_lane_p64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21869 assert_eq!(r, e);
21870 }
21871
21872 #[simd_test(enable = "neon")]
3c0e092e
XL
21873 unsafe fn test_vst2q_lane_p64() {
21874 let a: [u64; 5] = [0, 1, 2, 2, 3];
21875 let e: [u64; 4] = [1, 2, 0, 0];
21876 let mut r: [u64; 4] = [0u64; 4];
a2a8927a 21877 vst2q_lane_p64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21878 assert_eq!(r, e);
21879 }
21880
21881 #[simd_test(enable = "neon")]
3c0e092e
XL
21882 unsafe fn test_vst2_lane_f64() {
21883 let a: [f64; 3] = [0., 1., 2.];
21884 let e: [f64; 2] = [1., 2.];
21885 let mut r: [f64; 2] = [0f64; 2];
a2a8927a 21886 vst2_lane_f64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21887 assert_eq!(r, e);
21888 }
21889
21890 #[simd_test(enable = "neon")]
3c0e092e
XL
21891 unsafe fn test_vst2q_lane_f64() {
21892 let a: [f64; 5] = [0., 1., 2., 2., 3.];
21893 let e: [f64; 4] = [1., 2., 0., 0.];
21894 let mut r: [f64; 4] = [0f64; 4];
a2a8927a 21895 vst2q_lane_f64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21896 assert_eq!(r, e);
21897 }
21898
21899 #[simd_test(enable = "neon")]
3c0e092e
XL
21900 unsafe fn test_vst3q_s64() {
21901 let a: [i64; 7] = [0, 1, 2, 2, 4, 2, 4];
21902 let e: [i64; 6] = [1, 2, 2, 2, 4, 4];
21903 let mut r: [i64; 6] = [0i64; 6];
a2a8927a 21904 vst3q_s64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21905 assert_eq!(r, e);
21906 }
21907
21908 #[simd_test(enable = "neon")]
3c0e092e
XL
21909 unsafe fn test_vst3q_u64() {
21910 let a: [u64; 7] = [0, 1, 2, 2, 4, 2, 4];
21911 let e: [u64; 6] = [1, 2, 2, 2, 4, 4];
21912 let mut r: [u64; 6] = [0u64; 6];
a2a8927a 21913 vst3q_u64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21914 assert_eq!(r, e);
21915 }
21916
3c0e092e
XL
21917 #[simd_test(enable = "neon")]
21918 unsafe fn test_vst3q_p64() {
21919 let a: [u64; 7] = [0, 1, 2, 2, 4, 2, 4];
21920 let e: [u64; 6] = [1, 2, 2, 2, 4, 4];
21921 let mut r: [u64; 6] = [0u64; 6];
a2a8927a 21922 vst3q_p64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21923 assert_eq!(r, e);
21924 }
21925
21926 #[simd_test(enable = "neon")]
3c0e092e
XL
21927 unsafe fn test_vst3_f64() {
21928 let a: [f64; 4] = [0., 1., 2., 2.];
21929 let e: [f64; 3] = [1., 2., 2.];
21930 let mut r: [f64; 3] = [0f64; 3];
a2a8927a 21931 vst3_f64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21932 assert_eq!(r, e);
21933 }
21934
21935 #[simd_test(enable = "neon")]
3c0e092e
XL
21936 unsafe fn test_vst3q_f64() {
21937 let a: [f64; 7] = [0., 1., 2., 2., 4., 2., 4.];
21938 let e: [f64; 6] = [1., 2., 2., 2., 4., 4.];
21939 let mut r: [f64; 6] = [0f64; 6];
a2a8927a 21940 vst3q_f64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21941 assert_eq!(r, e);
21942 }
21943
21944 #[simd_test(enable = "neon")]
3c0e092e
XL
21945 unsafe fn test_vst3q_lane_s8() {
21946 let a: [i8; 49] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32, 2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48];
21947 let e: [i8; 48] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
21948 let mut r: [i8; 48] = [0i8; 48];
a2a8927a 21949 vst3q_lane_s8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21950 assert_eq!(r, e);
21951 }
21952
21953 #[simd_test(enable = "neon")]
3c0e092e
XL
21954 unsafe fn test_vst3_lane_s64() {
21955 let a: [i64; 4] = [0, 1, 2, 2];
21956 let e: [i64; 3] = [1, 2, 2];
21957 let mut r: [i64; 3] = [0i64; 3];
a2a8927a 21958 vst3_lane_s64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21959 assert_eq!(r, e);
21960 }
21961
21962 #[simd_test(enable = "neon")]
3c0e092e
XL
21963 unsafe fn test_vst3q_lane_s64() {
21964 let a: [i64; 7] = [0, 1, 2, 2, 4, 2, 4];
21965 let e: [i64; 6] = [1, 2, 2, 0, 0, 0];
21966 let mut r: [i64; 6] = [0i64; 6];
a2a8927a 21967 vst3q_lane_s64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21968 assert_eq!(r, e);
21969 }
21970
21971 #[simd_test(enable = "neon")]
3c0e092e
XL
21972 unsafe fn test_vst3q_lane_u8() {
21973 let a: [u8; 49] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32, 2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48];
21974 let e: [u8; 48] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
21975 let mut r: [u8; 48] = [0u8; 48];
a2a8927a 21976 vst3q_lane_u8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21977 assert_eq!(r, e);
21978 }
21979
21980 #[simd_test(enable = "neon")]
3c0e092e
XL
21981 unsafe fn test_vst3_lane_u64() {
21982 let a: [u64; 4] = [0, 1, 2, 2];
21983 let e: [u64; 3] = [1, 2, 2];
21984 let mut r: [u64; 3] = [0u64; 3];
a2a8927a 21985 vst3_lane_u64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21986 assert_eq!(r, e);
21987 }
21988
21989 #[simd_test(enable = "neon")]
3c0e092e
XL
21990 unsafe fn test_vst3q_lane_u64() {
21991 let a: [u64; 7] = [0, 1, 2, 2, 4, 2, 4];
21992 let e: [u64; 6] = [1, 2, 2, 0, 0, 0];
21993 let mut r: [u64; 6] = [0u64; 6];
a2a8927a 21994 vst3q_lane_u64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
21995 assert_eq!(r, e);
21996 }
21997
21998 #[simd_test(enable = "neon")]
3c0e092e
XL
21999 unsafe fn test_vst3q_lane_p8() {
22000 let a: [u8; 49] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32, 2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48];
22001 let e: [u8; 48] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
22002 let mut r: [u8; 48] = [0u8; 48];
a2a8927a 22003 vst3q_lane_p8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22004 assert_eq!(r, e);
22005 }
22006
22007 #[simd_test(enable = "neon")]
3c0e092e
XL
22008 unsafe fn test_vst3_lane_p64() {
22009 let a: [u64; 4] = [0, 1, 2, 2];
22010 let e: [u64; 3] = [1, 2, 2];
22011 let mut r: [u64; 3] = [0u64; 3];
a2a8927a 22012 vst3_lane_p64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22013 assert_eq!(r, e);
22014 }
22015
22016 #[simd_test(enable = "neon")]
3c0e092e
XL
22017 unsafe fn test_vst3q_lane_p64() {
22018 let a: [u64; 7] = [0, 1, 2, 2, 4, 2, 4];
22019 let e: [u64; 6] = [1, 2, 2, 0, 0, 0];
22020 let mut r: [u64; 6] = [0u64; 6];
a2a8927a 22021 vst3q_lane_p64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22022 assert_eq!(r, e);
22023 }
22024
22025 #[simd_test(enable = "neon")]
3c0e092e
XL
22026 unsafe fn test_vst3_lane_f64() {
22027 let a: [f64; 4] = [0., 1., 2., 2.];
22028 let e: [f64; 3] = [1., 2., 2.];
22029 let mut r: [f64; 3] = [0f64; 3];
a2a8927a 22030 vst3_lane_f64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22031 assert_eq!(r, e);
22032 }
22033
22034 #[simd_test(enable = "neon")]
3c0e092e
XL
22035 unsafe fn test_vst3q_lane_f64() {
22036 let a: [f64; 7] = [0., 1., 2., 2., 3., 2., 3.];
22037 let e: [f64; 6] = [1., 2., 2., 0., 0., 0.];
22038 let mut r: [f64; 6] = [0f64; 6];
a2a8927a 22039 vst3q_lane_f64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22040 assert_eq!(r, e);
22041 }
22042
22043 #[simd_test(enable = "neon")]
3c0e092e
XL
22044 unsafe fn test_vst4q_s64() {
22045 let a: [i64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
22046 let e: [i64; 8] = [1, 2, 2, 6, 2, 6, 6, 8];
22047 let mut r: [i64; 8] = [0i64; 8];
a2a8927a 22048 vst4q_s64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22049 assert_eq!(r, e);
22050 }
22051
22052 #[simd_test(enable = "neon")]
3c0e092e
XL
22053 unsafe fn test_vst4q_u64() {
22054 let a: [u64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
22055 let e: [u64; 8] = [1, 2, 2, 6, 2, 6, 6, 8];
22056 let mut r: [u64; 8] = [0u64; 8];
a2a8927a 22057 vst4q_u64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22058 assert_eq!(r, e);
22059 }
22060
22061 #[simd_test(enable = "neon")]
3c0e092e
XL
22062 unsafe fn test_vst4q_p64() {
22063 let a: [u64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
22064 let e: [u64; 8] = [1, 2, 2, 6, 2, 6, 6, 8];
22065 let mut r: [u64; 8] = [0u64; 8];
a2a8927a 22066 vst4q_p64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22067 assert_eq!(r, e);
22068 }
22069
22070 #[simd_test(enable = "neon")]
3c0e092e
XL
22071 unsafe fn test_vst4_f64() {
22072 let a: [f64; 5] = [0., 1., 2., 2., 6.];
22073 let e: [f64; 4] = [1., 2., 2., 6.];
22074 let mut r: [f64; 4] = [0f64; 4];
a2a8927a 22075 vst4_f64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22076 assert_eq!(r, e);
22077 }
22078
22079 #[simd_test(enable = "neon")]
3c0e092e
XL
22080 unsafe fn test_vst4q_f64() {
22081 let a: [f64; 9] = [0., 1., 2., 2., 6., 2., 6., 6., 8.];
22082 let e: [f64; 8] = [1., 2., 2., 6., 2., 6., 6., 8.];
22083 let mut r: [f64; 8] = [0f64; 8];
a2a8927a 22084 vst4q_f64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22085 assert_eq!(r, e);
22086 }
22087
22088 #[simd_test(enable = "neon")]
3c0e092e
XL
22089 unsafe fn test_vst4q_lane_s8() {
22090 let a: [i8; 65] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64];
22091 let e: [i8; 64] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
22092 let mut r: [i8; 64] = [0i8; 64];
a2a8927a 22093 vst4q_lane_s8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22094 assert_eq!(r, e);
22095 }
22096
22097 #[simd_test(enable = "neon")]
3c0e092e
XL
22098 unsafe fn test_vst4_lane_s64() {
22099 let a: [i64; 5] = [0, 1, 2, 2, 6];
22100 let e: [i64; 4] = [1, 2, 2, 6];
22101 let mut r: [i64; 4] = [0i64; 4];
a2a8927a 22102 vst4_lane_s64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22103 assert_eq!(r, e);
22104 }
22105
22106 #[simd_test(enable = "neon")]
3c0e092e
XL
22107 unsafe fn test_vst4q_lane_s64() {
22108 let a: [i64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
22109 let e: [i64; 8] = [1, 2, 2, 6, 0, 0, 0, 0];
22110 let mut r: [i64; 8] = [0i64; 8];
a2a8927a 22111 vst4q_lane_s64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22112 assert_eq!(r, e);
22113 }
22114
22115 #[simd_test(enable = "neon")]
3c0e092e
XL
22116 unsafe fn test_vst4q_lane_u8() {
22117 let a: [u8; 65] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64];
22118 let e: [u8; 64] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
22119 let mut r: [u8; 64] = [0u8; 64];
a2a8927a 22120 vst4q_lane_u8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22121 assert_eq!(r, e);
22122 }
22123
22124 #[simd_test(enable = "neon")]
3c0e092e
XL
22125 unsafe fn test_vst4_lane_u64() {
22126 let a: [u64; 5] = [0, 1, 2, 2, 6];
22127 let e: [u64; 4] = [1, 2, 2, 6];
22128 let mut r: [u64; 4] = [0u64; 4];
a2a8927a 22129 vst4_lane_u64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22130 assert_eq!(r, e);
22131 }
22132
22133 #[simd_test(enable = "neon")]
3c0e092e
XL
22134 unsafe fn test_vst4q_lane_u64() {
22135 let a: [u64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
22136 let e: [u64; 8] = [1, 2, 2, 6, 0, 0, 0, 0];
22137 let mut r: [u64; 8] = [0u64; 8];
a2a8927a 22138 vst4q_lane_u64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22139 assert_eq!(r, e);
22140 }
22141
22142 #[simd_test(enable = "neon")]
3c0e092e
XL
22143 unsafe fn test_vst4q_lane_p8() {
22144 let a: [u8; 65] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64];
22145 let e: [u8; 64] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
22146 let mut r: [u8; 64] = [0u8; 64];
a2a8927a 22147 vst4q_lane_p8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22148 assert_eq!(r, e);
22149 }
22150
22151 #[simd_test(enable = "neon")]
3c0e092e
XL
22152 unsafe fn test_vst4_lane_p64() {
22153 let a: [u64; 5] = [0, 1, 2, 2, 6];
22154 let e: [u64; 4] = [1, 2, 2, 6];
22155 let mut r: [u64; 4] = [0u64; 4];
a2a8927a 22156 vst4_lane_p64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22157 assert_eq!(r, e);
22158 }
22159
22160 #[simd_test(enable = "neon")]
3c0e092e
XL
22161 unsafe fn test_vst4q_lane_p64() {
22162 let a: [u64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
22163 let e: [u64; 8] = [1, 2, 2, 6, 0, 0, 0, 0];
22164 let mut r: [u64; 8] = [0u64; 8];
a2a8927a 22165 vst4q_lane_p64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22166 assert_eq!(r, e);
22167 }
22168
22169 #[simd_test(enable = "neon")]
3c0e092e
XL
22170 unsafe fn test_vst4_lane_f64() {
22171 let a: [f64; 5] = [0., 1., 2., 2., 6.];
22172 let e: [f64; 4] = [1., 2., 2., 6.];
22173 let mut r: [f64; 4] = [0f64; 4];
a2a8927a 22174 vst4_lane_f64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22175 assert_eq!(r, e);
22176 }
22177
22178 #[simd_test(enable = "neon")]
3c0e092e
XL
22179 unsafe fn test_vst4q_lane_f64() {
22180 let a: [f64; 9] = [0., 1., 2., 2., 6., 2., 6., 6., 8.];
22181 let e: [f64; 8] = [1., 2., 2., 6., 0., 0., 0., 0.];
22182 let mut r: [f64; 8] = [0f64; 8];
a2a8927a 22183 vst4q_lane_f64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
17df50a5
XL
22184 assert_eq!(r, e);
22185 }
22186
22187 #[simd_test(enable = "neon")]
3c0e092e
XL
22188 unsafe fn test_vmul_f64() {
22189 let a: f64 = 1.0;
22190 let b: f64 = 2.0;
22191 let e: f64 = 2.0;
22192 let r: f64 = transmute(vmul_f64(transmute(a), transmute(b)));
17df50a5
XL
22193 assert_eq!(r, e);
22194 }
22195
22196 #[simd_test(enable = "neon")]
3c0e092e
XL
22197 unsafe fn test_vmulq_f64() {
22198 let a: f64x2 = f64x2::new(1.0, 2.0);
22199 let b: f64x2 = f64x2::new(2.0, 3.0);
22200 let e: f64x2 = f64x2::new(2.0, 6.0);
22201 let r: f64x2 = transmute(vmulq_f64(transmute(a), transmute(b)));
17df50a5
XL
22202 assert_eq!(r, e);
22203 }
22204
22205 #[simd_test(enable = "neon")]
3c0e092e
XL
22206 unsafe fn test_vmul_n_f64() {
22207 let a: f64 = 1.;
22208 let b: f64 = 2.;
22209 let e: f64 = 2.;
22210 let r: f64 = transmute(vmul_n_f64(transmute(a), transmute(b)));
17df50a5
XL
22211 assert_eq!(r, e);
22212 }
22213
22214 #[simd_test(enable = "neon")]
3c0e092e
XL
22215 unsafe fn test_vmulq_n_f64() {
22216 let a: f64x2 = f64x2::new(1., 2.);
22217 let b: f64 = 2.;
22218 let e: f64x2 = f64x2::new(2., 4.);
22219 let r: f64x2 = transmute(vmulq_n_f64(transmute(a), transmute(b)));
17df50a5
XL
22220 assert_eq!(r, e);
22221 }
22222
22223 #[simd_test(enable = "neon")]
3c0e092e
XL
22224 unsafe fn test_vmul_lane_f64() {
22225 let a: f64 = 1.;
22226 let b: f64 = 2.;
22227 let e: f64 = 2.;
22228 let r: f64 = transmute(vmul_lane_f64::<0>(transmute(a), transmute(b)));
17df50a5
XL
22229 assert_eq!(r, e);
22230 }
22231
22232 #[simd_test(enable = "neon")]
3c0e092e
XL
22233 unsafe fn test_vmul_laneq_f64() {
22234 let a: f64 = 1.;
22235 let b: f64x2 = f64x2::new(2., 0.);
22236 let e: f64 = 2.;
22237 let r: f64 = transmute(vmul_laneq_f64::<0>(transmute(a), transmute(b)));
17df50a5
XL
22238 assert_eq!(r, e);
22239 }
22240
22241 #[simd_test(enable = "neon")]
3c0e092e
XL
22242 unsafe fn test_vmulq_lane_f64() {
22243 let a: f64x2 = f64x2::new(1., 2.);
22244 let b: f64 = 2.;
22245 let e: f64x2 = f64x2::new(2., 4.);
22246 let r: f64x2 = transmute(vmulq_lane_f64::<0>(transmute(a), transmute(b)));
17df50a5
XL
22247 assert_eq!(r, e);
22248 }
22249
22250 #[simd_test(enable = "neon")]
3c0e092e
XL
22251 unsafe fn test_vmulq_laneq_f64() {
22252 let a: f64x2 = f64x2::new(1., 2.);
22253 let b: f64x2 = f64x2::new(2., 0.);
22254 let e: f64x2 = f64x2::new(2., 4.);
22255 let r: f64x2 = transmute(vmulq_laneq_f64::<0>(transmute(a), transmute(b)));
17df50a5
XL
22256 assert_eq!(r, e);
22257 }
22258
22259 #[simd_test(enable = "neon")]
3c0e092e
XL
22260 unsafe fn test_vmuls_lane_f32() {
22261 let a: f32 = 1.;
22262 let b: f32x2 = f32x2::new(2., 0.);
22263 let e: f32 = 2.;
22264 let r: f32 = transmute(vmuls_lane_f32::<0>(transmute(a), transmute(b)));
22265 assert_eq!(r, e);
22266 }
22267
22268 #[simd_test(enable = "neon")]
22269 unsafe fn test_vmuls_laneq_f32() {
22270 let a: f32 = 1.;
22271 let b: f32x4 = f32x4::new(2., 0., 0., 0.);
22272 let e: f32 = 2.;
22273 let r: f32 = transmute(vmuls_laneq_f32::<0>(transmute(a), transmute(b)));
22274 assert_eq!(r, e);
22275 }
22276
22277 #[simd_test(enable = "neon")]
22278 unsafe fn test_vmuld_lane_f64() {
22279 let a: f64 = 1.;
22280 let b: f64 = 2.;
22281 let e: f64 = 2.;
22282 let r: f64 = transmute(vmuld_lane_f64::<0>(transmute(a), transmute(b)));
17df50a5
XL
22283 assert_eq!(r, e);
22284 }
22285
22286 #[simd_test(enable = "neon")]
3c0e092e
XL
22287 unsafe fn test_vmuld_laneq_f64() {
22288 let a: f64 = 1.;
22289 let b: f64x2 = f64x2::new(2., 0.);
22290 let e: f64 = 2.;
22291 let r: f64 = transmute(vmuld_laneq_f64::<0>(transmute(a), transmute(b)));
17df50a5
XL
22292 assert_eq!(r, e);
22293 }
22294
22295 #[simd_test(enable = "neon")]
3c0e092e
XL
22296 unsafe fn test_vmull_high_s8() {
22297 let a: i8x16 = i8x16::new(1, 2, 9, 10, 9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16);
22298 let b: i8x16 = i8x16::new(1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2);
22299 let e: i16x8 = i16x8::new(9, 20, 11, 24, 13, 28, 15, 32);
22300 let r: i16x8 = transmute(vmull_high_s8(transmute(a), transmute(b)));
17df50a5
XL
22301 assert_eq!(r, e);
22302 }
22303
22304 #[simd_test(enable = "neon")]
3c0e092e
XL
22305 unsafe fn test_vmull_high_s16() {
22306 let a: i16x8 = i16x8::new(1, 2, 9, 10, 9, 10, 11, 12);
22307 let b: i16x8 = i16x8::new(1, 2, 1, 2, 1, 2, 1, 2);
22308 let e: i32x4 = i32x4::new(9, 20, 11, 24);
22309 let r: i32x4 = transmute(vmull_high_s16(transmute(a), transmute(b)));
17df50a5
XL
22310 assert_eq!(r, e);
22311 }
22312
22313 #[simd_test(enable = "neon")]
3c0e092e
XL
22314 unsafe fn test_vmull_high_s32() {
22315 let a: i32x4 = i32x4::new(1, 2, 9, 10);
22316 let b: i32x4 = i32x4::new(1, 2, 1, 2);
22317 let e: i64x2 = i64x2::new(9, 20);
22318 let r: i64x2 = transmute(vmull_high_s32(transmute(a), transmute(b)));
17df50a5
XL
22319 assert_eq!(r, e);
22320 }
22321
22322 #[simd_test(enable = "neon")]
3c0e092e
XL
22323 unsafe fn test_vmull_high_u8() {
22324 let a: u8x16 = u8x16::new(1, 2, 9, 10, 9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16);
22325 let b: u8x16 = u8x16::new(1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2);
22326 let e: u16x8 = u16x8::new(9, 20, 11, 24, 13, 28, 15, 32);
22327 let r: u16x8 = transmute(vmull_high_u8(transmute(a), transmute(b)));
17df50a5
XL
22328 assert_eq!(r, e);
22329 }
22330
22331 #[simd_test(enable = "neon")]
3c0e092e
XL
22332 unsafe fn test_vmull_high_u16() {
22333 let a: u16x8 = u16x8::new(1, 2, 9, 10, 9, 10, 11, 12);
22334 let b: u16x8 = u16x8::new(1, 2, 1, 2, 1, 2, 1, 2);
22335 let e: u32x4 = u32x4::new(9, 20, 11, 24);
22336 let r: u32x4 = transmute(vmull_high_u16(transmute(a), transmute(b)));
17df50a5
XL
22337 assert_eq!(r, e);
22338 }
22339
22340 #[simd_test(enable = "neon")]
3c0e092e
XL
22341 unsafe fn test_vmull_high_u32() {
22342 let a: u32x4 = u32x4::new(1, 2, 9, 10);
22343 let b: u32x4 = u32x4::new(1, 2, 1, 2);
22344 let e: u64x2 = u64x2::new(9, 20);
22345 let r: u64x2 = transmute(vmull_high_u32(transmute(a), transmute(b)));
17df50a5
XL
22346 assert_eq!(r, e);
22347 }
22348
353b0b11 22349 #[simd_test(enable = "neon,aes")]
3c0e092e
XL
22350 unsafe fn test_vmull_p64() {
22351 let a: p64 = 15;
22352 let b: p64 = 3;
22353 let e: p128 = 17;
22354 let r: p128 = transmute(vmull_p64(transmute(a), transmute(b)));
17df50a5
XL
22355 assert_eq!(r, e);
22356 }
22357
22358 #[simd_test(enable = "neon")]
3c0e092e
XL
22359 unsafe fn test_vmull_high_p8() {
22360 let a: i8x16 = i8x16::new(1, 2, 9, 10, 9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16);
22361 let b: i8x16 = i8x16::new(1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3);
22362 let e: i16x8 = i16x8::new(9, 30, 11, 20, 13, 18, 15, 48);
22363 let r: i16x8 = transmute(vmull_high_p8(transmute(a), transmute(b)));
17df50a5
XL
22364 assert_eq!(r, e);
22365 }
22366
353b0b11 22367 #[simd_test(enable = "neon,aes")]
3c0e092e
XL
22368 unsafe fn test_vmull_high_p64() {
22369 let a: i64x2 = i64x2::new(1, 15);
22370 let b: i64x2 = i64x2::new(1, 3);
22371 let e: p128 = 17;
22372 let r: p128 = transmute(vmull_high_p64(transmute(a), transmute(b)));
17df50a5
XL
22373 assert_eq!(r, e);
22374 }
22375
22376 #[simd_test(enable = "neon")]
3c0e092e
XL
22377 unsafe fn test_vmull_high_n_s16() {
22378 let a: i16x8 = i16x8::new(1, 2, 9, 10, 9, 10, 11, 12);
22379 let b: i16 = 2;
22380 let e: i32x4 = i32x4::new(18, 20, 22, 24);
22381 let r: i32x4 = transmute(vmull_high_n_s16(transmute(a), transmute(b)));
17df50a5
XL
22382 assert_eq!(r, e);
22383 }
22384
22385 #[simd_test(enable = "neon")]
3c0e092e
XL
22386 unsafe fn test_vmull_high_n_s32() {
22387 let a: i32x4 = i32x4::new(1, 2, 9, 10);
22388 let b: i32 = 2;
22389 let e: i64x2 = i64x2::new(18, 20);
22390 let r: i64x2 = transmute(vmull_high_n_s32(transmute(a), transmute(b)));
17df50a5
XL
22391 assert_eq!(r, e);
22392 }
22393
22394 #[simd_test(enable = "neon")]
3c0e092e
XL
22395 unsafe fn test_vmull_high_n_u16() {
22396 let a: u16x8 = u16x8::new(1, 2, 9, 10, 9, 10, 11, 12);
22397 let b: u16 = 2;
22398 let e: u32x4 = u32x4::new(18, 20, 22, 24);
22399 let r: u32x4 = transmute(vmull_high_n_u16(transmute(a), transmute(b)));
17df50a5
XL
22400 assert_eq!(r, e);
22401 }
22402
22403 #[simd_test(enable = "neon")]
3c0e092e
XL
22404 unsafe fn test_vmull_high_n_u32() {
22405 let a: u32x4 = u32x4::new(1, 2, 9, 10);
22406 let b: u32 = 2;
22407 let e: u64x2 = u64x2::new(18, 20);
22408 let r: u64x2 = transmute(vmull_high_n_u32(transmute(a), transmute(b)));
17df50a5
XL
22409 assert_eq!(r, e);
22410 }
22411
22412 #[simd_test(enable = "neon")]
3c0e092e
XL
22413 unsafe fn test_vmull_high_lane_s16() {
22414 let a: i16x8 = i16x8::new(1, 2, 9, 10, 9, 10, 11, 12);
22415 let b: i16x4 = i16x4::new(0, 2, 0, 0);
22416 let e: i32x4 = i32x4::new(18, 20, 22, 24);
22417 let r: i32x4 = transmute(vmull_high_lane_s16::<1>(transmute(a), transmute(b)));
17df50a5
XL
22418 assert_eq!(r, e);
22419 }
22420
22421 #[simd_test(enable = "neon")]
3c0e092e
XL
22422 unsafe fn test_vmull_high_laneq_s16() {
22423 let a: i16x8 = i16x8::new(1, 2, 9, 10, 9, 10, 11, 12);
22424 let b: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
22425 let e: i32x4 = i32x4::new(18, 20, 22, 24);
22426 let r: i32x4 = transmute(vmull_high_laneq_s16::<1>(transmute(a), transmute(b)));
17df50a5
XL
22427 assert_eq!(r, e);
22428 }
22429
22430 #[simd_test(enable = "neon")]
3c0e092e
XL
22431 unsafe fn test_vmull_high_lane_s32() {
22432 let a: i32x4 = i32x4::new(1, 2, 9, 10);
22433 let b: i32x2 = i32x2::new(0, 2);
22434 let e: i64x2 = i64x2::new(18, 20);
22435 let r: i64x2 = transmute(vmull_high_lane_s32::<1>(transmute(a), transmute(b)));
17df50a5
XL
22436 assert_eq!(r, e);
22437 }
22438
22439 #[simd_test(enable = "neon")]
3c0e092e
XL
22440 unsafe fn test_vmull_high_laneq_s32() {
22441 let a: i32x4 = i32x4::new(1, 2, 9, 10);
22442 let b: i32x4 = i32x4::new(0, 2, 0, 0);
22443 let e: i64x2 = i64x2::new(18, 20);
22444 let r: i64x2 = transmute(vmull_high_laneq_s32::<1>(transmute(a), transmute(b)));
17df50a5
XL
22445 assert_eq!(r, e);
22446 }
22447
22448 #[simd_test(enable = "neon")]
3c0e092e
XL
22449 unsafe fn test_vmull_high_lane_u16() {
22450 let a: u16x8 = u16x8::new(1, 2, 9, 10, 9, 10, 11, 12);
22451 let b: u16x4 = u16x4::new(0, 2, 0, 0);
22452 let e: u32x4 = u32x4::new(18, 20, 22, 24);
22453 let r: u32x4 = transmute(vmull_high_lane_u16::<1>(transmute(a), transmute(b)));
17df50a5
XL
22454 assert_eq!(r, e);
22455 }
22456
22457 #[simd_test(enable = "neon")]
3c0e092e
XL
22458 unsafe fn test_vmull_high_laneq_u16() {
22459 let a: u16x8 = u16x8::new(1, 2, 9, 10, 9, 10, 11, 12);
22460 let b: u16x8 = u16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
22461 let e: u32x4 = u32x4::new(18, 20, 22, 24);
22462 let r: u32x4 = transmute(vmull_high_laneq_u16::<1>(transmute(a), transmute(b)));
17df50a5
XL
22463 assert_eq!(r, e);
22464 }
22465
22466 #[simd_test(enable = "neon")]
3c0e092e
XL
22467 unsafe fn test_vmull_high_lane_u32() {
22468 let a: u32x4 = u32x4::new(1, 2, 9, 10);
22469 let b: u32x2 = u32x2::new(0, 2);
22470 let e: u64x2 = u64x2::new(18, 20);
22471 let r: u64x2 = transmute(vmull_high_lane_u32::<1>(transmute(a), transmute(b)));
17df50a5
XL
22472 assert_eq!(r, e);
22473 }
22474
22475 #[simd_test(enable = "neon")]
3c0e092e
XL
22476 unsafe fn test_vmull_high_laneq_u32() {
22477 let a: u32x4 = u32x4::new(1, 2, 9, 10);
22478 let b: u32x4 = u32x4::new(0, 2, 0, 0);
22479 let e: u64x2 = u64x2::new(18, 20);
22480 let r: u64x2 = transmute(vmull_high_laneq_u32::<1>(transmute(a), transmute(b)));
17df50a5
XL
22481 assert_eq!(r, e);
22482 }
22483
22484 #[simd_test(enable = "neon")]
3c0e092e
XL
22485 unsafe fn test_vmulx_f32() {
22486 let a: f32x2 = f32x2::new(1., 2.);
22487 let b: f32x2 = f32x2::new(2., 2.);
22488 let e: f32x2 = f32x2::new(2., 4.);
22489 let r: f32x2 = transmute(vmulx_f32(transmute(a), transmute(b)));
17df50a5
XL
22490 assert_eq!(r, e);
22491 }
22492
22493 #[simd_test(enable = "neon")]
3c0e092e
XL
22494 unsafe fn test_vmulxq_f32() {
22495 let a: f32x4 = f32x4::new(1., 2., 3., 4.);
22496 let b: f32x4 = f32x4::new(2., 2., 2., 2.);
22497 let e: f32x4 = f32x4::new(2., 4., 6., 8.);
22498 let r: f32x4 = transmute(vmulxq_f32(transmute(a), transmute(b)));
17df50a5
XL
22499 assert_eq!(r, e);
22500 }
22501
22502 #[simd_test(enable = "neon")]
3c0e092e
XL
22503 unsafe fn test_vmulx_f64() {
22504 let a: f64 = 1.;
22505 let b: f64 = 2.;
22506 let e: f64 = 2.;
22507 let r: f64 = transmute(vmulx_f64(transmute(a), transmute(b)));
17df50a5
XL
22508 assert_eq!(r, e);
22509 }
22510
22511 #[simd_test(enable = "neon")]
3c0e092e
XL
22512 unsafe fn test_vmulxq_f64() {
22513 let a: f64x2 = f64x2::new(1., 2.);
22514 let b: f64x2 = f64x2::new(2., 2.);
22515 let e: f64x2 = f64x2::new(2., 4.);
22516 let r: f64x2 = transmute(vmulxq_f64(transmute(a), transmute(b)));
17df50a5
XL
22517 assert_eq!(r, e);
22518 }
22519
22520 #[simd_test(enable = "neon")]
3c0e092e
XL
22521 unsafe fn test_vmulx_lane_f64() {
22522 let a: f64 = 1.;
22523 let b: f64 = 2.;
22524 let e: f64 = 2.;
22525 let r: f64 = transmute(vmulx_lane_f64::<0>(transmute(a), transmute(b)));
17df50a5
XL
22526 assert_eq!(r, e);
22527 }
22528
22529 #[simd_test(enable = "neon")]
3c0e092e
XL
22530 unsafe fn test_vmulx_laneq_f64() {
22531 let a: f64 = 1.;
22532 let b: f64x2 = f64x2::new(2., 0.);
22533 let e: f64 = 2.;
22534 let r: f64 = transmute(vmulx_laneq_f64::<0>(transmute(a), transmute(b)));
17df50a5
XL
22535 assert_eq!(r, e);
22536 }
22537
22538 #[simd_test(enable = "neon")]
3c0e092e
XL
22539 unsafe fn test_vmulx_lane_f32() {
22540 let a: f32x2 = f32x2::new(1., 2.);
22541 let b: f32x2 = f32x2::new(2., 0.);
22542 let e: f32x2 = f32x2::new(2., 4.);
22543 let r: f32x2 = transmute(vmulx_lane_f32::<0>(transmute(a), transmute(b)));
17df50a5
XL
22544 assert_eq!(r, e);
22545 }
22546
22547 #[simd_test(enable = "neon")]
3c0e092e
XL
22548 unsafe fn test_vmulx_laneq_f32() {
22549 let a: f32x2 = f32x2::new(1., 2.);
22550 let b: f32x4 = f32x4::new(2., 0., 0., 0.);
22551 let e: f32x2 = f32x2::new(2., 4.);
22552 let r: f32x2 = transmute(vmulx_laneq_f32::<0>(transmute(a), transmute(b)));
17df50a5
XL
22553 assert_eq!(r, e);
22554 }
22555
22556 #[simd_test(enable = "neon")]
3c0e092e
XL
22557 unsafe fn test_vmulxq_lane_f32() {
22558 let a: f32x4 = f32x4::new(1., 2., 3., 4.);
22559 let b: f32x2 = f32x2::new(2., 0.);
22560 let e: f32x4 = f32x4::new(2., 4., 6., 8.);
22561 let r: f32x4 = transmute(vmulxq_lane_f32::<0>(transmute(a), transmute(b)));
17df50a5
XL
22562 assert_eq!(r, e);
22563 }
22564
22565 #[simd_test(enable = "neon")]
3c0e092e
XL
22566 unsafe fn test_vmulxq_laneq_f32() {
22567 let a: f32x4 = f32x4::new(1., 2., 3., 4.);
22568 let b: f32x4 = f32x4::new(2., 0., 0., 0.);
22569 let e: f32x4 = f32x4::new(2., 4., 6., 8.);
22570 let r: f32x4 = transmute(vmulxq_laneq_f32::<0>(transmute(a), transmute(b)));
22571 assert_eq!(r, e);
22572 }
22573
22574 #[simd_test(enable = "neon")]
22575 unsafe fn test_vmulxq_lane_f64() {
22576 let a: f64x2 = f64x2::new(1., 2.);
22577 let b: f64 = 2.;
22578 let e: f64x2 = f64x2::new(2., 4.);
22579 let r: f64x2 = transmute(vmulxq_lane_f64::<0>(transmute(a), transmute(b)));
22580 assert_eq!(r, e);
22581 }
22582
22583 #[simd_test(enable = "neon")]
22584 unsafe fn test_vmulxq_laneq_f64() {
22585 let a: f64x2 = f64x2::new(1., 2.);
22586 let b: f64x2 = f64x2::new(2., 0.);
22587 let e: f64x2 = f64x2::new(2., 4.);
22588 let r: f64x2 = transmute(vmulxq_laneq_f64::<0>(transmute(a), transmute(b)));
22589 assert_eq!(r, e);
22590 }
22591
22592 #[simd_test(enable = "neon")]
22593 unsafe fn test_vmulxs_f32() {
22594 let a: f32 = 2.;
22595 let b: f32 = 3.;
22596 let e: f32 = 6.;
22597 let r: f32 = transmute(vmulxs_f32(transmute(a), transmute(b)));
22598 assert_eq!(r, e);
22599 }
22600
22601 #[simd_test(enable = "neon")]
22602 unsafe fn test_vmulxd_f64() {
22603 let a: f64 = 2.;
22604 let b: f64 = 3.;
22605 let e: f64 = 6.;
22606 let r: f64 = transmute(vmulxd_f64(transmute(a), transmute(b)));
22607 assert_eq!(r, e);
22608 }
22609
22610 #[simd_test(enable = "neon")]
22611 unsafe fn test_vmulxs_lane_f32() {
22612 let a: f32 = 2.;
22613 let b: f32x2 = f32x2::new(3., 0.);
22614 let e: f32 = 6.;
22615 let r: f32 = transmute(vmulxs_lane_f32::<0>(transmute(a), transmute(b)));
22616 assert_eq!(r, e);
22617 }
22618
22619 #[simd_test(enable = "neon")]
22620 unsafe fn test_vmulxs_laneq_f32() {
22621 let a: f32 = 2.;
22622 let b: f32x4 = f32x4::new(3., 0., 0., 0.);
22623 let e: f32 = 6.;
22624 let r: f32 = transmute(vmulxs_laneq_f32::<0>(transmute(a), transmute(b)));
22625 assert_eq!(r, e);
22626 }
22627
22628 #[simd_test(enable = "neon")]
22629 unsafe fn test_vmulxd_lane_f64() {
22630 let a: f64 = 2.;
22631 let b: f64 = 3.;
22632 let e: f64 = 6.;
22633 let r: f64 = transmute(vmulxd_lane_f64::<0>(transmute(a), transmute(b)));
22634 assert_eq!(r, e);
22635 }
22636
22637 #[simd_test(enable = "neon")]
22638 unsafe fn test_vmulxd_laneq_f64() {
22639 let a: f64 = 2.;
22640 let b: f64x2 = f64x2::new(3., 0.);
22641 let e: f64 = 6.;
22642 let r: f64 = transmute(vmulxd_laneq_f64::<0>(transmute(a), transmute(b)));
22643 assert_eq!(r, e);
22644 }
22645
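// vfma_*: fused multiply-add (FMLA), computing a + b * c with a single rounding.
// The _n variants multiply by a scalar, and the _lane/_laneq variants by the
// element of c selected by the const LANE parameter.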
22646 #[simd_test(enable = "neon")]
22647 unsafe fn test_vfma_f64() {
22648 let a: f64 = 8.0;
22649 let b: f64 = 6.0;
22650 let c: f64 = 2.0;
22651 let e: f64 = 20.0;
22652 let r: f64 = transmute(vfma_f64(transmute(a), transmute(b), transmute(c)));
22653 assert_eq!(r, e);
22654 }
22655
22656 #[simd_test(enable = "neon")]
22657 unsafe fn test_vfmaq_f64() {
22658 let a: f64x2 = f64x2::new(8.0, 18.0);
22659 let b: f64x2 = f64x2::new(6.0, 4.0);
22660 let c: f64x2 = f64x2::new(2.0, 3.0);
22661 let e: f64x2 = f64x2::new(20.0, 30.0);
22662 let r: f64x2 = transmute(vfmaq_f64(transmute(a), transmute(b), transmute(c)));
22663 assert_eq!(r, e);
22664 }
22665
22666 #[simd_test(enable = "neon")]
22667 unsafe fn test_vfma_n_f64() {
22668 let a: f64 = 2.0;
22669 let b: f64 = 6.0;
22670 let c: f64 = 8.0;
22671 let e: f64 = 50.0;
22672 let r: f64 = transmute(vfma_n_f64(transmute(a), transmute(b), transmute(c)));
22673 assert_eq!(r, e);
22674 }
22675
22676 #[simd_test(enable = "neon")]
22677 unsafe fn test_vfmaq_n_f64() {
22678 let a: f64x2 = f64x2::new(2.0, 3.0);
22679 let b: f64x2 = f64x2::new(6.0, 4.0);
22680 let c: f64 = 8.0;
22681 let e: f64x2 = f64x2::new(50.0, 35.0);
22682 let r: f64x2 = transmute(vfmaq_n_f64(transmute(a), transmute(b), transmute(c)));
22683 assert_eq!(r, e);
22684 }
22685
22686 #[simd_test(enable = "neon")]
22687 unsafe fn test_vfma_lane_f32() {
22688 let a: f32x2 = f32x2::new(2., 3.);
22689 let b: f32x2 = f32x2::new(6., 4.);
22690 let c: f32x2 = f32x2::new(2., 0.);
22691 let e: f32x2 = f32x2::new(14., 11.);
22692 let r: f32x2 = transmute(vfma_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
22693 assert_eq!(r, e);
22694 }
22695
22696 #[simd_test(enable = "neon")]
22697 unsafe fn test_vfma_laneq_f32() {
22698 let a: f32x2 = f32x2::new(2., 3.);
22699 let b: f32x2 = f32x2::new(6., 4.);
22700 let c: f32x4 = f32x4::new(2., 0., 0., 0.);
22701 let e: f32x2 = f32x2::new(14., 11.);
22702 let r: f32x2 = transmute(vfma_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
22703 assert_eq!(r, e);
22704 }
22705
22706 #[simd_test(enable = "neon")]
22707 unsafe fn test_vfmaq_lane_f32() {
22708 let a: f32x4 = f32x4::new(2., 3., 4., 5.);
22709 let b: f32x4 = f32x4::new(6., 4., 7., 8.);
22710 let c: f32x2 = f32x2::new(2., 0.);
22711 let e: f32x4 = f32x4::new(14., 11., 18., 21.);
22712 let r: f32x4 = transmute(vfmaq_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
22713 assert_eq!(r, e);
22714 }
22715
22716 #[simd_test(enable = "neon")]
22717 unsafe fn test_vfmaq_laneq_f32() {
22718 let a: f32x4 = f32x4::new(2., 3., 4., 5.);
22719 let b: f32x4 = f32x4::new(6., 4., 7., 8.);
22720 let c: f32x4 = f32x4::new(2., 0., 0., 0.);
22721 let e: f32x4 = f32x4::new(14., 11., 18., 21.);
22722 let r: f32x4 = transmute(vfmaq_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
22723 assert_eq!(r, e);
22724 }
22725
22726 #[simd_test(enable = "neon")]
22727 unsafe fn test_vfma_lane_f64() {
22728 let a: f64 = 2.;
22729 let b: f64 = 6.;
22730 let c: f64 = 2.;
22731 let e: f64 = 14.;
22732 let r: f64 = transmute(vfma_lane_f64::<0>(transmute(a), transmute(b), transmute(c)));
22733 assert_eq!(r, e);
22734 }
22735
22736 #[simd_test(enable = "neon")]
22737 unsafe fn test_vfma_laneq_f64() {
22738 let a: f64 = 2.;
22739 let b: f64 = 6.;
22740 let c: f64x2 = f64x2::new(2., 0.);
22741 let e: f64 = 14.;
22742 let r: f64 = transmute(vfma_laneq_f64::<0>(transmute(a), transmute(b), transmute(c)));
22743 assert_eq!(r, e);
22744 }
22745
22746 #[simd_test(enable = "neon")]
22747 unsafe fn test_vfmaq_lane_f64() {
22748 let a: f64x2 = f64x2::new(2., 3.);
22749 let b: f64x2 = f64x2::new(6., 4.);
22750 let c: f64 = 2.;
22751 let e: f64x2 = f64x2::new(14., 11.);
22752 let r: f64x2 = transmute(vfmaq_lane_f64::<0>(transmute(a), transmute(b), transmute(c)));
22753 assert_eq!(r, e);
22754 }
22755
22756 #[simd_test(enable = "neon")]
22757 unsafe fn test_vfmaq_laneq_f64() {
22758 let a: f64x2 = f64x2::new(2., 3.);
22759 let b: f64x2 = f64x2::new(6., 4.);
22760 let c: f64x2 = f64x2::new(2., 0.);
22761 let e: f64x2 = f64x2::new(14., 11.);
22762 let r: f64x2 = transmute(vfmaq_laneq_f64::<0>(transmute(a), transmute(b), transmute(c)));
22763 assert_eq!(r, e);
22764 }
22765
22766 #[simd_test(enable = "neon")]
22767 unsafe fn test_vfmas_lane_f32() {
22768 let a: f32 = 2.;
22769 let b: f32 = 6.;
22770 let c: f32x2 = f32x2::new(3., 0.);
22771 let e: f32 = 20.;
22772 let r: f32 = transmute(vfmas_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
22773 assert_eq!(r, e);
22774 }
22775
22776 #[simd_test(enable = "neon")]
22777 unsafe fn test_vfmas_laneq_f32() {
22778 let a: f32 = 2.;
22779 let b: f32 = 6.;
22780 let c: f32x4 = f32x4::new(3., 0., 0., 0.);
22781 let e: f32 = 20.;
22782 let r: f32 = transmute(vfmas_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
22783 assert_eq!(r, e);
22784 }
22785
22786 #[simd_test(enable = "neon")]
22787 unsafe fn test_vfmad_lane_f64() {
22788 let a: f64 = 2.;
22789 let b: f64 = 6.;
22790 let c: f64 = 3.;
22791 let e: f64 = 20.;
22792 let r: f64 = transmute(vfmad_lane_f64::<0>(transmute(a), transmute(b), transmute(c)));
22793 assert_eq!(r, e);
22794 }
22795
22796 #[simd_test(enable = "neon")]
22797 unsafe fn test_vfmad_laneq_f64() {
22798 let a: f64 = 2.;
22799 let b: f64 = 6.;
22800 let c: f64x2 = f64x2::new(3., 0.);
22801 let e: f64 = 20.;
22802 let r: f64 = transmute(vfmad_laneq_f64::<0>(transmute(a), transmute(b), transmute(c)));
22803 assert_eq!(r, e);
22804 }
22805
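// vfms_*: fused multiply-subtract (FMLS), computing a - b * c; the first test
// below checks 20.0 - 6.0 * 2.0 == 8.0.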
22806 #[simd_test(enable = "neon")]
22807 unsafe fn test_vfms_f64() {
22808 let a: f64 = 20.0;
22809 let b: f64 = 6.0;
22810 let c: f64 = 2.0;
22811 let e: f64 = 8.0;
22812 let r: f64 = transmute(vfms_f64(transmute(a), transmute(b), transmute(c)));
22813 assert_eq!(r, e);
22814 }
22815
22816 #[simd_test(enable = "neon")]
22817 unsafe fn test_vfmsq_f64() {
22818 let a: f64x2 = f64x2::new(20.0, 30.0);
22819 let b: f64x2 = f64x2::new(6.0, 4.0);
22820 let c: f64x2 = f64x2::new(2.0, 3.0);
22821 let e: f64x2 = f64x2::new(8.0, 18.0);
22822 let r: f64x2 = transmute(vfmsq_f64(transmute(a), transmute(b), transmute(c)));
22823 assert_eq!(r, e);
22824 }
22825
22826 #[simd_test(enable = "neon")]
22827 unsafe fn test_vfms_n_f64() {
22828 let a: f64 = 50.0;
22829 let b: f64 = 6.0;
22830 let c: f64 = 8.0;
22831 let e: f64 = 2.0;
22832 let r: f64 = transmute(vfms_n_f64(transmute(a), transmute(b), transmute(c)));
22833 assert_eq!(r, e);
22834 }
22835
22836 #[simd_test(enable = "neon")]
22837 unsafe fn test_vfmsq_n_f64() {
22838 let a: f64x2 = f64x2::new(50.0, 35.0);
22839 let b: f64x2 = f64x2::new(6.0, 4.0);
22840 let c: f64 = 8.0;
22841 let e: f64x2 = f64x2::new(2.0, 3.0);
22842 let r: f64x2 = transmute(vfmsq_n_f64(transmute(a), transmute(b), transmute(c)));
22843 assert_eq!(r, e);
22844 }
22845
22846 #[simd_test(enable = "neon")]
22847 unsafe fn test_vfms_lane_f32() {
22848 let a: f32x2 = f32x2::new(14., 11.);
22849 let b: f32x2 = f32x2::new(6., 4.);
22850 let c: f32x2 = f32x2::new(2., 0.);
22851 let e: f32x2 = f32x2::new(2., 3.);
22852 let r: f32x2 = transmute(vfms_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
22853 assert_eq!(r, e);
22854 }
22855
22856 #[simd_test(enable = "neon")]
22857 unsafe fn test_vfms_laneq_f32() {
22858 let a: f32x2 = f32x2::new(14., 11.);
22859 let b: f32x2 = f32x2::new(6., 4.);
22860 let c: f32x4 = f32x4::new(2., 0., 0., 0.);
22861 let e: f32x2 = f32x2::new(2., 3.);
22862 let r: f32x2 = transmute(vfms_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
22863 assert_eq!(r, e);
22864 }
22865
22866 #[simd_test(enable = "neon")]
22867 unsafe fn test_vfmsq_lane_f32() {
22868 let a: f32x4 = f32x4::new(14., 11., 18., 21.);
22869 let b: f32x4 = f32x4::new(6., 4., 7., 8.);
22870 let c: f32x2 = f32x2::new(2., 0.);
22871 let e: f32x4 = f32x4::new(2., 3., 4., 5.);
22872 let r: f32x4 = transmute(vfmsq_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
22873 assert_eq!(r, e);
22874 }
22875
22876 #[simd_test(enable = "neon")]
22877 unsafe fn test_vfmsq_laneq_f32() {
22878 let a: f32x4 = f32x4::new(14., 11., 18., 21.);
22879 let b: f32x4 = f32x4::new(6., 4., 7., 8.);
22880 let c: f32x4 = f32x4::new(2., 0., 0., 0.);
22881 let e: f32x4 = f32x4::new(2., 3., 4., 5.);
22882 let r: f32x4 = transmute(vfmsq_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
22883 assert_eq!(r, e);
22884 }
22885
22886 #[simd_test(enable = "neon")]
22887 unsafe fn test_vfms_lane_f64() {
22888 let a: f64 = 14.;
22889 let b: f64 = 6.;
22890 let c: f64 = 2.;
22891 let e: f64 = 2.;
22892 let r: f64 = transmute(vfms_lane_f64::<0>(transmute(a), transmute(b), transmute(c)));
22893 assert_eq!(r, e);
22894 }
22895
22896 #[simd_test(enable = "neon")]
22897 unsafe fn test_vfms_laneq_f64() {
22898 let a: f64 = 14.;
22899 let b: f64 = 6.;
22900 let c: f64x2 = f64x2::new(2., 0.);
22901 let e: f64 = 2.;
22902 let r: f64 = transmute(vfms_laneq_f64::<0>(transmute(a), transmute(b), transmute(c)));
22903 assert_eq!(r, e);
22904 }
22905
22906 #[simd_test(enable = "neon")]
22907 unsafe fn test_vfmsq_lane_f64() {
22908 let a: f64x2 = f64x2::new(14., 11.);
22909 let b: f64x2 = f64x2::new(6., 4.);
22910 let c: f64 = 2.;
22911 let e: f64x2 = f64x2::new(2., 3.);
22912 let r: f64x2 = transmute(vfmsq_lane_f64::<0>(transmute(a), transmute(b), transmute(c)));
22913 assert_eq!(r, e);
22914 }
22915
22916 #[simd_test(enable = "neon")]
22917 unsafe fn test_vfmsq_laneq_f64() {
22918 let a: f64x2 = f64x2::new(14., 11.);
22919 let b: f64x2 = f64x2::new(6., 4.);
22920 let c: f64x2 = f64x2::new(2., 0.);
22921 let e: f64x2 = f64x2::new(2., 3.);
22922 let r: f64x2 = transmute(vfmsq_laneq_f64::<0>(transmute(a), transmute(b), transmute(c)));
22923 assert_eq!(r, e);
22924 }
22925
22926 #[simd_test(enable = "neon")]
22927 unsafe fn test_vfmss_lane_f32() {
22928 let a: f32 = 14.;
22929 let b: f32 = 6.;
22930 let c: f32x2 = f32x2::new(2., 0.);
22931 let e: f32 = 2.;
22932 let r: f32 = transmute(vfmss_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
22933 assert_eq!(r, e);
22934 }
22935
22936 #[simd_test(enable = "neon")]
22937 unsafe fn test_vfmss_laneq_f32() {
22938 let a: f32 = 14.;
22939 let b: f32 = 6.;
22940 let c: f32x4 = f32x4::new(2., 0., 0., 0.);
22941 let e: f32 = 2.;
22942 let r: f32 = transmute(vfmss_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
22943 assert_eq!(r, e);
22944 }
22945
22946 #[simd_test(enable = "neon")]
22947 unsafe fn test_vfmsd_lane_f64() {
22948 let a: f64 = 14.;
22949 let b: f64 = 6.;
22950 let c: f64 = 2.;
22951 let e: f64 = 2.;
22952 let r: f64 = transmute(vfmsd_lane_f64::<0>(transmute(a), transmute(b), transmute(c)));
22953 assert_eq!(r, e);
22954 }
22955
22956 #[simd_test(enable = "neon")]
22957 unsafe fn test_vfmsd_laneq_f64() {
22958 let a: f64 = 14.;
22959 let b: f64 = 6.;
22960 let c: f64x2 = f64x2::new(2., 0.);
22961 let e: f64 = 2.;
22962 let r: f64 = transmute(vfmsd_laneq_f64::<0>(transmute(a), transmute(b), transmute(c)));
22963 assert_eq!(r, e);
22964 }
22965
22966 #[simd_test(enable = "neon")]
22967 unsafe fn test_vdiv_f32() {
22968 let a: f32x2 = f32x2::new(2.0, 6.0);
22969 let b: f32x2 = f32x2::new(1.0, 2.0);
22970 let e: f32x2 = f32x2::new(2.0, 3.0);
22971 let r: f32x2 = transmute(vdiv_f32(transmute(a), transmute(b)));
22972 assert_eq!(r, e);
22973 }
22974
22975 #[simd_test(enable = "neon")]
22976 unsafe fn test_vdivq_f32() {
22977 let a: f32x4 = f32x4::new(2.0, 6.0, 4.0, 10.0);
22978 let b: f32x4 = f32x4::new(1.0, 2.0, 1.0, 2.0);
22979 let e: f32x4 = f32x4::new(2.0, 3.0, 4.0, 5.0);
22980 let r: f32x4 = transmute(vdivq_f32(transmute(a), transmute(b)));
22981 assert_eq!(r, e);
22982 }
22983
22984 #[simd_test(enable = "neon")]
22985 unsafe fn test_vdiv_f64() {
22986 let a: f64 = 2.0;
22987 let b: f64 = 1.0;
22988 let e: f64 = 2.0;
22989 let r: f64 = transmute(vdiv_f64(transmute(a), transmute(b)));
22990 assert_eq!(r, e);
22991 }
22992
22993 #[simd_test(enable = "neon")]
22994 unsafe fn test_vdivq_f64() {
22995 let a: f64x2 = f64x2::new(2.0, 6.0);
22996 let b: f64x2 = f64x2::new(1.0, 2.0);
22997 let e: f64x2 = f64x2::new(2.0, 3.0);
22998 let r: f64x2 = transmute(vdivq_f64(transmute(a), transmute(b)));
22999 assert_eq!(r, e);
23000 }
23001
23002 #[simd_test(enable = "neon")]
23003 unsafe fn test_vsub_f64() {
23004 let a: f64 = 1.0;
23005 let b: f64 = 1.0;
23006 let e: f64 = 0.0;
23007 let r: f64 = transmute(vsub_f64(transmute(a), transmute(b)));
23008 assert_eq!(r, e);
23009 }
23010
23011 #[simd_test(enable = "neon")]
23012 unsafe fn test_vsubq_f64() {
23013 let a: f64x2 = f64x2::new(1.0, 4.0);
23014 let b: f64x2 = f64x2::new(1.0, 2.0);
23015 let e: f64x2 = f64x2::new(0.0, 2.0);
23016 let r: f64x2 = transmute(vsubq_f64(transmute(a), transmute(b)));
23017 assert_eq!(r, e);
23018 }
23019
23020 #[simd_test(enable = "neon")]
23021 unsafe fn test_vsubd_s64() {
23022 let a: i64 = 3;
23023 let b: i64 = 2;
23024 let e: i64 = 1;
23025 let r: i64 = transmute(vsubd_s64(transmute(a), transmute(b)));
23026 assert_eq!(r, e);
23027 }
23028
23029 #[simd_test(enable = "neon")]
23030 unsafe fn test_vsubd_u64() {
23031 let a: u64 = 3;
23032 let b: u64 = 2;
23033 let e: u64 = 1;
23034 let r: u64 = transmute(vsubd_u64(transmute(a), transmute(b)));
23035 assert_eq!(r, e);
23036 }
23037
23038 #[simd_test(enable = "neon")]
23039 unsafe fn test_vaddd_s64() {
23040 let a: i64 = 1;
23041 let b: i64 = 2;
23042 let e: i64 = 3;
23043 let r: i64 = transmute(vaddd_s64(transmute(a), transmute(b)));
23044 assert_eq!(r, e);
23045 }
23046
23047 #[simd_test(enable = "neon")]
23048 unsafe fn test_vaddd_u64() {
23049 let a: u64 = 1;
23050 let b: u64 = 2;
23051 let e: u64 = 3;
23052 let r: u64 = transmute(vaddd_u64(transmute(a), transmute(b)));
23053 assert_eq!(r, e);
23054 }
23055
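// vaddv_*/vaddlv_*: horizontal reductions that add every lane into a scalar;
// the "l" (long) variants widen the result, e.g. vaddlv_s16 sums four i16
// lanes into an i32.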
23056 #[simd_test(enable = "neon")]
23057 unsafe fn test_vaddv_f32() {
23058 let a: f32x2 = f32x2::new(1., 2.);
23059 let e: f32 = 3.;
23060 let r: f32 = transmute(vaddv_f32(transmute(a)));
23061 assert_eq!(r, e);
23062 }
23063
23064 #[simd_test(enable = "neon")]
23065 unsafe fn test_vaddvq_f32() {
23066 let a: f32x4 = f32x4::new(1., 2., 0., 0.);
23067 let e: f32 = 3.;
23068 let r: f32 = transmute(vaddvq_f32(transmute(a)));
23069 assert_eq!(r, e);
23070 }
23071
23072 #[simd_test(enable = "neon")]
23073 unsafe fn test_vaddvq_f64() {
23074 let a: f64x2 = f64x2::new(1., 2.);
23075 let e: f64 = 3.;
23076 let r: f64 = transmute(vaddvq_f64(transmute(a)));
23077 assert_eq!(r, e);
23078 }
23079
23080 #[simd_test(enable = "neon")]
23081 unsafe fn test_vaddlv_s16() {
23082 let a: i16x4 = i16x4::new(1, 2, 3, 4);
23083 let e: i32 = 10;
23084 let r: i32 = transmute(vaddlv_s16(transmute(a)));
23085 assert_eq!(r, e);
23086 }
23087
23088 #[simd_test(enable = "neon")]
23089 unsafe fn test_vaddlvq_s16() {
23090 let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
23091 let e: i32 = 36;
23092 let r: i32 = transmute(vaddlvq_s16(transmute(a)));
23093 assert_eq!(r, e);
23094 }
23095
23096 #[simd_test(enable = "neon")]
23097 unsafe fn test_vaddlv_s32() {
23098 let a: i32x2 = i32x2::new(1, 2);
23099 let e: i64 = 3;
23100 let r: i64 = transmute(vaddlv_s32(transmute(a)));
23101 assert_eq!(r, e);
23102 }
23103
23104 #[simd_test(enable = "neon")]
23105 unsafe fn test_vaddlvq_s32() {
23106 let a: i32x4 = i32x4::new(1, 2, 3, 4);
23107 let e: i64 = 10;
23108 let r: i64 = transmute(vaddlvq_s32(transmute(a)));
23109 assert_eq!(r, e);
23110 }
23111
23112 #[simd_test(enable = "neon")]
23113 unsafe fn test_vaddlv_u16() {
23114 let a: u16x4 = u16x4::new(1, 2, 3, 4);
23115 let e: u32 = 10;
23116 let r: u32 = transmute(vaddlv_u16(transmute(a)));
23117 assert_eq!(r, e);
23118 }
23119
23120 #[simd_test(enable = "neon")]
23121 unsafe fn test_vaddlvq_u16() {
23122 let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
23123 let e: u32 = 36;
23124 let r: u32 = transmute(vaddlvq_u16(transmute(a)));
23125 assert_eq!(r, e);
23126 }
23127
23128 #[simd_test(enable = "neon")]
23129 unsafe fn test_vaddlv_u32() {
23130 let a: u32x2 = u32x2::new(1, 2);
23131 let e: u64 = 3;
23132 let r: u64 = transmute(vaddlv_u32(transmute(a)));
23133 assert_eq!(r, e);
23134 }
23135
23136 #[simd_test(enable = "neon")]
23137 unsafe fn test_vaddlvq_u32() {
23138 let a: u32x4 = u32x4::new(1, 2, 3, 4);
23139 let e: u64 = 10;
23140 let r: u64 = transmute(vaddlvq_u32(transmute(a)));
23141 assert_eq!(r, e);
23142 }
23143
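// vsubw_high_*/vsubl_high_*: widening subtraction on high halves. The "w" forms
// subtract the widened high half of the narrower b from the wide a; the "l"
// forms widen the high halves of both a and b before subtracting.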
23144 #[simd_test(enable = "neon")]
23145 unsafe fn test_vsubw_high_s8() {
23146 let a: i16x8 = i16x8::new(8, 9, 10, 12, 13, 14, 15, 16);
23147 let b: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16);
23148 let e: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
23149 let r: i16x8 = transmute(vsubw_high_s8(transmute(a), transmute(b)));
23150 assert_eq!(r, e);
23151 }
23152
23153 #[simd_test(enable = "neon")]
23154 unsafe fn test_vsubw_high_s16() {
23155 let a: i32x4 = i32x4::new(8, 9, 10, 11);
23156 let b: i16x8 = i16x8::new(0, 1, 2, 3, 8, 9, 10, 11);
23157 let e: i32x4 = i32x4::new(0, 0, 0, 0);
23158 let r: i32x4 = transmute(vsubw_high_s16(transmute(a), transmute(b)));
23159 assert_eq!(r, e);
23160 }
23161
23162 #[simd_test(enable = "neon")]
23163 unsafe fn test_vsubw_high_s32() {
23164 let a: i64x2 = i64x2::new(8, 9);
23165 let b: i32x4 = i32x4::new(6, 7, 8, 9);
23166 let e: i64x2 = i64x2::new(0, 0);
23167 let r: i64x2 = transmute(vsubw_high_s32(transmute(a), transmute(b)));
23168 assert_eq!(r, e);
23169 }
23170
23171 #[simd_test(enable = "neon")]
23172 unsafe fn test_vsubw_high_u8() {
23173 let a: u16x8 = u16x8::new(8, 9, 10, 11, 12, 13, 14, 15);
23174 let b: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
23175 let e: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
23176 let r: u16x8 = transmute(vsubw_high_u8(transmute(a), transmute(b)));
23177 assert_eq!(r, e);
23178 }
23179
23180 #[simd_test(enable = "neon")]
23181 unsafe fn test_vsubw_high_u16() {
23182 let a: u32x4 = u32x4::new(8, 9, 10, 11);
23183 let b: u16x8 = u16x8::new(0, 1, 2, 3, 8, 9, 10, 11);
23184 let e: u32x4 = u32x4::new(0, 0, 0, 0);
23185 let r: u32x4 = transmute(vsubw_high_u16(transmute(a), transmute(b)));
23186 assert_eq!(r, e);
23187 }
23188
23189 #[simd_test(enable = "neon")]
23190 unsafe fn test_vsubw_high_u32() {
23191 let a: u64x2 = u64x2::new(8, 9);
23192 let b: u32x4 = u32x4::new(6, 7, 8, 9);
23193 let e: u64x2 = u64x2::new(0, 0);
23194 let r: u64x2 = transmute(vsubw_high_u32(transmute(a), transmute(b)));
23195 assert_eq!(r, e);
23196 }
23197
23198 #[simd_test(enable = "neon")]
23199 unsafe fn test_vsubl_high_s8() {
23200 let a: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
23201 let b: i8x16 = i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2);
23202 let e: i16x8 = i16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
23203 let r: i16x8 = transmute(vsubl_high_s8(transmute(a), transmute(b)));
23204 assert_eq!(r, e);
23205 }
23206
23207 #[simd_test(enable = "neon")]
23208 unsafe fn test_vsubl_high_s16() {
23209 let a: i16x8 = i16x8::new(8, 9, 10, 11, 12, 13, 14, 15);
23210 let b: i16x8 = i16x8::new(6, 6, 6, 6, 8, 8, 8, 8);
23211 let e: i32x4 = i32x4::new(4, 5, 6, 7);
23212 let r: i32x4 = transmute(vsubl_high_s16(transmute(a), transmute(b)));
23213 assert_eq!(r, e);
23214 }
23215
23216 #[simd_test(enable = "neon")]
23217 unsafe fn test_vsubl_high_s32() {
23218 let a: i32x4 = i32x4::new(12, 13, 14, 15);
23219 let b: i32x4 = i32x4::new(6, 6, 8, 8);
23220 let e: i64x2 = i64x2::new(6, 7);
23221 let r: i64x2 = transmute(vsubl_high_s32(transmute(a), transmute(b)));
23222 assert_eq!(r, e);
23223 }
23224
23225 #[simd_test(enable = "neon")]
23226 unsafe fn test_vsubl_high_u8() {
23227 let a: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
23228 let b: u8x16 = u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2);
23229 let e: u16x8 = u16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
23230 let r: u16x8 = transmute(vsubl_high_u8(transmute(a), transmute(b)));
23231 assert_eq!(r, e);
23232 }
23233
23234 #[simd_test(enable = "neon")]
23235 unsafe fn test_vsubl_high_u16() {
23236 let a: u16x8 = u16x8::new(8, 9, 10, 11, 12, 13, 14, 15);
23237 let b: u16x8 = u16x8::new(6, 6, 6, 6, 8, 8, 8, 8);
23238 let e: u32x4 = u32x4::new(4, 5, 6, 7);
23239 let r: u32x4 = transmute(vsubl_high_u16(transmute(a), transmute(b)));
23240 assert_eq!(r, e);
23241 }
23242
23243 #[simd_test(enable = "neon")]
23244 unsafe fn test_vsubl_high_u32() {
23245 let a: u32x4 = u32x4::new(12, 13, 14, 15);
23246 let b: u32x4 = u32x4::new(6, 6, 8, 8);
23247 let e: u64x2 = u64x2::new(6, 7);
23248 let r: u64x2 = transmute(vsubl_high_u32(transmute(a), transmute(b)));
23249 assert_eq!(r, e);
23250 }
23251
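// vbcaxq_*: BCAX (bit clear and exclusive OR) computes a ^ (b & !c), i.e. the
// bits selected by c are cleared in b before it is XORed into a.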
23252 #[simd_test(enable = "neon,sha3")]
23253 unsafe fn test_vbcaxq_s8() {
23254 let a: i8x16 = i8x16::new(1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0);
23255 let b: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
23256 let c: i8x16 = i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
23257 let e: i8x16 = i8x16::new(1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
23258 let r: i8x16 = transmute(vbcaxq_s8(transmute(a), transmute(b), transmute(c)));
23259 assert_eq!(r, e);
23260 }
23261
23262 #[simd_test(enable = "neon,sha3")]
23263 unsafe fn test_vbcaxq_s16() {
23264 let a: i16x8 = i16x8::new(1, 0, 1, 0, 1, 0, 1, 0);
23265 let b: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
23266 let c: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
23267 let e: i16x8 = i16x8::new(1, 0, 3, 2, 5, 4, 7, 6);
23268 let r: i16x8 = transmute(vbcaxq_s16(transmute(a), transmute(b), transmute(c)));
23269 assert_eq!(r, e);
23270 }
23271
23272 #[simd_test(enable = "neon,sha3")]
23273 unsafe fn test_vbcaxq_s32() {
23274 let a: i32x4 = i32x4::new(1, 0, 1, 0);
23275 let b: i32x4 = i32x4::new(0, 1, 2, 3);
23276 let c: i32x4 = i32x4::new(1, 1, 1, 1);
23277 let e: i32x4 = i32x4::new(1, 0, 3, 2);
23278 let r: i32x4 = transmute(vbcaxq_s32(transmute(a), transmute(b), transmute(c)));
23279 assert_eq!(r, e);
23280 }
23281
23282 #[simd_test(enable = "neon,sha3")]
23283 unsafe fn test_vbcaxq_s64() {
23284 let a: i64x2 = i64x2::new(1, 0);
23285 let b: i64x2 = i64x2::new(0, 1);
23286 let c: i64x2 = i64x2::new(1, 1);
23287 let e: i64x2 = i64x2::new(1, 0);
23288 let r: i64x2 = transmute(vbcaxq_s64(transmute(a), transmute(b), transmute(c)));
23289 assert_eq!(r, e);
23290 }
23291
23292 #[simd_test(enable = "neon,sha3")]
23293 unsafe fn test_vbcaxq_u8() {
23294 let a: u8x16 = u8x16::new(1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0);
23295 let b: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
23296 let c: u8x16 = u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
23297 let e: u8x16 = u8x16::new(1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
23298 let r: u8x16 = transmute(vbcaxq_u8(transmute(a), transmute(b), transmute(c)));
23299 assert_eq!(r, e);
23300 }
23301
23302 #[simd_test(enable = "neon,sha3")]
23303 unsafe fn test_vbcaxq_u16() {
23304 let a: u16x8 = u16x8::new(1, 0, 1, 0, 1, 0, 1, 0);
23305 let b: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
23306 let c: u16x8 = u16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
23307 let e: u16x8 = u16x8::new(1, 0, 3, 2, 5, 4, 7, 6);
23308 let r: u16x8 = transmute(vbcaxq_u16(transmute(a), transmute(b), transmute(c)));
23309 assert_eq!(r, e);
23310 }
23311
23312 #[simd_test(enable = "neon,sha3")]
23313 unsafe fn test_vbcaxq_u32() {
23314 let a: u32x4 = u32x4::new(1, 0, 1, 0);
23315 let b: u32x4 = u32x4::new(0, 1, 2, 3);
23316 let c: u32x4 = u32x4::new(1, 1, 1, 1);
23317 let e: u32x4 = u32x4::new(1, 0, 3, 2);
23318 let r: u32x4 = transmute(vbcaxq_u32(transmute(a), transmute(b), transmute(c)));
23319 assert_eq!(r, e);
23320 }
23321
23322 #[simd_test(enable = "neon,sha3")]
23323 unsafe fn test_vbcaxq_u64() {
23324 let a: u64x2 = u64x2::new(1, 0);
23325 let b: u64x2 = u64x2::new(0, 1);
23326 let c: u64x2 = u64x2::new(1, 1);
23327 let e: u64x2 = u64x2::new(1, 0);
23328 let r: u64x2 = transmute(vbcaxq_u64(transmute(a), transmute(b), transmute(c)));
23329 assert_eq!(r, e);
23330 }
23331
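// vcadd_*/vcmla_*: FCADD and FCMLA treat each pair of lanes as a complex number
// (real, imaginary). vcadd_rot90/rot270 add b to a after rotating b by 90 or 270
// degrees in the complex plane; each vcmla_* rotation accumulates one partial
// term of the complex product of b and c into a (rot0 combined with rot90 gives
// a full complex multiply-accumulate), and the _lane variants select one complex
// element of c.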
23332 #[simd_test(enable = "neon,fcma")]
23333 unsafe fn test_vcadd_rot270_f32() {
23334 let a: f32x2 = f32x2::new(1., -1.);
23335 let b: f32x2 = f32x2::new(-1., 1.);
23336 let e: f32x2 = f32x2::new(2., 0.);
23337 let r: f32x2 = transmute(vcadd_rot270_f32(transmute(a), transmute(b)));
23338 assert_eq!(r, e);
23339 }
23340
23341 #[simd_test(enable = "neon,fcma")]
23342 unsafe fn test_vcaddq_rot270_f32() {
23343 let a: f32x4 = f32x4::new(1., -1., 1., -1.);
23344 let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
23345 let e: f32x4 = f32x4::new(2., 0., 2., 0.);
23346 let r: f32x4 = transmute(vcaddq_rot270_f32(transmute(a), transmute(b)));
23347 assert_eq!(r, e);
23348 }
23349
23350 #[simd_test(enable = "neon,fcma")]
23351 unsafe fn test_vcaddq_rot270_f64() {
23352 let a: f64x2 = f64x2::new(1., -1.);
23353 let b: f64x2 = f64x2::new(-1., 1.);
23354 let e: f64x2 = f64x2::new(2., 0.);
23355 let r: f64x2 = transmute(vcaddq_rot270_f64(transmute(a), transmute(b)));
23356 assert_eq!(r, e);
23357 }
23358
23359 #[simd_test(enable = "neon,fcma")]
23360 unsafe fn test_vcadd_rot90_f32() {
23361 let a: f32x2 = f32x2::new(1., -1.);
23362 let b: f32x2 = f32x2::new(-1., 1.);
23363 let e: f32x2 = f32x2::new(0., -2.);
23364 let r: f32x2 = transmute(vcadd_rot90_f32(transmute(a), transmute(b)));
23365 assert_eq!(r, e);
23366 }
23367
23368 #[simd_test(enable = "neon,fcma")]
23369 unsafe fn test_vcaddq_rot90_f32() {
23370 let a: f32x4 = f32x4::new(1., -1., 1., -1.);
23371 let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
23372 let e: f32x4 = f32x4::new(0., -2., 0., -2.);
23373 let r: f32x4 = transmute(vcaddq_rot90_f32(transmute(a), transmute(b)));
23374 assert_eq!(r, e);
23375 }
23376
23377 #[simd_test(enable = "neon,fcma")]
23378 unsafe fn test_vcaddq_rot90_f64() {
23379 let a: f64x2 = f64x2::new(1., -1.);
23380 let b: f64x2 = f64x2::new(-1., 1.);
23381 let e: f64x2 = f64x2::new(0., -2.);
23382 let r: f64x2 = transmute(vcaddq_rot90_f64(transmute(a), transmute(b)));
23383 assert_eq!(r, e);
23384 }
23385
23386 #[simd_test(enable = "neon,fcma")]
23387 unsafe fn test_vcmla_f32() {
23388 let a: f32x2 = f32x2::new(1., -1.);
23389 let b: f32x2 = f32x2::new(-1., 1.);
23390 let c: f32x2 = f32x2::new(1., 1.);
23391 let e: f32x2 = f32x2::new(0., -2.);
23392 let r: f32x2 = transmute(vcmla_f32(transmute(a), transmute(b), transmute(c)));
23393 assert_eq!(r, e);
23394 }
23395
23396 #[simd_test(enable = "neon,fcma")]
23397 unsafe fn test_vcmlaq_f32() {
23398 let a: f32x4 = f32x4::new(1., -1., 1., -1.);
23399 let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
23400 let c: f32x4 = f32x4::new(1., 1., -1., -1.);
23401 let e: f32x4 = f32x4::new(0., -2., 2., 0.);
23402 let r: f32x4 = transmute(vcmlaq_f32(transmute(a), transmute(b), transmute(c)));
23403 assert_eq!(r, e);
23404 }
23405
23406 #[simd_test(enable = "neon,fcma")]
23407 unsafe fn test_vcmlaq_f64() {
23408 let a: f64x2 = f64x2::new(1., -1.);
23409 let b: f64x2 = f64x2::new(-1., 1.);
23410 let c: f64x2 = f64x2::new(1., 1.);
23411 let e: f64x2 = f64x2::new(0., -2.);
23412 let r: f64x2 = transmute(vcmlaq_f64(transmute(a), transmute(b), transmute(c)));
23413 assert_eq!(r, e);
23414 }
23415
23416 #[simd_test(enable = "neon,fcma")]
23417 unsafe fn test_vcmla_rot90_f32() {
23418 let a: f32x2 = f32x2::new(1., 1.);
23419 let b: f32x2 = f32x2::new(1., -1.);
23420 let c: f32x2 = f32x2::new(1., 1.);
23421 let e: f32x2 = f32x2::new(2., 0.);
23422 let r: f32x2 = transmute(vcmla_rot90_f32(transmute(a), transmute(b), transmute(c)));
23423 assert_eq!(r, e);
23424 }
23425
23426 #[simd_test(enable = "neon,fcma")]
23427 unsafe fn test_vcmlaq_rot90_f32() {
23428 let a: f32x4 = f32x4::new(1., 1., 1., 1.);
23429 let b: f32x4 = f32x4::new(1., -1., 1., -1.);
23430 let c: f32x4 = f32x4::new(1., 1., 1., 1.);
23431 let e: f32x4 = f32x4::new(2., 0., 2., 0.);
23432 let r: f32x4 = transmute(vcmlaq_rot90_f32(transmute(a), transmute(b), transmute(c)));
23433 assert_eq!(r, e);
23434 }
23435
23436 #[simd_test(enable = "neon,fcma")]
23437 unsafe fn test_vcmlaq_rot90_f64() {
23438 let a: f64x2 = f64x2::new(1., 1.);
23439 let b: f64x2 = f64x2::new(1., -1.);
23440 let c: f64x2 = f64x2::new(1., 1.);
23441 let e: f64x2 = f64x2::new(2., 0.);
23442 let r: f64x2 = transmute(vcmlaq_rot90_f64(transmute(a), transmute(b), transmute(c)));
23443 assert_eq!(r, e);
23444 }
23445
23446 #[simd_test(enable = "neon,fcma")]
23447 unsafe fn test_vcmla_rot180_f32() {
23448 let a: f32x2 = f32x2::new(1., 1.);
23449 let b: f32x2 = f32x2::new(1., -1.);
23450 let c: f32x2 = f32x2::new(1., 1.);
23451 let e: f32x2 = f32x2::new(0., 0.);
23452 let r: f32x2 = transmute(vcmla_rot180_f32(transmute(a), transmute(b), transmute(c)));
23453 assert_eq!(r, e);
23454 }
23455
23456 #[simd_test(enable = "neon,fcma")]
23457 unsafe fn test_vcmlaq_rot180_f32() {
23458 let a: f32x4 = f32x4::new(1., 1., 1., 1.);
23459 let b: f32x4 = f32x4::new(1., -1., 1., -1.);
23460 let c: f32x4 = f32x4::new(1., 1., 1., 1.);
23461 let e: f32x4 = f32x4::new(0., 0., 0., 0.);
23462 let r: f32x4 = transmute(vcmlaq_rot180_f32(transmute(a), transmute(b), transmute(c)));
23463 assert_eq!(r, e);
23464 }
23465
23466 #[simd_test(enable = "neon,fcma")]
23467 unsafe fn test_vcmlaq_rot180_f64() {
23468 let a: f64x2 = f64x2::new(1., 1.);
23469 let b: f64x2 = f64x2::new(1., -1.);
23470 let c: f64x2 = f64x2::new(1., 1.);
23471 let e: f64x2 = f64x2::new(0., 0.);
23472 let r: f64x2 = transmute(vcmlaq_rot180_f64(transmute(a), transmute(b), transmute(c)));
23473 assert_eq!(r, e);
23474 }
23475
23476 #[simd_test(enable = "neon,fcma")]
23477 unsafe fn test_vcmla_rot270_f32() {
23478 let a: f32x2 = f32x2::new(1., 1.);
23479 let b: f32x2 = f32x2::new(1., -1.);
23480 let c: f32x2 = f32x2::new(1., 1.);
23481 let e: f32x2 = f32x2::new(0., 2.);
23482 let r: f32x2 = transmute(vcmla_rot270_f32(transmute(a), transmute(b), transmute(c)));
23483 assert_eq!(r, e);
23484 }
23485
23486 #[simd_test(enable = "neon,fcma")]
23487 unsafe fn test_vcmlaq_rot270_f32() {
23488 let a: f32x4 = f32x4::new(1., 1., 1., 1.);
23489 let b: f32x4 = f32x4::new(1., -1., 1., -1.);
23490 let c: f32x4 = f32x4::new(1., 1., 1., 1.);
23491 let e: f32x4 = f32x4::new(0., 2., 0., 2.);
23492 let r: f32x4 = transmute(vcmlaq_rot270_f32(transmute(a), transmute(b), transmute(c)));
23493 assert_eq!(r, e);
23494 }
23495
23496 #[simd_test(enable = "neon,fcma")]
23497 unsafe fn test_vcmlaq_rot270_f64() {
23498 let a: f64x2 = f64x2::new(1., 1.);
23499 let b: f64x2 = f64x2::new(1., -1.);
23500 let c: f64x2 = f64x2::new(1., 1.);
23501 let e: f64x2 = f64x2::new(0., 2.);
23502 let r: f64x2 = transmute(vcmlaq_rot270_f64(transmute(a), transmute(b), transmute(c)));
23503 assert_eq!(r, e);
23504 }
23505
23506 #[simd_test(enable = "neon,fcma")]
23507 unsafe fn test_vcmla_lane_f32() {
23508 let a: f32x2 = f32x2::new(1., -1.);
23509 let b: f32x2 = f32x2::new(-1., 1.);
23510 let c: f32x2 = f32x2::new(1., 1.);
23511 let e: f32x2 = f32x2::new(0., -2.);
23512 let r: f32x2 = transmute(vcmla_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
23513 assert_eq!(r, e);
23514 }
23515
23516 #[simd_test(enable = "neon,fcma")]
23517 unsafe fn test_vcmla_laneq_f32() {
23518 let a: f32x2 = f32x2::new(1., -1.);
23519 let b: f32x2 = f32x2::new(-1., 1.);
23520 let c: f32x4 = f32x4::new(1., 1., -1., -1.);
23521 let e: f32x2 = f32x2::new(0., -2.);
23522 let r: f32x2 = transmute(vcmla_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
23523 assert_eq!(r, e);
23524 }
23525
23526 #[simd_test(enable = "neon,fcma")]
23527 unsafe fn test_vcmlaq_lane_f32() {
23528 let a: f32x4 = f32x4::new(1., -1., 1., -1.);
23529 let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
23530 let c: f32x2 = f32x2::new(1., 1.);
23531 let e: f32x4 = f32x4::new(0., -2., 0., -2.);
23532 let r: f32x4 = transmute(vcmlaq_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
23533 assert_eq!(r, e);
23534 }
23535
23536 #[simd_test(enable = "neon,fcma")]
23537 unsafe fn test_vcmlaq_laneq_f32() {
23538 let a: f32x4 = f32x4::new(1., -1., 1., -1.);
23539 let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
23540 let c: f32x4 = f32x4::new(1., 1., -1., -1.);
23541 let e: f32x4 = f32x4::new(0., -2., 0., -2.);
23542 let r: f32x4 = transmute(vcmlaq_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
23543 assert_eq!(r, e);
23544 }
23545
23546 #[simd_test(enable = "neon,fcma")]
23547 unsafe fn test_vcmla_rot90_lane_f32() {
23548 let a: f32x2 = f32x2::new(1., -1.);
23549 let b: f32x2 = f32x2::new(-1., 1.);
23550 let c: f32x2 = f32x2::new(1., 1.);
23551 let e: f32x2 = f32x2::new(0., 0.);
23552 let r: f32x2 = transmute(vcmla_rot90_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
23553 assert_eq!(r, e);
23554 }
23555
23556 #[simd_test(enable = "neon,fcma")]
23557 unsafe fn test_vcmla_rot90_laneq_f32() {
23558 let a: f32x2 = f32x2::new(1., -1.);
23559 let b: f32x2 = f32x2::new(-1., 1.);
23560 let c: f32x4 = f32x4::new(1., 1., -1., -1.);
23561 let e: f32x2 = f32x2::new(0., 0.);
23562 let r: f32x2 = transmute(vcmla_rot90_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
23563 assert_eq!(r, e);
23564 }
23565
23566 #[simd_test(enable = "neon,fcma")]
23567 unsafe fn test_vcmlaq_rot90_lane_f32() {
23568 let a: f32x4 = f32x4::new(1., -1., 1., -1.);
23569 let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
23570 let c: f32x2 = f32x2::new(1., 1.);
23571 let e: f32x4 = f32x4::new(0., 0., 0., 0.);
23572 let r: f32x4 = transmute(vcmlaq_rot90_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
23573 assert_eq!(r, e);
23574 }
23575
23576 #[simd_test(enable = "neon,fcma")]
23577 unsafe fn test_vcmlaq_rot90_laneq_f32() {
23578 let a: f32x4 = f32x4::new(1., -1., 1., -1.);
23579 let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
23580 let c: f32x4 = f32x4::new(1., 1., -1., -1.);
23581 let e: f32x4 = f32x4::new(0., 0., 0., 0.);
23582 let r: f32x4 = transmute(vcmlaq_rot90_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
23583 assert_eq!(r, e);
23584 }
23585
23586 #[simd_test(enable = "neon,fcma")]
23587 unsafe fn test_vcmla_rot180_lane_f32() {
23588 let a: f32x2 = f32x2::new(1., -1.);
23589 let b: f32x2 = f32x2::new(-1., 1.);
23590 let c: f32x2 = f32x2::new(1., 1.);
23591 let e: f32x2 = f32x2::new(2., 0.);
23592 let r: f32x2 = transmute(vcmla_rot180_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
23593 assert_eq!(r, e);
23594 }
23595
23596 #[simd_test(enable = "neon,fcma")]
23597 unsafe fn test_vcmla_rot180_laneq_f32() {
23598 let a: f32x2 = f32x2::new(1., -1.);
23599 let b: f32x2 = f32x2::new(-1., 1.);
23600 let c: f32x4 = f32x4::new(1., 1., -1., -1.);
23601 let e: f32x2 = f32x2::new(2., 0.);
23602 let r: f32x2 = transmute(vcmla_rot180_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
23603 assert_eq!(r, e);
23604 }
23605
23606 #[simd_test(enable = "neon,fcma")]
23607 unsafe fn test_vcmlaq_rot180_lane_f32() {
23608 let a: f32x4 = f32x4::new(1., -1., 1., -1.);
23609 let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
23610 let c: f32x2 = f32x2::new(1., 1.);
23611 let e: f32x4 = f32x4::new(2., 0., 2., 0.);
23612 let r: f32x4 = transmute(vcmlaq_rot180_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
23613 assert_eq!(r, e);
23614 }
23615
23616 #[simd_test(enable = "neon,fcma")]
23617 unsafe fn test_vcmlaq_rot180_laneq_f32() {
23618 let a: f32x4 = f32x4::new(1., -1., 1., -1.);
23619 let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
23620 let c: f32x4 = f32x4::new(1., 1., -1., -1.);
23621 let e: f32x4 = f32x4::new(2., 0., 2., 0.);
23622 let r: f32x4 = transmute(vcmlaq_rot180_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
23623 assert_eq!(r, e);
23624 }
23625
23626 #[simd_test(enable = "neon,fcma")]
23627 unsafe fn test_vcmla_rot270_lane_f32() {
23628 let a: f32x2 = f32x2::new(1., -1.);
23629 let b: f32x2 = f32x2::new(-1., 1.);
23630 let c: f32x2 = f32x2::new(1., 1.);
23631 let e: f32x2 = f32x2::new(2., -2.);
23632 let r: f32x2 = transmute(vcmla_rot270_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
23633 assert_eq!(r, e);
23634 }
23635
23636 #[simd_test(enable = "neon,fcma")]
23637 unsafe fn test_vcmla_rot270_laneq_f32() {
23638 let a: f32x2 = f32x2::new(1., -1.);
23639 let b: f32x2 = f32x2::new(-1., 1.);
23640 let c: f32x4 = f32x4::new(1., 1., -1., -1.);
23641 let e: f32x2 = f32x2::new(2., -2.);
23642 let r: f32x2 = transmute(vcmla_rot270_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
23643 assert_eq!(r, e);
23644 }
23645
23646 #[simd_test(enable = "neon,fcma")]
23647 unsafe fn test_vcmlaq_rot270_lane_f32() {
23648 let a: f32x4 = f32x4::new(1., -1., 1., -1.);
23649 let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
23650 let c: f32x2 = f32x2::new(1., 1.);
23651 let e: f32x4 = f32x4::new(2., -2., 2., -2.);
23652 let r: f32x4 = transmute(vcmlaq_rot270_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
23653 assert_eq!(r, e);
23654 }
23655
23656 #[simd_test(enable = "neon,fcma")]
23657 unsafe fn test_vcmlaq_rot270_laneq_f32() {
23658 let a: f32x4 = f32x4::new(1., -1., 1., -1.);
23659 let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
23660 let c: f32x4 = f32x4::new(1., 1., -1., -1.);
23661 let e: f32x4 = f32x4::new(2., -2., 2., -2.);
23662 let r: f32x4 = transmute(vcmlaq_rot270_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
23663 assert_eq!(r, e);
23664 }
23665
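// vdot_*/vdotq_*: SDOT/UDOT add to each 32-bit lane of a the dot product of the
// corresponding four 8-bit elements of b and c; the _lane/_laneq variants reuse
// the single four-element group of c selected by LANE.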
23666 #[simd_test(enable = "neon,dotprod")]
23667 unsafe fn test_vdot_s32() {
23668 let a: i32x2 = i32x2::new(1, 2);
23669 let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
23670 let c: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
23671 let e: i32x2 = i32x2::new(31, 176);
23672 let r: i32x2 = transmute(vdot_s32(transmute(a), transmute(b), transmute(c)));
23673 assert_eq!(r, e);
23674 }
23675
23676 #[simd_test(enable = "neon,dotprod")]
23677 unsafe fn test_vdotq_s32() {
23678 let a: i32x4 = i32x4::new(1, 2, 1, 2);
23679 let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
23680 let c: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
23681 let e: i32x4 = i32x4::new(31, 176, 31, 176);
23682 let r: i32x4 = transmute(vdotq_s32(transmute(a), transmute(b), transmute(c)));
23683 assert_eq!(r, e);
23684 }
23685
23686 #[simd_test(enable = "neon,dotprod")]
23687 unsafe fn test_vdot_u32() {
23688 let a: u32x2 = u32x2::new(1, 2);
23689 let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
23690 let c: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
23691 let e: u32x2 = u32x2::new(31, 176);
23692 let r: u32x2 = transmute(vdot_u32(transmute(a), transmute(b), transmute(c)));
23693 assert_eq!(r, e);
23694 }
23695
23696 #[simd_test(enable = "neon,dotprod")]
23697 unsafe fn test_vdotq_u32() {
23698 let a: u32x4 = u32x4::new(1, 2, 1, 2);
23699 let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
23700 let c: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
23701 let e: u32x4 = u32x4::new(31, 176, 31, 176);
23702 let r: u32x4 = transmute(vdotq_u32(transmute(a), transmute(b), transmute(c)));
23703 assert_eq!(r, e);
23704 }
23705
23706 #[simd_test(enable = "neon,dotprod")]
23707 unsafe fn test_vdot_lane_s32() {
23708 let a: i32x2 = i32x2::new(1, 2);
23709 let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
23710 let c: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
23711 let e: i32x2 = i32x2::new(31, 72);
23712 let r: i32x2 = transmute(vdot_lane_s32::<0>(transmute(a), transmute(b), transmute(c)));
23713 assert_eq!(r, e);
23714 }
23715
23716 #[simd_test(enable = "neon,dotprod")]
23717 unsafe fn test_vdot_laneq_s32() {
23718 let a: i32x2 = i32x2::new(1, 2);
23719 let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
23720 let c: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
23721 let e: i32x2 = i32x2::new(31, 72);
23722 let r: i32x2 = transmute(vdot_laneq_s32::<0>(transmute(a), transmute(b), transmute(c)));
23723 assert_eq!(r, e);
23724 }
23725
23726 #[simd_test(enable = "neon,dotprod")]
23727 unsafe fn test_vdotq_lane_s32() {
23728 let a: i32x4 = i32x4::new(1, 2, 1, 2);
23729 let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
23730 let c: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
23731 let e: i32x4 = i32x4::new(31, 72, 31, 72);
23732 let r: i32x4 = transmute(vdotq_lane_s32::<0>(transmute(a), transmute(b), transmute(c)));
23733 assert_eq!(r, e);
23734 }
23735
23736 #[simd_test(enable = "neon,dotprod")]
23737 unsafe fn test_vdotq_laneq_s32() {
23738 let a: i32x4 = i32x4::new(1, 2, 1, 2);
23739 let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
23740 let c: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
23741 let e: i32x4 = i32x4::new(31, 72, 31, 72);
23742 let r: i32x4 = transmute(vdotq_laneq_s32::<0>(transmute(a), transmute(b), transmute(c)));
23743 assert_eq!(r, e);
23744 }
23745
23746 #[simd_test(enable = "neon,dotprod")]
23747 unsafe fn test_vdot_lane_u32() {
23748 let a: u32x2 = u32x2::new(1, 2);
23749 let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
23750 let c: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
23751 let e: u32x2 = u32x2::new(31, 72);
23752 let r: u32x2 = transmute(vdot_lane_u32::<0>(transmute(a), transmute(b), transmute(c)));
23753 assert_eq!(r, e);
23754 }
23755
23756 #[simd_test(enable = "neon,dotprod")]
23757 unsafe fn test_vdot_laneq_u32() {
23758 let a: u32x2 = u32x2::new(1, 2);
23759 let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
23760 let c: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
23761 let e: u32x2 = u32x2::new(31, 72);
23762 let r: u32x2 = transmute(vdot_laneq_u32::<0>(transmute(a), transmute(b), transmute(c)));
23763 assert_eq!(r, e);
23764 }
23765
23766 #[simd_test(enable = "neon,dotprod")]
23767 unsafe fn test_vdotq_lane_u32() {
23768 let a: u32x4 = u32x4::new(1, 2, 1, 2);
23769 let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
23770 let c: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
23771 let e: u32x4 = u32x4::new(31, 72, 31, 72);
23772 let r: u32x4 = transmute(vdotq_lane_u32::<0>(transmute(a), transmute(b), transmute(c)));
23773 assert_eq!(r, e);
23774 }
23775
23776 #[simd_test(enable = "neon,dotprod")]
23777 unsafe fn test_vdotq_laneq_u32() {
23778 let a: u32x4 = u32x4::new(1, 2, 1, 2);
23779 let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
23780 let c: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
23781 let e: u32x4 = u32x4::new(31, 72, 31, 72);
23782 let r: u32x4 = transmute(vdotq_laneq_u32::<0>(transmute(a), transmute(b), transmute(c)));
23783 assert_eq!(r, e);
23784 }
23785
23786 #[simd_test(enable = "neon")]
23787 unsafe fn test_vmax_f64() {
23788 let a: f64 = 1.0;
23789 let b: f64 = 0.0;
23790 let e: f64 = 1.0;
23791 let r: f64 = transmute(vmax_f64(transmute(a), transmute(b)));
23792 assert_eq!(r, e);
23793 }
23794
23795 #[simd_test(enable = "neon")]
23796 unsafe fn test_vmaxq_f64() {
23797 let a: f64x2 = f64x2::new(1.0, -2.0);
23798 let b: f64x2 = f64x2::new(0.0, 3.0);
23799 let e: f64x2 = f64x2::new(1.0, 3.0);
23800 let r: f64x2 = transmute(vmaxq_f64(transmute(a), transmute(b)));
23801 assert_eq!(r, e);
23802 }
23803
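// vmaxnm_*/vminnm_* follow the IEEE 754-2008 maxNum/minNum rules (a single NaN
// operand is ignored in favour of the numeric one), whereas vmax_*/vmin_* above
// propagate NaNs; the all-numeric inputs below do not exercise that difference.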
23804 #[simd_test(enable = "neon")]
23805 unsafe fn test_vmaxnm_f64() {
23806 let a: f64 = 1.0;
23807 let b: f64 = 8.0;
23808 let e: f64 = 8.0;
23809 let r: f64 = transmute(vmaxnm_f64(transmute(a), transmute(b)));
23810 assert_eq!(r, e);
23811 }
23812
23813 #[simd_test(enable = "neon")]
23814 unsafe fn test_vmaxnmq_f64() {
23815 let a: f64x2 = f64x2::new(1.0, 2.0);
23816 let b: f64x2 = f64x2::new(8.0, 16.0);
23817 let e: f64x2 = f64x2::new(8.0, 16.0);
23818 let r: f64x2 = transmute(vmaxnmq_f64(transmute(a), transmute(b)));
23819 assert_eq!(r, e);
23820 }
23821
23822 #[simd_test(enable = "neon")]
23823 unsafe fn test_vmaxnmv_f32() {
23824 let a: f32x2 = f32x2::new(1., 2.);
23825 let e: f32 = 2.;
23826 let r: f32 = transmute(vmaxnmv_f32(transmute(a)));
23827 assert_eq!(r, e);
23828 }
23829
23830 #[simd_test(enable = "neon")]
23831 unsafe fn test_vmaxnmvq_f64() {
23832 let a: f64x2 = f64x2::new(1., 2.);
23833 let e: f64 = 2.;
23834 let r: f64 = transmute(vmaxnmvq_f64(transmute(a)));
23835 assert_eq!(r, e);
23836 }
23837
23838 #[simd_test(enable = "neon")]
23839 unsafe fn test_vmaxnmvq_f32() {
23840 let a: f32x4 = f32x4::new(1., 2., 0., 1.);
23841 let e: f32 = 2.;
23842 let r: f32 = transmute(vmaxnmvq_f32(transmute(a)));
23843 assert_eq!(r, e);
23844 }
23845
23846 #[simd_test(enable = "neon")]
23847 unsafe fn test_vpmaxnm_f32() {
23848 let a: f32x2 = f32x2::new(1.0, 2.0);
23849 let b: f32x2 = f32x2::new(6.0, -3.0);
23850 let e: f32x2 = f32x2::new(2.0, 6.0);
23851 let r: f32x2 = transmute(vpmaxnm_f32(transmute(a), transmute(b)));
23852 assert_eq!(r, e);
23853 }
23854
23855 #[simd_test(enable = "neon")]
23856 unsafe fn test_vpmaxnmq_f64() {
23857 let a: f64x2 = f64x2::new(1.0, 2.0);
23858 let b: f64x2 = f64x2::new(6.0, -3.0);
23859 let e: f64x2 = f64x2::new(2.0, 6.0);
23860 let r: f64x2 = transmute(vpmaxnmq_f64(transmute(a), transmute(b)));
23861 assert_eq!(r, e);
23862 }
23863
23864 #[simd_test(enable = "neon")]
23865 unsafe fn test_vpmaxnmq_f32() {
23866 let a: f32x4 = f32x4::new(1.0, 2.0, 3.0, -4.0);
23867 let b: f32x4 = f32x4::new(8.0, 16.0, -1.0, 6.0);
23868 let e: f32x4 = f32x4::new(2.0, 3.0, 16.0, 6.0);
23869 let r: f32x4 = transmute(vpmaxnmq_f32(transmute(a), transmute(b)));
23870 assert_eq!(r, e);
23871 }
23872
23873 #[simd_test(enable = "neon")]
23874 unsafe fn test_vpmaxnms_f32() {
23875 let a: f32x2 = f32x2::new(1., 2.);
23876 let e: f32 = 2.;
23877 let r: f32 = transmute(vpmaxnms_f32(transmute(a)));
23878 assert_eq!(r, e);
23879 }
23880
23881 #[simd_test(enable = "neon")]
23882 unsafe fn test_vpmaxnmqd_f64() {
23883 let a: f64x2 = f64x2::new(1., 2.);
23884 let e: f64 = 2.;
23885 let r: f64 = transmute(vpmaxnmqd_f64(transmute(a)));
23886 assert_eq!(r, e);
23887 }
23888
23889 #[simd_test(enable = "neon")]
23890 unsafe fn test_vpmaxs_f32() {
23891 let a: f32x2 = f32x2::new(1., 2.);
23892 let e: f32 = 2.;
23893 let r: f32 = transmute(vpmaxs_f32(transmute(a)));
23894 assert_eq!(r, e);
23895 }
23896
23897 #[simd_test(enable = "neon")]
23898 unsafe fn test_vpmaxqd_f64() {
23899 let a: f64x2 = f64x2::new(1., 2.);
23900 let e: f64 = 2.;
23901 let r: f64 = transmute(vpmaxqd_f64(transmute(a)));
23902 assert_eq!(r, e);
23903 }
23904
23905 #[simd_test(enable = "neon")]
23906 unsafe fn test_vmin_f64() {
23907 let a: f64 = 1.0;
23908 let b: f64 = 0.0;
23909 let e: f64 = 0.0;
23910 let r: f64 = transmute(vmin_f64(transmute(a), transmute(b)));
23911 assert_eq!(r, e);
23912 }
23913
23914 #[simd_test(enable = "neon")]
23915 unsafe fn test_vminq_f64() {
23916 let a: f64x2 = f64x2::new(1.0, -2.0);
23917 let b: f64x2 = f64x2::new(0.0, 3.0);
23918 let e: f64x2 = f64x2::new(0.0, -2.0);
23919 let r: f64x2 = transmute(vminq_f64(transmute(a), transmute(b)));
23920 assert_eq!(r, e);
23921 }
23922
23923 #[simd_test(enable = "neon")]
23924 unsafe fn test_vminnm_f64() {
23925 let a: f64 = 1.0;
23926 let b: f64 = 8.0;
23927 let e: f64 = 1.0;
23928 let r: f64 = transmute(vminnm_f64(transmute(a), transmute(b)));
23929 assert_eq!(r, e);
23930 }
23931
23932 #[simd_test(enable = "neon")]
23933 unsafe fn test_vminnmq_f64() {
23934 let a: f64x2 = f64x2::new(1.0, 2.0);
23935 let b: f64x2 = f64x2::new(8.0, 16.0);
23936 let e: f64x2 = f64x2::new(1.0, 2.0);
23937 let r: f64x2 = transmute(vminnmq_f64(transmute(a), transmute(b)));
23938 assert_eq!(r, e);
23939 }
23940
23941 #[simd_test(enable = "neon")]
23942 unsafe fn test_vminnmv_f32() {
23943 let a: f32x2 = f32x2::new(1., 0.);
23944 let e: f32 = 0.;
23945 let r: f32 = transmute(vminnmv_f32(transmute(a)));
23946 assert_eq!(r, e);
23947 }
23948
23949 #[simd_test(enable = "neon")]
23950 unsafe fn test_vminnmvq_f64() {
23951 let a: f64x2 = f64x2::new(1., 0.);
23952 let e: f64 = 0.;
23953 let r: f64 = transmute(vminnmvq_f64(transmute(a)));
23954 assert_eq!(r, e);
23955 }
23956
23957 #[simd_test(enable = "neon")]
23958 unsafe fn test_vminnmvq_f32() {
23959 let a: f32x4 = f32x4::new(1., 0., 2., 3.);
23960 let e: f32 = 0.;
23961 let r: f32 = transmute(vminnmvq_f32(transmute(a)));
23962 assert_eq!(r, e);
23963 }
23964
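// vmovl_high_*: sign- or zero-extend the upper half of the source vector, e.g.
// the top eight i8 lanes widen to an i16x8.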
23965 #[simd_test(enable = "neon")]
23966 unsafe fn test_vmovl_high_s8() {
23967 let a: i8x16 = i8x16::new(1, 2, 3, 4, 3, 4, 5, 6, 3, 4, 5, 6, 7, 8, 9, 10);
23968 let e: i16x8 = i16x8::new(3, 4, 5, 6, 7, 8, 9, 10);
23969 let r: i16x8 = transmute(vmovl_high_s8(transmute(a)));
23970 assert_eq!(r, e);
23971 }
23972
23973 #[simd_test(enable = "neon")]
23974 unsafe fn test_vmovl_high_s16() {
23975 let a: i16x8 = i16x8::new(1, 2, 3, 4, 3, 4, 5, 6);
23976 let e: i32x4 = i32x4::new(3, 4, 5, 6);
23977 let r: i32x4 = transmute(vmovl_high_s16(transmute(a)));
23978 assert_eq!(r, e);
23979 }
23980
23981 #[simd_test(enable = "neon")]
23982 unsafe fn test_vmovl_high_s32() {
23983 let a: i32x4 = i32x4::new(1, 2, 3, 4);
23984 let e: i64x2 = i64x2::new(3, 4);
23985 let r: i64x2 = transmute(vmovl_high_s32(transmute(a)));
23986 assert_eq!(r, e);
23987 }
23988
23989 #[simd_test(enable = "neon")]
23990 unsafe fn test_vmovl_high_u8() {
23991 let a: u8x16 = u8x16::new(1, 2, 3, 4, 3, 4, 5, 6, 3, 4, 5, 6, 7, 8, 9, 10);
23992 let e: u16x8 = u16x8::new(3, 4, 5, 6, 7, 8, 9, 10);
23993 let r: u16x8 = transmute(vmovl_high_u8(transmute(a)));
23994 assert_eq!(r, e);
23995 }
23996
23997 #[simd_test(enable = "neon")]
23998 unsafe fn test_vmovl_high_u16() {
23999 let a: u16x8 = u16x8::new(1, 2, 3, 4, 3, 4, 5, 6);
24000 let e: u32x4 = u32x4::new(3, 4, 5, 6);
24001 let r: u32x4 = transmute(vmovl_high_u16(transmute(a)));
24002 assert_eq!(r, e);
24003 }
24004
24005 #[simd_test(enable = "neon")]
24006 unsafe fn test_vmovl_high_u32() {
24007 let a: u32x4 = u32x4::new(1, 2, 3, 4);
24008 let e: u64x2 = u64x2::new(3, 4);
24009 let r: u64x2 = transmute(vmovl_high_u32(transmute(a)));
24010 assert_eq!(r, e);
24011 }
24012
24013 #[simd_test(enable = "neon")]
24014 unsafe fn test_vpaddq_f32() {
24015 let a: f32x4 = f32x4::new(1., 2., 3., 4.);
24016 let b: f32x4 = f32x4::new(3., 4., 5., 6.);
24017 let e: f32x4 = f32x4::new(3., 7., 7., 11.);
24018 let r: f32x4 = transmute(vpaddq_f32(transmute(a), transmute(b)));
24019 assert_eq!(r, e);
24020 }
24021
24022 #[simd_test(enable = "neon")]
24023 unsafe fn test_vpaddq_f64() {
24024 let a: f64x2 = f64x2::new(1., 2.);
24025 let b: f64x2 = f64x2::new(3., 4.);
24026 let e: f64x2 = f64x2::new(3., 7.);
24027 let r: f64x2 = transmute(vpaddq_f64(transmute(a), transmute(b)));
24028 assert_eq!(r, e);
24029 }
24030
24031 #[simd_test(enable = "neon")]
24032 unsafe fn test_vpadds_f32() {
24033 let a: f32x2 = f32x2::new(1., 2.);
24034 let e: f32 = 3.;
24035 let r: f32 = transmute(vpadds_f32(transmute(a)));
24036 assert_eq!(r, e);
24037 }
24038
24039 #[simd_test(enable = "neon")]
24040 unsafe fn test_vpaddd_f64() {
24041 let a: f64x2 = f64x2::new(1., 2.);
24042 let e: f64 = 3.;
24043 let r: f64 = transmute(vpaddd_f64(transmute(a)));
24044 assert_eq!(r, e);
24045 }
24046
24047 #[simd_test(enable = "neon")]
24048 unsafe fn test_vpminnm_f32() {
24049 let a: f32x2 = f32x2::new(1.0, 2.0);
24050 let b: f32x2 = f32x2::new(6.0, -3.0);
24051 let e: f32x2 = f32x2::new(1.0, -3.0);
24052 let r: f32x2 = transmute(vpminnm_f32(transmute(a), transmute(b)));
24053 assert_eq!(r, e);
24054 }
24055
24056 #[simd_test(enable = "neon")]
24057 unsafe fn test_vpminnmq_f64() {
24058 let a: f64x2 = f64x2::new(1.0, 2.0);
24059 let b: f64x2 = f64x2::new(6.0, -3.0);
24060 let e: f64x2 = f64x2::new(1.0, -3.0);
24061 let r: f64x2 = transmute(vpminnmq_f64(transmute(a), transmute(b)));
24062 assert_eq!(r, e);
24063 }
24064
24065 #[simd_test(enable = "neon")]
24066 unsafe fn test_vpminnmq_f32() {
24067 let a: f32x4 = f32x4::new(1.0, 2.0, 3.0, -4.0);
24068 let b: f32x4 = f32x4::new(8.0, 16.0, -1.0, 6.0);
24069 let e: f32x4 = f32x4::new(1.0, -4.0, 8.0, -1.0);
24070 let r: f32x4 = transmute(vpminnmq_f32(transmute(a), transmute(b)));
24071 assert_eq!(r, e);
24072 }
24073
24074 #[simd_test(enable = "neon")]
24075 unsafe fn test_vpminnms_f32() {
24076 let a: f32x2 = f32x2::new(1., 2.);
24077 let e: f32 = 1.;
24078 let r: f32 = transmute(vpminnms_f32(transmute(a)));
24079 assert_eq!(r, e);
24080 }
24081
24082 #[simd_test(enable = "neon")]
24083 unsafe fn test_vpminnmqd_f64() {
24084 let a: f64x2 = f64x2::new(1., 2.);
24085 let e: f64 = 1.;
24086 let r: f64 = transmute(vpminnmqd_f64(transmute(a)));
24087 assert_eq!(r, e);
24088 }
24089
24090 #[simd_test(enable = "neon")]
24091 unsafe fn test_vpmins_f32() {
24092 let a: f32x2 = f32x2::new(1., 2.);
24093 let e: f32 = 1.;
24094 let r: f32 = transmute(vpmins_f32(transmute(a)));
24095 assert_eq!(r, e);
24096 }
24097
24098 #[simd_test(enable = "neon")]
24099 unsafe fn test_vpminqd_f64() {
24100 let a: f64x2 = f64x2::new(1., 2.);
24101 let e: f64 = 1.;
24102 let r: f64 = transmute(vpminqd_f64(transmute(a)));
24103 assert_eq!(r, e);
24104 }
24105
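// vqdmull* is the saturating doubling multiply long: each result lane is 2 * a * b at twice the element width.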
24106 #[simd_test(enable = "neon")]
24107 unsafe fn test_vqdmullh_s16() {
24108 let a: i16 = 2;
24109 let b: i16 = 3;
24110 let e: i32 = 12;
24111 let r: i32 = transmute(vqdmullh_s16(transmute(a), transmute(b)));
24112 assert_eq!(r, e);
24113 }
24114
24115 #[simd_test(enable = "neon")]
24116 unsafe fn test_vqdmulls_s32() {
24117 let a: i32 = 2;
24118 let b: i32 = 3;
24119 let e: i64 = 12;
24120 let r: i64 = transmute(vqdmulls_s32(transmute(a), transmute(b)));
24121 assert_eq!(r, e);
24122 }
24123
24124 #[simd_test(enable = "neon")]
24125 unsafe fn test_vqdmull_high_s16() {
24126 let a: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
24127 let b: i16x8 = i16x8::new(1, 2, 5, 6, 5, 6, 7, 8);
24128 let e: i32x4 = i32x4::new(40, 60, 84, 112);
24129 let r: i32x4 = transmute(vqdmull_high_s16(transmute(a), transmute(b)));
24130 assert_eq!(r, e);
24131 }
24132
24133 #[simd_test(enable = "neon")]
24134 unsafe fn test_vqdmull_high_s32() {
24135 let a: i32x4 = i32x4::new(0, 1, 4, 5);
24136 let b: i32x4 = i32x4::new(1, 2, 5, 6);
24137 let e: i64x2 = i64x2::new(40, 60);
24138 let r: i64x2 = transmute(vqdmull_high_s32(transmute(a), transmute(b)));
24139 assert_eq!(r, e);
24140 }
24141
24142 #[simd_test(enable = "neon")]
24143 unsafe fn test_vqdmull_high_n_s16() {
24144 let a: i16x8 = i16x8::new(0, 2, 8, 10, 8, 10, 12, 14);
24145 let b: i16 = 2;
24146 let e: i32x4 = i32x4::new(32, 40, 48, 56);
24147 let r: i32x4 = transmute(vqdmull_high_n_s16(transmute(a), transmute(b)));
24148 assert_eq!(r, e);
24149 }
24150
24151 #[simd_test(enable = "neon")]
24152 unsafe fn test_vqdmull_high_n_s32() {
24153 let a: i32x4 = i32x4::new(0, 2, 8, 10);
24154 let b: i32 = 2;
24155 let e: i64x2 = i64x2::new(32, 40);
24156 let r: i64x2 = transmute(vqdmull_high_n_s32(transmute(a), transmute(b)));
24157 assert_eq!(r, e);
24158 }
24159
24160 #[simd_test(enable = "neon")]
24161 unsafe fn test_vqdmull_laneq_s16() {
24162 let a: i16x4 = i16x4::new(1, 2, 3, 4);
24163 let b: i16x8 = i16x8::new(0, 2, 2, 0, 2, 0, 0, 0);
24164 let e: i32x4 = i32x4::new(4, 8, 12, 16);
24165 let r: i32x4 = transmute(vqdmull_laneq_s16::<4>(transmute(a), transmute(b)));
24166 assert_eq!(r, e);
24167 }
24168
24169 #[simd_test(enable = "neon")]
24170 unsafe fn test_vqdmull_laneq_s32() {
24171 let a: i32x2 = i32x2::new(1, 2);
24172 let b: i32x4 = i32x4::new(0, 2, 2, 0);
24173 let e: i64x2 = i64x2::new(4, 8);
24174 let r: i64x2 = transmute(vqdmull_laneq_s32::<2>(transmute(a), transmute(b)));
24175 assert_eq!(r, e);
24176 }
24177
24178 #[simd_test(enable = "neon")]
24179 unsafe fn test_vqdmullh_lane_s16() {
24180 let a: i16 = 2;
24181 let b: i16x4 = i16x4::new(0, 2, 2, 0);
24182 let e: i32 = 8;
24183 let r: i32 = transmute(vqdmullh_lane_s16::<2>(transmute(a), transmute(b)));
24184 assert_eq!(r, e);
24185 }
24186
24187 #[simd_test(enable = "neon")]
24188 unsafe fn test_vqdmullh_laneq_s16() {
24189 let a: i16 = 2;
24190 let b: i16x8 = i16x8::new(0, 2, 2, 0, 2, 0, 0, 0);
24191 let e: i32 = 8;
24192 let r: i32 = transmute(vqdmullh_laneq_s16::<4>(transmute(a), transmute(b)));
24193 assert_eq!(r, e);
24194 }
24195
24196 #[simd_test(enable = "neon")]
24197 unsafe fn test_vqdmulls_lane_s32() {
24198 let a: i32 = 2;
24199 let b: i32x2 = i32x2::new(0, 2);
24200 let e: i64 = 8;
24201 let r: i64 = transmute(vqdmulls_lane_s32::<1>(transmute(a), transmute(b)));
24202 assert_eq!(r, e);
24203 }
24204
24205 #[simd_test(enable = "neon")]
24206 unsafe fn test_vqdmulls_laneq_s32() {
24207 let a: i32 = 2;
24208 let b: i32x4 = i32x4::new(0, 2, 2, 0);
24209 let e: i64 = 8;
24210 let r: i64 = transmute(vqdmulls_laneq_s32::<2>(transmute(a), transmute(b)));
24211 assert_eq!(r, e);
24212 }
24213
24214 #[simd_test(enable = "neon")]
24215 unsafe fn test_vqdmull_high_lane_s16() {
24216 let a: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
24217 let b: i16x4 = i16x4::new(0, 2, 2, 0);
24218 let e: i32x4 = i32x4::new(16, 20, 24, 28);
24219 let r: i32x4 = transmute(vqdmull_high_lane_s16::<2>(transmute(a), transmute(b)));
24220 assert_eq!(r, e);
24221 }
24222
24223 #[simd_test(enable = "neon")]
24224 unsafe fn test_vqdmull_high_lane_s32() {
24225 let a: i32x4 = i32x4::new(0, 1, 4, 5);
24226 let b: i32x2 = i32x2::new(0, 2);
24227 let e: i64x2 = i64x2::new(16, 20);
24228 let r: i64x2 = transmute(vqdmull_high_lane_s32::<1>(transmute(a), transmute(b)));
24229 assert_eq!(r, e);
24230 }
24231
24232 #[simd_test(enable = "neon")]
24233 unsafe fn test_vqdmull_high_laneq_s16() {
24234 let a: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
24235 let b: i16x8 = i16x8::new(0, 2, 2, 0, 2, 0, 0, 0);
24236 let e: i32x4 = i32x4::new(16, 20, 24, 28);
24237 let r: i32x4 = transmute(vqdmull_high_laneq_s16::<4>(transmute(a), transmute(b)));
24238 assert_eq!(r, e);
24239 }
24240
24241 #[simd_test(enable = "neon")]
24242 unsafe fn test_vqdmull_high_laneq_s32() {
24243 let a: i32x4 = i32x4::new(0, 1, 4, 5);
24244 let b: i32x4 = i32x4::new(0, 2, 2, 0);
24245 let e: i64x2 = i64x2::new(16, 20);
24246 let r: i64x2 = transmute(vqdmull_high_laneq_s32::<2>(transmute(a), transmute(b)));
24247 assert_eq!(r, e);
24248 }
24249
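// vqdmlal* accumulates the doubled widening product (a + 2 * b * c) with saturation; the _high forms use the upper halves of b and c.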
24250 #[simd_test(enable = "neon")]
24251 unsafe fn test_vqdmlal_high_s16() {
24252 let a: i32x4 = i32x4::new(1, 2, 3, 4);
24253 let b: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
24254 let c: i16x8 = i16x8::new(1, 2, 5, 6, 5, 6, 7, 8);
24255 let e: i32x4 = i32x4::new(41, 62, 87, 116);
24256 let r: i32x4 = transmute(vqdmlal_high_s16(transmute(a), transmute(b), transmute(c)));
24257 assert_eq!(r, e);
24258 }
24259
24260 #[simd_test(enable = "neon")]
24261 unsafe fn test_vqdmlal_high_s32() {
24262 let a: i64x2 = i64x2::new(1, 2);
24263 let b: i32x4 = i32x4::new(0, 1, 4, 5);
24264 let c: i32x4 = i32x4::new(1, 2, 5, 6);
24265 let e: i64x2 = i64x2::new(41, 62);
24266 let r: i64x2 = transmute(vqdmlal_high_s32(transmute(a), transmute(b), transmute(c)));
24267 assert_eq!(r, e);
24268 }
24269
24270 #[simd_test(enable = "neon")]
24271 unsafe fn test_vqdmlal_high_n_s16() {
24272 let a: i32x4 = i32x4::new(1, 2, 3, 4);
24273 let b: i16x8 = i16x8::new(0, 2, 8, 10, 8, 10, 12, 14);
24274 let c: i16 = 2;
24275 let e: i32x4 = i32x4::new(33, 42, 51, 60);
24276 let r: i32x4 = transmute(vqdmlal_high_n_s16(transmute(a), transmute(b), transmute(c)));
24277 assert_eq!(r, e);
24278 }
24279
24280 #[simd_test(enable = "neon")]
24281 unsafe fn test_vqdmlal_high_n_s32() {
24282 let a: i64x2 = i64x2::new(1, 2);
24283 let b: i32x4 = i32x4::new(0, 2, 8, 10);
24284 let c: i32 = 2;
24285 let e: i64x2 = i64x2::new(33, 42);
24286 let r: i64x2 = transmute(vqdmlal_high_n_s32(transmute(a), transmute(b), transmute(c)));
24287 assert_eq!(r, e);
24288 }
24289
24290 #[simd_test(enable = "neon")]
24291 unsafe fn test_vqdmlal_laneq_s16() {
24292 let a: i32x4 = i32x4::new(1, 2, 3, 4);
24293 let b: i16x4 = i16x4::new(1, 2, 3, 4);
24294 let c: i16x8 = i16x8::new(0, 2, 2, 0, 2, 0, 0, 0);
24295 let e: i32x4 = i32x4::new(5, 10, 15, 20);
24296 let r: i32x4 = transmute(vqdmlal_laneq_s16::<2>(transmute(a), transmute(b), transmute(c)));
24297 assert_eq!(r, e);
24298 }
24299
24300 #[simd_test(enable = "neon")]
24301 unsafe fn test_vqdmlal_laneq_s32() {
24302 let a: i64x2 = i64x2::new(1, 2);
24303 let b: i32x2 = i32x2::new(1, 2);
24304 let c: i32x4 = i32x4::new(0, 2, 2, 0);
24305 let e: i64x2 = i64x2::new(5, 10);
24306 let r: i64x2 = transmute(vqdmlal_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
24307 assert_eq!(r, e);
24308 }
24309
24310 #[simd_test(enable = "neon")]
24311 unsafe fn test_vqdmlal_high_lane_s16() {
24312 let a: i32x4 = i32x4::new(1, 2, 3, 4);
24313 let b: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
24314 let c: i16x4 = i16x4::new(0, 2, 0, 0);
24315 let e: i32x4 = i32x4::new(17, 22, 27, 32);
24316 let r: i32x4 = transmute(vqdmlal_high_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
24317 assert_eq!(r, e);
24318 }
24319
24320 #[simd_test(enable = "neon")]
24321 unsafe fn test_vqdmlal_high_laneq_s16() {
24322 let a: i32x4 = i32x4::new(1, 2, 3, 4);
24323 let b: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
24324 let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
24325 let e: i32x4 = i32x4::new(17, 22, 27, 32);
24326 let r: i32x4 = transmute(vqdmlal_high_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
24327 assert_eq!(r, e);
24328 }
24329
24330 #[simd_test(enable = "neon")]
24331 unsafe fn test_vqdmlal_high_lane_s32() {
24332 let a: i64x2 = i64x2::new(1, 2);
24333 let b: i32x4 = i32x4::new(0, 1, 4, 5);
24334 let c: i32x2 = i32x2::new(0, 2);
24335 let e: i64x2 = i64x2::new(17, 22);
24336 let r: i64x2 = transmute(vqdmlal_high_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
24337 assert_eq!(r, e);
24338 }
24339
24340 #[simd_test(enable = "neon")]
24341 unsafe fn test_vqdmlal_high_laneq_s32() {
24342 let a: i64x2 = i64x2::new(1, 2);
24343 let b: i32x4 = i32x4::new(0, 1, 4, 5);
24344 let c: i32x4 = i32x4::new(0, 2, 0, 0);
24345 let e: i64x2 = i64x2::new(17, 22);
24346 let r: i64x2 = transmute(vqdmlal_high_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
24347 assert_eq!(r, e);
24348 }
24349
24350 #[simd_test(enable = "neon")]
24351 unsafe fn test_vqdmlalh_s16() {
24352 let a: i32 = 1;
24353 let b: i16 = 1;
24354 let c: i16 = 2;
24355 let e: i32 = 5;
24356 let r: i32 = transmute(vqdmlalh_s16(transmute(a), transmute(b), transmute(c)));
24357 assert_eq!(r, e);
24358 }
24359
24360 #[simd_test(enable = "neon")]
24361 unsafe fn test_vqdmlals_s32() {
24362 let a: i64 = 1;
24363 let b: i32 = 1;
24364 let c: i32 = 2;
24365 let e: i64 = 5;
24366 let r: i64 = transmute(vqdmlals_s32(transmute(a), transmute(b), transmute(c)));
24367 assert_eq!(r, e);
24368 }
24369
24370 #[simd_test(enable = "neon")]
24371 unsafe fn test_vqdmlalh_lane_s16() {
24372 let a: i32 = 1;
24373 let b: i16 = 1;
24374 let c: i16x4 = i16x4::new(2, 1, 1, 1);
24375 let e: i32 = 5;
24376 let r: i32 = transmute(vqdmlalh_lane_s16::<0>(transmute(a), transmute(b), transmute(c)));
24377 assert_eq!(r, e);
24378 }
24379
24380 #[simd_test(enable = "neon")]
24381 unsafe fn test_vqdmlalh_laneq_s16() {
24382 let a: i32 = 1;
24383 let b: i16 = 1;
24384 let c: i16x8 = i16x8::new(2, 1, 1, 1, 1, 1, 1, 1);
24385 let e: i32 = 5;
24386 let r: i32 = transmute(vqdmlalh_laneq_s16::<0>(transmute(a), transmute(b), transmute(c)));
24387 assert_eq!(r, e);
24388 }
24389
24390 #[simd_test(enable = "neon")]
24391 unsafe fn test_vqdmlals_lane_s32() {
24392 let a: i64 = 1;
24393 let b: i32 = 1;
24394 let c: i32x2 = i32x2::new(2, 1);
24395 let e: i64 = 5;
24396 let r: i64 = transmute(vqdmlals_lane_s32::<0>(transmute(a), transmute(b), transmute(c)));
24397 assert_eq!(r, e);
24398 }
24399
24400 #[simd_test(enable = "neon")]
24401 unsafe fn test_vqdmlals_laneq_s32() {
24402 let a: i64 = 1;
24403 let b: i32 = 1;
24404 let c: i32x4 = i32x4::new(2, 1, 1, 1);
24405 let e: i64 = 5;
24406 let r: i64 = transmute(vqdmlals_laneq_s32::<0>(transmute(a), transmute(b), transmute(c)));
24407 assert_eq!(r, e);
24408 }
24409
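// vqdmlsl* subtracts the doubled widening product (a - 2 * b * c) with saturation.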
24410 #[simd_test(enable = "neon")]
24411 unsafe fn test_vqdmlsl_high_s16() {
24412 let a: i32x4 = i32x4::new(39, 58, 81, 108);
24413 let b: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
24414 let c: i16x8 = i16x8::new(1, 2, 5, 6, 5, 6, 7, 8);
24415 let e: i32x4 = i32x4::new(-1, -2, -3, -4);
24416 let r: i32x4 = transmute(vqdmlsl_high_s16(transmute(a), transmute(b), transmute(c)));
24417 assert_eq!(r, e);
24418 }
24419
24420 #[simd_test(enable = "neon")]
24421 unsafe fn test_vqdmlsl_high_s32() {
24422 let a: i64x2 = i64x2::new(39, 58);
24423 let b: i32x4 = i32x4::new(0, 1, 4, 5);
24424 let c: i32x4 = i32x4::new(1, 2, 5, 6);
24425 let e: i64x2 = i64x2::new(-1, -2);
24426 let r: i64x2 = transmute(vqdmlsl_high_s32(transmute(a), transmute(b), transmute(c)));
24427 assert_eq!(r, e);
24428 }
24429
24430 #[simd_test(enable = "neon")]
24431 unsafe fn test_vqdmlsl_high_n_s16() {
24432 let a: i32x4 = i32x4::new(31, 38, 45, 52);
24433 let b: i16x8 = i16x8::new(0, 2, 8, 10, 8, 10, 12, 14);
24434 let c: i16 = 2;
24435 let e: i32x4 = i32x4::new(-1, -2, -3, -4);
24436 let r: i32x4 = transmute(vqdmlsl_high_n_s16(transmute(a), transmute(b), transmute(c)));
24437 assert_eq!(r, e);
24438 }
24439
24440 #[simd_test(enable = "neon")]
24441 unsafe fn test_vqdmlsl_high_n_s32() {
24442 let a: i64x2 = i64x2::new(31, 38);
24443 let b: i32x4 = i32x4::new(0, 2, 8, 10);
24444 let c: i32 = 2;
24445 let e: i64x2 = i64x2::new(-1, -2);
24446 let r: i64x2 = transmute(vqdmlsl_high_n_s32(transmute(a), transmute(b), transmute(c)));
24447 assert_eq!(r, e);
24448 }
24449
24450 #[simd_test(enable = "neon")]
24451 unsafe fn test_vqdmlsl_laneq_s16() {
24452 let a: i32x4 = i32x4::new(3, 6, 9, 12);
24453 let b: i16x4 = i16x4::new(1, 2, 3, 4);
24454 let c: i16x8 = i16x8::new(0, 2, 2, 0, 2, 0, 0, 0);
24455 let e: i32x4 = i32x4::new(-1, -2, -3, -4);
24456 let r: i32x4 = transmute(vqdmlsl_laneq_s16::<2>(transmute(a), transmute(b), transmute(c)));
24457 assert_eq!(r, e);
24458 }
24459
24460 #[simd_test(enable = "neon")]
24461 unsafe fn test_vqdmlsl_laneq_s32() {
24462 let a: i64x2 = i64x2::new(3, 6);
24463 let b: i32x2 = i32x2::new(1, 2);
24464 let c: i32x4 = i32x4::new(0, 2, 2, 0);
24465 let e: i64x2 = i64x2::new(-1, -2);
24466 let r: i64x2 = transmute(vqdmlsl_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
24467 assert_eq!(r, e);
24468 }
24469
24470 #[simd_test(enable = "neon")]
24471 unsafe fn test_vqdmlsl_high_lane_s16() {
24472 let a: i32x4 = i32x4::new(15, 18, 21, 24);
24473 let b: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
24474 let c: i16x4 = i16x4::new(0, 2, 0, 0);
24475 let e: i32x4 = i32x4::new(-1, -2, -3, -4);
24476 let r: i32x4 = transmute(vqdmlsl_high_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
24477 assert_eq!(r, e);
24478 }
24479
24480 #[simd_test(enable = "neon")]
24481 unsafe fn test_vqdmlsl_high_laneq_s16() {
24482 let a: i32x4 = i32x4::new(15, 18, 21, 24);
24483 let b: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
24484 let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
24485 let e: i32x4 = i32x4::new(-1, -2, -3, -4);
24486 let r: i32x4 = transmute(vqdmlsl_high_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
24487 assert_eq!(r, e);
24488 }
24489
24490 #[simd_test(enable = "neon")]
24491 unsafe fn test_vqdmlsl_high_lane_s32() {
24492 let a: i64x2 = i64x2::new(15, 18);
24493 let b: i32x4 = i32x4::new(0, 1, 4, 5);
24494 let c: i32x2 = i32x2::new(0, 2);
24495 let e: i64x2 = i64x2::new(-1, -2);
24496 let r: i64x2 = transmute(vqdmlsl_high_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
24497 assert_eq!(r, e);
24498 }
24499
24500 #[simd_test(enable = "neon")]
24501 unsafe fn test_vqdmlsl_high_laneq_s32() {
24502 let a: i64x2 = i64x2::new(15, 18);
24503 let b: i32x4 = i32x4::new(0, 1, 4, 5);
24504 let c: i32x4 = i32x4::new(0, 2, 0, 0);
24505 let e: i64x2 = i64x2::new(-1, -2);
24506 let r: i64x2 = transmute(vqdmlsl_high_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
24507 assert_eq!(r, e);
24508 }
24509
24510 #[simd_test(enable = "neon")]
24511 unsafe fn test_vqdmlslh_s16() {
24512 let a: i32 = 10;
24513 let b: i16 = 1;
24514 let c: i16 = 2;
24515 let e: i32 = 6;
24516 let r: i32 = transmute(vqdmlslh_s16(transmute(a), transmute(b), transmute(c)));
24517 assert_eq!(r, e);
24518 }
24519
24520 #[simd_test(enable = "neon")]
24521 unsafe fn test_vqdmlsls_s32() {
24522 let a: i64 = 10;
24523 let b: i32 = 1;
24524 let c: i32 = 2;
24525 let e: i64 = 6;
24526 let r: i64 = transmute(vqdmlsls_s32(transmute(a), transmute(b), transmute(c)));
24527 assert_eq!(r, e);
24528 }
24529
24530 #[simd_test(enable = "neon")]
24531 unsafe fn test_vqdmlslh_lane_s16() {
24532 let a: i32 = 10;
24533 let b: i16 = 1;
24534 let c: i16x4 = i16x4::new(2, 1, 1, 1);
24535 let e: i32 = 6;
24536 let r: i32 = transmute(vqdmlslh_lane_s16::<0>(transmute(a), transmute(b), transmute(c)));
24537 assert_eq!(r, e);
24538 }
24539
24540 #[simd_test(enable = "neon")]
24541 unsafe fn test_vqdmlslh_laneq_s16() {
24542 let a: i32 = 10;
24543 let b: i16 = 1;
24544 let c: i16x8 = i16x8::new(2, 1, 1, 1, 1, 1, 1, 1);
24545 let e: i32 = 6;
24546 let r: i32 = transmute(vqdmlslh_laneq_s16::<0>(transmute(a), transmute(b), transmute(c)));
24547 assert_eq!(r, e);
24548 }
24549
24550 #[simd_test(enable = "neon")]
24551 unsafe fn test_vqdmlsls_lane_s32() {
24552 let a: i64 = 10;
24553 let b: i32 = 1;
24554 let c: i32x2 = i32x2::new(2, 1);
24555 let e: i64 = 6;
24556 let r: i64 = transmute(vqdmlsls_lane_s32::<0>(transmute(a), transmute(b), transmute(c)));
24557 assert_eq!(r, e);
24558 }
24559
24560 #[simd_test(enable = "neon")]
24561 unsafe fn test_vqdmlsls_laneq_s32() {
24562 let a: i64 = 10;
24563 let b: i32 = 1;
24564 let c: i32x4 = i32x4::new(2, 1, 1, 1);
24565 let e: i64 = 6;
24566 let r: i64 = transmute(vqdmlsls_laneq_s32::<0>(transmute(a), transmute(b), transmute(c)));
24567 assert_eq!(r, e);
24568 }
24569
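// vqdmulh* keeps only the high half of the doubled product, (2 * a * b) >> element_bits, which is why 1 * 2 yields 0 below.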
24570 #[simd_test(enable = "neon")]
24571 unsafe fn test_vqdmulhh_s16() {
24572 let a: i16 = 1;
24573 let b: i16 = 2;
24574 let e: i16 = 0;
24575 let r: i16 = transmute(vqdmulhh_s16(transmute(a), transmute(b)));
24576 assert_eq!(r, e);
24577 }
24578
24579 #[simd_test(enable = "neon")]
24580 unsafe fn test_vqdmulhs_s32() {
24581 let a: i32 = 1;
24582 let b: i32 = 2;
24583 let e: i32 = 0;
24584 let r: i32 = transmute(vqdmulhs_s32(transmute(a), transmute(b)));
24585 assert_eq!(r, e);
24586 }
24587
24588 #[simd_test(enable = "neon")]
24589 unsafe fn test_vqdmulhh_lane_s16() {
24590 let a: i16 = 2;
24591 let b: i16x4 = i16x4::new(0, 0, 0x7F_FF, 0);
24592 let e: i16 = 1;
24593 let r: i16 = transmute(vqdmulhh_lane_s16::<2>(transmute(a), transmute(b)));
24594 assert_eq!(r, e);
24595 }
24596
24597 #[simd_test(enable = "neon")]
24598 unsafe fn test_vqdmulhh_laneq_s16() {
24599 let a: i16 = 2;
24600 let b: i16x8 = i16x8::new(0, 0, 0x7F_FF, 0, 0, 0, 0, 0);
24601 let e: i16 = 1;
24602 let r: i16 = transmute(vqdmulhh_laneq_s16::<2>(transmute(a), transmute(b)));
24603 assert_eq!(r, e);
24604 }
24605
24606 #[simd_test(enable = "neon")]
24607 unsafe fn test_vqdmulhs_lane_s32() {
24608 let a: i32 = 2;
24609 let b: i32x2 = i32x2::new(0, 0x7F_FF_FF_FF);
24610 let e: i32 = 1;
24611 let r: i32 = transmute(vqdmulhs_lane_s32::<1>(transmute(a), transmute(b)));
24612 assert_eq!(r, e);
24613 }
24614
24615 #[simd_test(enable = "neon")]
24616 unsafe fn test_vqdmulhs_laneq_s32() {
24617 let a: i32 = 2;
24618 let b: i32x4 = i32x4::new(0, 0x7F_FF_FF_FF, 0, 0);
24619 let e: i32 = 1;
24620 let r: i32 = transmute(vqdmulhs_laneq_s32::<1>(transmute(a), transmute(b)));
24621 assert_eq!(r, e);
24622 }
24623
24624 #[simd_test(enable = "neon")]
24625 unsafe fn test_vqdmulh_lane_s16() {
24626 let a: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
24627 let b: i16x4 = i16x4::new(2, 1, 1, 1);
24628 let e: i16x4 = i16x4::new(1, 1, 1, 1);
24629 let r: i16x4 = transmute(vqdmulh_lane_s16::<0>(transmute(a), transmute(b)));
24630 assert_eq!(r, e);
24631 }
24632
24633 #[simd_test(enable = "neon")]
24634 unsafe fn test_vqdmulhq_lane_s16() {
24635 let a: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
24636 let b: i16x4 = i16x4::new(2, 1, 1, 1);
24637 let e: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
24638 let r: i16x8 = transmute(vqdmulhq_lane_s16::<0>(transmute(a), transmute(b)));
24639 assert_eq!(r, e);
24640 }
24641
24642 #[simd_test(enable = "neon")]
24643 unsafe fn test_vqdmulh_lane_s32() {
24644 let a: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
24645 let b: i32x2 = i32x2::new(2, 1);
24646 let e: i32x2 = i32x2::new(1, 1);
24647 let r: i32x2 = transmute(vqdmulh_lane_s32::<0>(transmute(a), transmute(b)));
24648 assert_eq!(r, e);
24649 }
24650
24651 #[simd_test(enable = "neon")]
24652 unsafe fn test_vqdmulhq_lane_s32() {
24653 let a: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
24654 let b: i32x2 = i32x2::new(2, 1);
24655 let e: i32x4 = i32x4::new(1, 1, 1, 1);
24656 let r: i32x4 = transmute(vqdmulhq_lane_s32::<0>(transmute(a), transmute(b)));
24657 assert_eq!(r, e);
24658 }
24659
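// vqmovn* narrows each lane to half the width with saturation; the vqmovun* variants saturate signed inputs to an unsigned result.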
24660 #[simd_test(enable = "neon")]
24661 unsafe fn test_vqmovnh_s16() {
24662 let a: i16 = 1;
24663 let e: i8 = 1;
24664 let r: i8 = transmute(vqmovnh_s16(transmute(a)));
24665 assert_eq!(r, e);
24666 }
24667
24668 #[simd_test(enable = "neon")]
24669 unsafe fn test_vqmovns_s32() {
24670 let a: i32 = 1;
24671 let e: i16 = 1;
24672 let r: i16 = transmute(vqmovns_s32(transmute(a)));
24673 assert_eq!(r, e);
24674 }
24675
24676 #[simd_test(enable = "neon")]
24677 unsafe fn test_vqmovnh_u16() {
24678 let a: u16 = 1;
24679 let e: u8 = 1;
24680 let r: u8 = transmute(vqmovnh_u16(transmute(a)));
24681 assert_eq!(r, e);
24682 }
24683
24684 #[simd_test(enable = "neon")]
24685 unsafe fn test_vqmovns_u32() {
24686 let a: u32 = 1;
24687 let e: u16 = 1;
24688 let r: u16 = transmute(vqmovns_u32(transmute(a)));
24689 assert_eq!(r, e);
24690 }
24691
24692 #[simd_test(enable = "neon")]
24693 unsafe fn test_vqmovnd_s64() {
24694 let a: i64 = 1;
24695 let e: i32 = 1;
24696 let r: i32 = transmute(vqmovnd_s64(transmute(a)));
24697 assert_eq!(r, e);
24698 }
24699
24700 #[simd_test(enable = "neon")]
24701 unsafe fn test_vqmovnd_u64() {
24702 let a: u64 = 1;
24703 let e: u32 = 1;
24704 let r: u32 = transmute(vqmovnd_u64(transmute(a)));
24705 assert_eq!(r, e);
24706 }
24707
24708 #[simd_test(enable = "neon")]
24709 unsafe fn test_vqmovn_high_s16() {
24710 let a: i8x8 = i8x8::new(0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F);
24711 let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
24712 let e: i8x16 = i8x16::new(0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F);
24713 let r: i8x16 = transmute(vqmovn_high_s16(transmute(a), transmute(b)));
24714 assert_eq!(r, e);
24715 }
24716
24717 #[simd_test(enable = "neon")]
24718 unsafe fn test_vqmovn_high_s32() {
24719 let a: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
24720 let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
24721 let e: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
24722 let r: i16x8 = transmute(vqmovn_high_s32(transmute(a), transmute(b)));
24723 assert_eq!(r, e);
24724 }
24725
24726 #[simd_test(enable = "neon")]
24727 unsafe fn test_vqmovn_high_s64() {
24728 let a: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
24729 let b: i64x2 = i64x2::new(0x7F_FF_FF_FF_FF_FF_FF_FF, 0x7F_FF_FF_FF_FF_FF_FF_FF);
24730 let e: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
24731 let r: i32x4 = transmute(vqmovn_high_s64(transmute(a), transmute(b)));
24732 assert_eq!(r, e);
24733 }
24734
24735 #[simd_test(enable = "neon")]
24736 unsafe fn test_vqmovn_high_u16() {
24737 let a: u8x8 = u8x8::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
24738 let b: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
24739 let e: u8x16 = u8x16::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
24740 let r: u8x16 = transmute(vqmovn_high_u16(transmute(a), transmute(b)));
24741 assert_eq!(r, e);
24742 }
24743
24744 #[simd_test(enable = "neon")]
24745 unsafe fn test_vqmovn_high_u32() {
24746 let a: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
24747 let b: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
24748 let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
24749 let r: u16x8 = transmute(vqmovn_high_u32(transmute(a), transmute(b)));
24750 assert_eq!(r, e);
24751 }
24752
24753 #[simd_test(enable = "neon")]
24754 unsafe fn test_vqmovn_high_u64() {
24755 let a: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
24756 let b: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
24757 let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
24758 let r: u32x4 = transmute(vqmovn_high_u64(transmute(a), transmute(b)));
24759 assert_eq!(r, e);
24760 }
24761
24762 #[simd_test(enable = "neon")]
24763 unsafe fn test_vqmovunh_s16() {
24764 let a: i16 = 1;
24765 let e: u8 = 1;
24766 let r: u8 = transmute(vqmovunh_s16(transmute(a)));
24767 assert_eq!(r, e);
24768 }
24769
24770 #[simd_test(enable = "neon")]
24771 unsafe fn test_vqmovuns_s32() {
24772 let a: i32 = 1;
24773 let e: u16 = 1;
24774 let r: u16 = transmute(vqmovuns_s32(transmute(a)));
24775 assert_eq!(r, e);
24776 }
24777
24778 #[simd_test(enable = "neon")]
24779 unsafe fn test_vqmovund_s64() {
24780 let a: i64 = 1;
24781 let e: u32 = 1;
24782 let r: u32 = transmute(vqmovund_s64(transmute(a)));
24783 assert_eq!(r, e);
24784 }
24785
24786 #[simd_test(enable = "neon")]
24787 unsafe fn test_vqmovun_high_s16() {
24788 let a: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
24789 let b: i16x8 = i16x8::new(-1, -1, -1, -1, -1, -1, -1, -1);
24790 let e: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
24791 let r: u8x16 = transmute(vqmovun_high_s16(transmute(a), transmute(b)));
24792 assert_eq!(r, e);
24793 }
24794
24795 #[simd_test(enable = "neon")]
24796 unsafe fn test_vqmovun_high_s32() {
24797 let a: u16x4 = u16x4::new(0, 0, 0, 0);
24798 let b: i32x4 = i32x4::new(-1, -1, -1, -1);
24799 let e: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
24800 let r: u16x8 = transmute(vqmovun_high_s32(transmute(a), transmute(b)));
24801 assert_eq!(r, e);
24802 }
24803
24804 #[simd_test(enable = "neon")]
24805 unsafe fn test_vqmovun_high_s64() {
24806 let a: u32x2 = u32x2::new(0, 0);
24807 let b: i64x2 = i64x2::new(-1, -1);
24808 let e: u32x4 = u32x4::new(0, 0, 0, 0);
24809 let r: u32x4 = transmute(vqmovun_high_s64(transmute(a), transmute(b)));
24810 assert_eq!(r, e);
24811 }
24812
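// vqrdmulh* is the rounding form of vqdmulh: ((2 * a * b) + (1 << (element_bits - 1))) >> element_bits.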
24813 #[simd_test(enable = "neon")]
24814 unsafe fn test_vqrdmulhh_s16() {
24815 let a: i16 = 1;
24816 let b: i16 = 2;
24817 let e: i16 = 0;
24818 let r: i16 = transmute(vqrdmulhh_s16(transmute(a), transmute(b)));
24819 assert_eq!(r, e);
24820 }
24821
24822 #[simd_test(enable = "neon")]
24823 unsafe fn test_vqrdmulhs_s32() {
24824 let a: i32 = 1;
24825 let b: i32 = 2;
24826 let e: i32 = 0;
24827 let r: i32 = transmute(vqrdmulhs_s32(transmute(a), transmute(b)));
24828 assert_eq!(r, e);
24829 }
24830
24831 #[simd_test(enable = "neon")]
24832 unsafe fn test_vqrdmulhh_lane_s16() {
24833 let a: i16 = 1;
24834 let b: i16x4 = i16x4::new(0, 2, 0, 0);
24835 let e: i16 = 0;
24836 let r: i16 = transmute(vqrdmulhh_lane_s16::<1>(transmute(a), transmute(b)));
24837 assert_eq!(r, e);
24838 }
24839
24840 #[simd_test(enable = "neon")]
24841 unsafe fn test_vqrdmulhh_laneq_s16() {
24842 let a: i16 = 1;
24843 let b: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
24844 let e: i16 = 0;
24845 let r: i16 = transmute(vqrdmulhh_laneq_s16::<1>(transmute(a), transmute(b)));
24846 assert_eq!(r, e);
24847 }
24848
24849 #[simd_test(enable = "neon")]
24850 unsafe fn test_vqrdmulhs_lane_s32() {
24851 let a: i32 = 1;
24852 let b: i32x2 = i32x2::new(0, 2);
24853 let e: i32 = 0;
24854 let r: i32 = transmute(vqrdmulhs_lane_s32::<1>(transmute(a), transmute(b)));
24855 assert_eq!(r, e);
24856 }
24857
24858 #[simd_test(enable = "neon")]
24859 unsafe fn test_vqrdmulhs_laneq_s32() {
24860 let a: i32 = 1;
24861 let b: i32x4 = i32x4::new(0, 2, 0, 0);
24862 let e: i32 = 0;
24863 let r: i32 = transmute(vqrdmulhs_laneq_s32::<1>(transmute(a), transmute(b)));
24864 assert_eq!(r, e);
24865 }
24866
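// vqrdmlah* and vqrdmlsh* (ARMv8.1 RDM extension) add or subtract the rounding doubling high-half product, hence the "rdm" target feature on these tests.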
24867 #[simd_test(enable = "rdm")]
24868 unsafe fn test_vqrdmlah_s16() {
24869 let a: i16x4 = i16x4::new(1, 1, 1, 1);
24870 let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
24871 let c: i16x4 = i16x4::new(2, 2, 2, 2);
24872 let e: i16x4 = i16x4::new(3, 3, 3, 3);
24873 let r: i16x4 = transmute(vqrdmlah_s16(transmute(a), transmute(b), transmute(c)));
24874 assert_eq!(r, e);
24875 }
24876
24877 #[simd_test(enable = "rdm")]
24878 unsafe fn test_vqrdmlahq_s16() {
24879 let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
24880 let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
24881 let c: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
24882 let e: i16x8 = i16x8::new(3, 3, 3, 3, 3, 3, 3, 3);
24883 let r: i16x8 = transmute(vqrdmlahq_s16(transmute(a), transmute(b), transmute(c)));
24884 assert_eq!(r, e);
24885 }
24886
24887 #[simd_test(enable = "rdm")]
24888 unsafe fn test_vqrdmlah_s32() {
24889 let a: i32x2 = i32x2::new(1, 1);
24890 let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
24891 let c: i32x2 = i32x2::new(2, 2);
24892 let e: i32x2 = i32x2::new(3, 3);
24893 let r: i32x2 = transmute(vqrdmlah_s32(transmute(a), transmute(b), transmute(c)));
24894 assert_eq!(r, e);
24895 }
24896
24897 #[simd_test(enable = "rdm")]
24898 unsafe fn test_vqrdmlahq_s32() {
24899 let a: i32x4 = i32x4::new(1, 1, 1, 1);
24900 let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
24901 let c: i32x4 = i32x4::new(2, 2, 2, 2);
24902 let e: i32x4 = i32x4::new(3, 3, 3, 3);
24903 let r: i32x4 = transmute(vqrdmlahq_s32(transmute(a), transmute(b), transmute(c)));
24904 assert_eq!(r, e);
24905 }
24906
24907 #[simd_test(enable = "rdm")]
24908 unsafe fn test_vqrdmlahh_s16() {
24909 let a: i16 = 1;
24910 let b: i16 = 1;
24911 let c: i16 = 2;
24912 let e: i16 = 1;
24913 let r: i16 = transmute(vqrdmlahh_s16(transmute(a), transmute(b), transmute(c)));
24914 assert_eq!(r, e);
24915 }
24916
24917 #[simd_test(enable = "rdm")]
24918 unsafe fn test_vqrdmlahs_s32() {
24919 let a: i32 = 1;
24920 let b: i32 = 1;
24921 let c: i32 = 2;
24922 let e: i32 = 1;
24923 let r: i32 = transmute(vqrdmlahs_s32(transmute(a), transmute(b), transmute(c)));
24924 assert_eq!(r, e);
24925 }
24926
24927 #[simd_test(enable = "rdm")]
24928 unsafe fn test_vqrdmlah_lane_s16() {
24929 let a: i16x4 = i16x4::new(1, 1, 1, 1);
24930 let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
24931 let c: i16x4 = i16x4::new(0, 2, 0, 0);
24932 let e: i16x4 = i16x4::new(3, 3, 3, 3);
24933 let r: i16x4 = transmute(vqrdmlah_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
24934 assert_eq!(r, e);
24935 }
24936
24937 #[simd_test(enable = "rdm")]
24938 unsafe fn test_vqrdmlah_laneq_s16() {
24939 let a: i16x4 = i16x4::new(1, 1, 1, 1);
24940 let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
24941 let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
24942 let e: i16x4 = i16x4::new(3, 3, 3, 3);
24943 let r: i16x4 = transmute(vqrdmlah_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
24944 assert_eq!(r, e);
24945 }
24946
24947 #[simd_test(enable = "rdm")]
24948 unsafe fn test_vqrdmlahq_lane_s16() {
24949 let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
24950 let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
24951 let c: i16x4 = i16x4::new(0, 2, 0, 0);
24952 let e: i16x8 = i16x8::new(3, 3, 3, 3, 3, 3, 3, 3);
24953 let r: i16x8 = transmute(vqrdmlahq_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
24954 assert_eq!(r, e);
24955 }
24956
24957 #[simd_test(enable = "rdm")]
24958 unsafe fn test_vqrdmlahq_laneq_s16() {
24959 let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
24960 let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
24961 let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
24962 let e: i16x8 = i16x8::new(3, 3, 3, 3, 3, 3, 3, 3);
24963 let r: i16x8 = transmute(vqrdmlahq_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
24964 assert_eq!(r, e);
24965 }
24966
24967 #[simd_test(enable = "rdm")]
24968 unsafe fn test_vqrdmlah_lane_s32() {
24969 let a: i32x2 = i32x2::new(1, 1);
24970 let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
24971 let c: i32x2 = i32x2::new(0, 2);
24972 let e: i32x2 = i32x2::new(3, 3);
24973 let r: i32x2 = transmute(vqrdmlah_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
24974 assert_eq!(r, e);
24975 }
24976
24977 #[simd_test(enable = "rdm")]
24978 unsafe fn test_vqrdmlah_laneq_s32() {
24979 let a: i32x2 = i32x2::new(1, 1);
24980 let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
24981 let c: i32x4 = i32x4::new(0, 2, 0, 0);
24982 let e: i32x2 = i32x2::new(3, 3);
24983 let r: i32x2 = transmute(vqrdmlah_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
24984 assert_eq!(r, e);
24985 }
24986
24987 #[simd_test(enable = "rdm")]
24988 unsafe fn test_vqrdmlahq_lane_s32() {
24989 let a: i32x4 = i32x4::new(1, 1, 1, 1);
24990 let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
24991 let c: i32x2 = i32x2::new(0, 2);
24992 let e: i32x4 = i32x4::new(3, 3, 3, 3);
24993 let r: i32x4 = transmute(vqrdmlahq_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
24994 assert_eq!(r, e);
24995 }
24996
24997 #[simd_test(enable = "rdm")]
24998 unsafe fn test_vqrdmlahq_laneq_s32() {
24999 let a: i32x4 = i32x4::new(1, 1, 1, 1);
25000 let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
25001 let c: i32x4 = i32x4::new(0, 2, 0, 0);
25002 let e: i32x4 = i32x4::new(3, 3, 3, 3);
25003 let r: i32x4 = transmute(vqrdmlahq_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
25004 assert_eq!(r, e);
25005 }
25006
25007 #[simd_test(enable = "rdm")]
25008 unsafe fn test_vqrdmlahh_lane_s16() {
25009 let a: i16 = 1;
25010 let b: i16 = 1;
25011 let c: i16x4 = i16x4::new(0, 2, 0, 0);
25012 let e: i16 = 1;
25013 let r: i16 = transmute(vqrdmlahh_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
25014 assert_eq!(r, e);
25015 }
25016
25017 #[simd_test(enable = "rdm")]
25018 unsafe fn test_vqrdmlahh_laneq_s16() {
25019 let a: i16 = 1;
25020 let b: i16 = 1;
25021 let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
25022 let e: i16 = 1;
25023 let r: i16 = transmute(vqrdmlahh_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
25024 assert_eq!(r, e);
25025 }
25026
25027 #[simd_test(enable = "rdm")]
25028 unsafe fn test_vqrdmlahs_lane_s32() {
25029 let a: i32 = 1;
25030 let b: i32 = 1;
25031 let c: i32x2 = i32x2::new(0, 2);
25032 let e: i32 = 1;
25033 let r: i32 = transmute(vqrdmlahs_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
25034 assert_eq!(r, e);
25035 }
25036
25037 #[simd_test(enable = "rdm")]
25038 unsafe fn test_vqrdmlahs_laneq_s32() {
25039 let a: i32 = 1;
25040 let b: i32 = 1;
25041 let c: i32x4 = i32x4::new(0, 2, 0, 0);
25042 let e: i32 = 1;
25043 let r: i32 = transmute(vqrdmlahs_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
25044 assert_eq!(r, e);
25045 }
25046
25047 #[simd_test(enable = "rdm")]
25048 unsafe fn test_vqrdmlsh_s16() {
25049 let a: i16x4 = i16x4::new(1, 1, 1, 1);
25050 let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
25051 let c: i16x4 = i16x4::new(2, 2, 2, 2);
25052 let e: i16x4 = i16x4::new(-1, -1, -1, -1);
25053 let r: i16x4 = transmute(vqrdmlsh_s16(transmute(a), transmute(b), transmute(c)));
25054 assert_eq!(r, e);
25055 }
25056
25057 #[simd_test(enable = "rdm")]
25058 unsafe fn test_vqrdmlshq_s16() {
25059 let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
25060 let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
25061 let c: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
25062 let e: i16x8 = i16x8::new(-1, -1, -1, -1, -1, -1, -1, -1);
25063 let r: i16x8 = transmute(vqrdmlshq_s16(transmute(a), transmute(b), transmute(c)));
25064 assert_eq!(r, e);
25065 }
25066
25067 #[simd_test(enable = "rdm")]
25068 unsafe fn test_vqrdmlsh_s32() {
25069 let a: i32x2 = i32x2::new(1, 1);
25070 let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
25071 let c: i32x2 = i32x2::new(2, 2);
25072 let e: i32x2 = i32x2::new(-1, -1);
25073 let r: i32x2 = transmute(vqrdmlsh_s32(transmute(a), transmute(b), transmute(c)));
25074 assert_eq!(r, e);
25075 }
25076
25077 #[simd_test(enable = "rdm")]
25078 unsafe fn test_vqrdmlshq_s32() {
25079 let a: i32x4 = i32x4::new(1, 1, 1, 1);
25080 let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
25081 let c: i32x4 = i32x4::new(2, 2, 2, 2);
25082 let e: i32x4 = i32x4::new(-1, -1, -1, -1);
25083 let r: i32x4 = transmute(vqrdmlshq_s32(transmute(a), transmute(b), transmute(c)));
25084 assert_eq!(r, e);
25085 }
25086
25087 #[simd_test(enable = "rdm")]
25088 unsafe fn test_vqrdmlshh_s16() {
25089 let a: i16 = 1;
25090 let b: i16 = 1;
25091 let c: i16 = 2;
25092 let e: i16 = 1;
25093 let r: i16 = transmute(vqrdmlshh_s16(transmute(a), transmute(b), transmute(c)));
25094 assert_eq!(r, e);
25095 }
25096
25097 #[simd_test(enable = "rdm")]
25098 unsafe fn test_vqrdmlshs_s32() {
25099 let a: i32 = 1;
25100 let b: i32 = 1;
25101 let c: i32 = 2;
25102 let e: i32 = 1;
25103 let r: i32 = transmute(vqrdmlshs_s32(transmute(a), transmute(b), transmute(c)));
25104 assert_eq!(r, e);
25105 }
25106
25107 #[simd_test(enable = "rdm")]
25108 unsafe fn test_vqrdmlsh_lane_s16() {
25109 let a: i16x4 = i16x4::new(1, 1, 1, 1);
25110 let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
25111 let c: i16x4 = i16x4::new(0, 2, 0, 0);
25112 let e: i16x4 = i16x4::new(-1, -1, -1, -1);
25113 let r: i16x4 = transmute(vqrdmlsh_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
25114 assert_eq!(r, e);
25115 }
25116
25117 #[simd_test(enable = "rdm")]
25118 unsafe fn test_vqrdmlsh_laneq_s16() {
25119 let a: i16x4 = i16x4::new(1, 1, 1, 1);
25120 let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
25121 let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
25122 let e: i16x4 = i16x4::new(-1, -1, -1, -1);
25123 let r: i16x4 = transmute(vqrdmlsh_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
25124 assert_eq!(r, e);
25125 }
25126
25127 #[simd_test(enable = "rdm")]
25128 unsafe fn test_vqrdmlshq_lane_s16() {
25129 let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
25130 let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
25131 let c: i16x4 = i16x4::new(0, 2, 0, 0);
25132 let e: i16x8 = i16x8::new(-1, -1, -1, -1, -1, -1, -1, -1);
25133 let r: i16x8 = transmute(vqrdmlshq_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
25134 assert_eq!(r, e);
25135 }
25136
25137 #[simd_test(enable = "rdm")]
25138 unsafe fn test_vqrdmlshq_laneq_s16() {
25139 let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
25140 let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
25141 let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
25142 let e: i16x8 = i16x8::new(-1, -1, -1, -1, -1, -1, -1, -1);
25143 let r: i16x8 = transmute(vqrdmlshq_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
25144 assert_eq!(r, e);
25145 }
25146
25147 #[simd_test(enable = "rdm")]
25148 unsafe fn test_vqrdmlsh_lane_s32() {
25149 let a: i32x2 = i32x2::new(1, 1);
25150 let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
25151 let c: i32x2 = i32x2::new(0, 2);
25152 let e: i32x2 = i32x2::new(-1, -1);
25153 let r: i32x2 = transmute(vqrdmlsh_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
25154 assert_eq!(r, e);
25155 }
25156
25157 #[simd_test(enable = "rdm")]
25158 unsafe fn test_vqrdmlsh_laneq_s32() {
25159 let a: i32x2 = i32x2::new(1, 1);
25160 let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
25161 let c: i32x4 = i32x4::new(0, 2, 0, 0);
25162 let e: i32x2 = i32x2::new(-1, -1);
25163 let r: i32x2 = transmute(vqrdmlsh_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
25164 assert_eq!(r, e);
25165 }
25166
25167 #[simd_test(enable = "rdm")]
25168 unsafe fn test_vqrdmlshq_lane_s32() {
25169 let a: i32x4 = i32x4::new(1, 1, 1, 1);
25170 let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
25171 let c: i32x2 = i32x2::new(0, 2);
25172 let e: i32x4 = i32x4::new(-1, -1, -1, -1);
25173 let r: i32x4 = transmute(vqrdmlshq_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
25174 assert_eq!(r, e);
25175 }
25176
25177 #[simd_test(enable = "rdm")]
25178 unsafe fn test_vqrdmlshq_laneq_s32() {
25179 let a: i32x4 = i32x4::new(1, 1, 1, 1);
25180 let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
25181 let c: i32x4 = i32x4::new(0, 2, 0, 0);
25182 let e: i32x4 = i32x4::new(-1, -1, -1, -1);
25183 let r: i32x4 = transmute(vqrdmlshq_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
25184 assert_eq!(r, e);
25185 }
25186
25187 #[simd_test(enable = "rdm")]
25188 unsafe fn test_vqrdmlshh_lane_s16() {
25189 let a: i16 = 1;
25190 let b: i16 = 1;
25191 let c: i16x4 = i16x4::new(0, 2, 0, 0);
25192 let e: i16 = 1;
25193 let r: i16 = transmute(vqrdmlshh_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
25194 assert_eq!(r, e);
25195 }
25196
25197 #[simd_test(enable = "rdm")]
25198 unsafe fn test_vqrdmlshh_laneq_s16() {
25199 let a: i16 = 1;
25200 let b: i16 = 1;
25201 let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
25202 let e: i16 = 1;
25203 let r: i16 = transmute(vqrdmlshh_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
25204 assert_eq!(r, e);
25205 }
25206
25207 #[simd_test(enable = "rdm")]
25208 unsafe fn test_vqrdmlshs_lane_s32() {
25209 let a: i32 = 1;
25210 let b: i32 = 1;
25211 let c: i32x2 = i32x2::new(0, 2);
25212 let e: i32 = 1;
25213 let r: i32 = transmute(vqrdmlshs_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
25214 assert_eq!(r, e);
25215 }
25216
25217 #[simd_test(enable = "rdm")]
25218 unsafe fn test_vqrdmlshs_laneq_s32() {
25219 let a: i32 = 1;
25220 let b: i32 = 1;
25221 let c: i32x4 = i32x4::new(0, 2, 0, 0);
25222 let e: i32 = 1;
25223 let r: i32 = transmute(vqrdmlshs_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
25224 assert_eq!(r, e);
25225 }
25226
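// vqrshl* is the saturating rounding shift left by a signed, per-element amount; negative amounts shift right with rounding.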
25227 #[simd_test(enable = "neon")]
25228 unsafe fn test_vqrshls_s32() {
25229 let a: i32 = 2;
25230 let b: i32 = 2;
25231 let e: i32 = 8;
25232 let r: i32 = transmute(vqrshls_s32(transmute(a), transmute(b)));
25233 assert_eq!(r, e);
25234 }
25235
25236 #[simd_test(enable = "neon")]
25237 unsafe fn test_vqrshld_s64() {
25238 let a: i64 = 2;
25239 let b: i64 = 2;
25240 let e: i64 = 8;
25241 let r: i64 = transmute(vqrshld_s64(transmute(a), transmute(b)));
25242 assert_eq!(r, e);
25243 }
25244
25245 #[simd_test(enable = "neon")]
25246 unsafe fn test_vqrshlb_s8() {
25247 let a: i8 = 1;
25248 let b: i8 = 2;
25249 let e: i8 = 4;
25250 let r: i8 = transmute(vqrshlb_s8(transmute(a), transmute(b)));
25251 assert_eq!(r, e);
25252 }
25253
25254 #[simd_test(enable = "neon")]
25255 unsafe fn test_vqrshlh_s16() {
25256 let a: i16 = 1;
25257 let b: i16 = 2;
25258 let e: i16 = 4;
25259 let r: i16 = transmute(vqrshlh_s16(transmute(a), transmute(b)));
25260 assert_eq!(r, e);
25261 }
25262
25263 #[simd_test(enable = "neon")]
25264 unsafe fn test_vqrshls_u32() {
25265 let a: u32 = 2;
25266 let b: i32 = 2;
25267 let e: u32 = 8;
25268 let r: u32 = transmute(vqrshls_u32(transmute(a), transmute(b)));
25269 assert_eq!(r, e);
25270 }
25271
25272 #[simd_test(enable = "neon")]
25273 unsafe fn test_vqrshld_u64() {
25274 let a: u64 = 2;
25275 let b: i64 = 2;
25276 let e: u64 = 8;
25277 let r: u64 = transmute(vqrshld_u64(transmute(a), transmute(b)));
25278 assert_eq!(r, e);
25279 }
25280
25281 #[simd_test(enable = "neon")]
25282 unsafe fn test_vqrshlb_u8() {
25283 let a: u8 = 1;
25284 let b: i8 = 2;
25285 let e: u8 = 4;
25286 let r: u8 = transmute(vqrshlb_u8(transmute(a), transmute(b)));
25287 assert_eq!(r, e);
25288 }
25289
25290 #[simd_test(enable = "neon")]
25291 unsafe fn test_vqrshlh_u16() {
25292 let a: u16 = 1;
25293 let b: i16 = 2;
25294 let e: u16 = 4;
25295 let r: u16 = transmute(vqrshlh_u16(transmute(a), transmute(b)));
25296 assert_eq!(r, e);
25297 }
25298
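// vqrshrn*_n and vqrshrun*_n round, shift right by an immediate, and narrow with saturation (here 4 >> 2 gives 1).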
25299 #[simd_test(enable = "neon")]
25300 unsafe fn test_vqrshrnh_n_s16() {
25301 let a: i16 = 4;
25302 let e: i8 = 1;
25303 let r: i8 = transmute(vqrshrnh_n_s16::<2>(transmute(a)));
25304 assert_eq!(r, e);
25305 }
25306
25307 #[simd_test(enable = "neon")]
25308 unsafe fn test_vqrshrns_n_s32() {
25309 let a: i32 = 4;
25310 let e: i16 = 1;
25311 let r: i16 = transmute(vqrshrns_n_s32::<2>(transmute(a)));
25312 assert_eq!(r, e);
25313 }
25314
25315 #[simd_test(enable = "neon")]
25316 unsafe fn test_vqrshrnd_n_s64() {
25317 let a: i64 = 4;
25318 let e: i32 = 1;
25319 let r: i32 = transmute(vqrshrnd_n_s64::<2>(transmute(a)));
25320 assert_eq!(r, e);
25321 }
25322
25323 #[simd_test(enable = "neon")]
25324 unsafe fn test_vqrshrn_high_n_s16() {
25325 let a: i8x8 = i8x8::new(0, 1, 2, 3, 2, 3, 6, 7);
25326 let b: i16x8 = i16x8::new(8, 12, 24, 28, 48, 52, 56, 60);
25327 let e: i8x16 = i8x16::new(0, 1, 2, 3, 2, 3, 6, 7, 2, 3, 6, 7, 12, 13, 14, 15);
25328 let r: i8x16 = transmute(vqrshrn_high_n_s16::<2>(transmute(a), transmute(b)));
25329 assert_eq!(r, e);
25330 }
25331
25332 #[simd_test(enable = "neon")]
25333 unsafe fn test_vqrshrn_high_n_s32() {
25334 let a: i16x4 = i16x4::new(0, 1, 2, 3);
25335 let b: i32x4 = i32x4::new(8, 12, 24, 28);
25336 let e: i16x8 = i16x8::new(0, 1, 2, 3, 2, 3, 6, 7);
25337 let r: i16x8 = transmute(vqrshrn_high_n_s32::<2>(transmute(a), transmute(b)));
25338 assert_eq!(r, e);
25339 }
25340
25341 #[simd_test(enable = "neon")]
25342 unsafe fn test_vqrshrn_high_n_s64() {
25343 let a: i32x2 = i32x2::new(0, 1);
25344 let b: i64x2 = i64x2::new(8, 12);
25345 let e: i32x4 = i32x4::new(0, 1, 2, 3);
25346 let r: i32x4 = transmute(vqrshrn_high_n_s64::<2>(transmute(a), transmute(b)));
25347 assert_eq!(r, e);
25348 }
25349
25350 #[simd_test(enable = "neon")]
25351 unsafe fn test_vqrshrnh_n_u16() {
25352 let a: u16 = 4;
25353 let e: u8 = 1;
25354 let r: u8 = transmute(vqrshrnh_n_u16::<2>(transmute(a)));
25355 assert_eq!(r, e);
25356 }
25357
25358 #[simd_test(enable = "neon")]
25359 unsafe fn test_vqrshrns_n_u32() {
25360 let a: u32 = 4;
25361 let e: u16 = 1;
25362 let r: u16 = transmute(vqrshrns_n_u32::<2>(transmute(a)));
25363 assert_eq!(r, e);
25364 }
25365
25366 #[simd_test(enable = "neon")]
25367 unsafe fn test_vqrshrnd_n_u64() {
25368 let a: u64 = 4;
25369 let e: u32 = 1;
25370 let r: u32 = transmute(vqrshrnd_n_u64::<2>(transmute(a)));
25371 assert_eq!(r, e);
25372 }
25373
25374 #[simd_test(enable = "neon")]
25375 unsafe fn test_vqrshrn_high_n_u16() {
25376 let a: u8x8 = u8x8::new(0, 1, 2, 3, 2, 3, 6, 7);
25377 let b: u16x8 = u16x8::new(8, 12, 24, 28, 48, 52, 56, 60);
25378 let e: u8x16 = u8x16::new(0, 1, 2, 3, 2, 3, 6, 7, 2, 3, 6, 7, 12, 13, 14, 15);
25379 let r: u8x16 = transmute(vqrshrn_high_n_u16::<2>(transmute(a), transmute(b)));
25380 assert_eq!(r, e);
25381 }
25382
25383 #[simd_test(enable = "neon")]
25384 unsafe fn test_vqrshrn_high_n_u32() {
25385 let a: u16x4 = u16x4::new(0, 1, 2, 3);
25386 let b: u32x4 = u32x4::new(8, 12, 24, 28);
25387 let e: u16x8 = u16x8::new(0, 1, 2, 3, 2, 3, 6, 7);
25388 let r: u16x8 = transmute(vqrshrn_high_n_u32::<2>(transmute(a), transmute(b)));
25389 assert_eq!(r, e);
25390 }
25391
25392 #[simd_test(enable = "neon")]
25393 unsafe fn test_vqrshrn_high_n_u64() {
25394 let a: u32x2 = u32x2::new(0, 1);
25395 let b: u64x2 = u64x2::new(8, 12);
25396 let e: u32x4 = u32x4::new(0, 1, 2, 3);
25397 let r: u32x4 = transmute(vqrshrn_high_n_u64::<2>(transmute(a), transmute(b)));
25398 assert_eq!(r, e);
25399 }
25400
25401 #[simd_test(enable = "neon")]
25402 unsafe fn test_vqrshrunh_n_s16() {
25403 let a: i16 = 4;
25404 let e: u8 = 1;
25405 let r: u8 = transmute(vqrshrunh_n_s16::<2>(transmute(a)));
25406 assert_eq!(r, e);
25407 }
25408
25409 #[simd_test(enable = "neon")]
25410 unsafe fn test_vqrshruns_n_s32() {
25411 let a: i32 = 4;
25412 let e: u16 = 1;
25413 let r: u16 = transmute(vqrshruns_n_s32::<2>(transmute(a)));
25414 assert_eq!(r, e);
25415 }
25416
25417 #[simd_test(enable = "neon")]
25418 unsafe fn test_vqrshrund_n_s64() {
25419 let a: i64 = 4;
25420 let e: u32 = 1;
25421 let r: u32 = transmute(vqrshrund_n_s64::<2>(transmute(a)));
25422 assert_eq!(r, e);
25423 }
25424
25425 #[simd_test(enable = "neon")]
25426 unsafe fn test_vqrshrun_high_n_s16() {
25427 let a: u8x8 = u8x8::new(0, 1, 2, 3, 2, 3, 6, 7);
25428 let b: i16x8 = i16x8::new(8, 12, 24, 28, 48, 52, 56, 60);
25429 let e: u8x16 = u8x16::new(0, 1, 2, 3, 2, 3, 6, 7, 2, 3, 6, 7, 12, 13, 14, 15);
25430 let r: u8x16 = transmute(vqrshrun_high_n_s16::<2>(transmute(a), transmute(b)));
25431 assert_eq!(r, e);
25432 }
25433
25434 #[simd_test(enable = "neon")]
25435 unsafe fn test_vqrshrun_high_n_s32() {
25436 let a: u16x4 = u16x4::new(0, 1, 2, 3);
25437 let b: i32x4 = i32x4::new(8, 12, 24, 28);
25438 let e: u16x8 = u16x8::new(0, 1, 2, 3, 2, 3, 6, 7);
25439 let r: u16x8 = transmute(vqrshrun_high_n_s32::<2>(transmute(a), transmute(b)));
25440 assert_eq!(r, e);
25441 }
25442
25443 #[simd_test(enable = "neon")]
25444 unsafe fn test_vqrshrun_high_n_s64() {
25445 let a: u32x2 = u32x2::new(0, 1);
25446 let b: i64x2 = i64x2::new(8, 12);
25447 let e: u32x4 = u32x4::new(0, 1, 2, 3);
25448 let r: u32x4 = transmute(vqrshrun_high_n_s64::<2>(transmute(a), transmute(b)));
25449 assert_eq!(r, e);
25450 }
25451
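// vqshl* is the plain saturating shift left; the _n forms take the shift amount as a const generic immediate.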
25452 #[simd_test(enable = "neon")]
25453 unsafe fn test_vqshld_s64() {
25454 let a: i64 = 0;
25455 let b: i64 = 2;
25456 let e: i64 = 0;
25457 let r: i64 = transmute(vqshld_s64(transmute(a), transmute(b)));
25458 assert_eq!(r, e);
25459 }
25460
25461 #[simd_test(enable = "neon")]
25462 unsafe fn test_vqshlb_s8() {
25463 let a: i8 = 1;
25464 let b: i8 = 2;
25465 let e: i8 = 4;
25466 let r: i8 = transmute(vqshlb_s8(transmute(a), transmute(b)));
25467 assert_eq!(r, e);
25468 }
25469
25470 #[simd_test(enable = "neon")]
25471 unsafe fn test_vqshlh_s16() {
25472 let a: i16 = 1;
25473 let b: i16 = 2;
25474 let e: i16 = 4;
25475 let r: i16 = transmute(vqshlh_s16(transmute(a), transmute(b)));
25476 assert_eq!(r, e);
25477 }
25478
25479 #[simd_test(enable = "neon")]
25480 unsafe fn test_vqshls_s32() {
25481 let a: i32 = 1;
25482 let b: i32 = 2;
25483 let e: i32 = 4;
25484 let r: i32 = transmute(vqshls_s32(transmute(a), transmute(b)));
25485 assert_eq!(r, e);
25486 }
25487
25488 #[simd_test(enable = "neon")]
25489 unsafe fn test_vqshld_u64() {
25490 let a: u64 = 0;
25491 let b: i64 = 2;
25492 let e: u64 = 0;
25493 let r: u64 = transmute(vqshld_u64(transmute(a), transmute(b)));
25494 assert_eq!(r, e);
25495 }
25496
25497 #[simd_test(enable = "neon")]
25498 unsafe fn test_vqshlb_u8() {
25499 let a: u8 = 1;
25500 let b: i8 = 2;
25501 let e: u8 = 4;
25502 let r: u8 = transmute(vqshlb_u8(transmute(a), transmute(b)));
25503 assert_eq!(r, e);
25504 }
25505
25506 #[simd_test(enable = "neon")]
25507 unsafe fn test_vqshlh_u16() {
25508 let a: u16 = 1;
25509 let b: i16 = 2;
25510 let e: u16 = 4;
25511 let r: u16 = transmute(vqshlh_u16(transmute(a), transmute(b)));
25512 assert_eq!(r, e);
25513 }
25514
25515 #[simd_test(enable = "neon")]
25516 unsafe fn test_vqshls_u32() {
25517 let a: u32 = 1;
25518 let b: i32 = 2;
25519 let e: u32 = 4;
25520 let r: u32 = transmute(vqshls_u32(transmute(a), transmute(b)));
25521 assert_eq!(r, e);
25522 }
25523
25524 #[simd_test(enable = "neon")]
25525 unsafe fn test_vqshlb_n_s8() {
25526 let a: i8 = 1;
25527 let e: i8 = 4;
25528 let r: i8 = transmute(vqshlb_n_s8::<2>(transmute(a)));
25529 assert_eq!(r, e);
25530 }
25531
25532 #[simd_test(enable = "neon")]
25533 unsafe fn test_vqshlh_n_s16() {
25534 let a: i16 = 1;
25535 let e: i16 = 4;
25536 let r: i16 = transmute(vqshlh_n_s16::<2>(transmute(a)));
25537 assert_eq!(r, e);
25538 }
25539
25540 #[simd_test(enable = "neon")]
25541 unsafe fn test_vqshls_n_s32() {
25542 let a: i32 = 1;
25543 let e: i32 = 4;
25544 let r: i32 = transmute(vqshls_n_s32::<2>(transmute(a)));
25545 assert_eq!(r, e);
25546 }
25547
25548 #[simd_test(enable = "neon")]
25549 unsafe fn test_vqshld_n_s64() {
25550 let a: i64 = 1;
25551 let e: i64 = 4;
25552 let r: i64 = transmute(vqshld_n_s64::<2>(transmute(a)));
25553 assert_eq!(r, e);
25554 }
25555
25556 #[simd_test(enable = "neon")]
25557 unsafe fn test_vqshlb_n_u8() {
25558 let a: u8 = 1;
25559 let e: u8 = 4;
25560 let r: u8 = transmute(vqshlb_n_u8::<2>(transmute(a)));
25561 assert_eq!(r, e);
25562 }
25563
25564 #[simd_test(enable = "neon")]
25565 unsafe fn test_vqshlh_n_u16() {
25566 let a: u16 = 1;
25567 let e: u16 = 4;
25568 let r: u16 = transmute(vqshlh_n_u16::<2>(transmute(a)));
25569 assert_eq!(r, e);
25570 }
25571
25572 #[simd_test(enable = "neon")]
25573 unsafe fn test_vqshls_n_u32() {
25574 let a: u32 = 1;
25575 let e: u32 = 4;
25576 let r: u32 = transmute(vqshls_n_u32::<2>(transmute(a)));
25577 assert_eq!(r, e);
25578 }
25579
25580 #[simd_test(enable = "neon")]
25581 unsafe fn test_vqshld_n_u64() {
25582 let a: u64 = 1;
25583 let e: u64 = 4;
25584 let r: u64 = transmute(vqshld_n_u64::<2>(transmute(a)));
25585 assert_eq!(r, e);
25586 }
25587
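// vqshlu*_n shifts a signed input left by an immediate and saturates the result to the unsigned range.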
25588 #[simd_test(enable = "neon")]
25589 unsafe fn test_vqshlub_n_s8() {
25590 let a: i8 = 1;
25591 let e: u8 = 4;
25592 let r: u8 = transmute(vqshlub_n_s8::<2>(transmute(a)));
25593 assert_eq!(r, e);
25594 }
25595
25596 #[simd_test(enable = "neon")]
25597 unsafe fn test_vqshluh_n_s16() {
25598 let a: i16 = 1;
25599 let e: u16 = 4;
25600 let r: u16 = transmute(vqshluh_n_s16::<2>(transmute(a)));
25601 assert_eq!(r, e);
25602 }
25603
25604 #[simd_test(enable = "neon")]
25605 unsafe fn test_vqshlus_n_s32() {
25606 let a: i32 = 1;
25607 let e: u32 = 4;
25608 let r: u32 = transmute(vqshlus_n_s32::<2>(transmute(a)));
25609 assert_eq!(r, e);
25610 }
25611
25612 #[simd_test(enable = "neon")]
25613 unsafe fn test_vqshlud_n_s64() {
25614 let a: i64 = 1;
25615 let e: u64 = 4;
25616 let r: u64 = transmute(vqshlud_n_s64::<2>(transmute(a)));
25617 assert_eq!(r, e);
25618 }
25619
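// vqshrn*_n shifts right by an immediate and narrows with saturation; vqshrun*_n additionally converts signed inputs to unsigned results.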
25620 #[simd_test(enable = "neon")]
25621 unsafe fn test_vqshrnd_n_s64() {
25622 let a: i64 = 0;
25623 let e: i32 = 0;
25624 let r: i32 = transmute(vqshrnd_n_s64::<2>(transmute(a)));
25625 assert_eq!(r, e);
25626 }
25627
25628 #[simd_test(enable = "neon")]
25629 unsafe fn test_vqshrnh_n_s16() {
25630 let a: i16 = 4;
25631 let e: i8 = 1;
25632 let r: i8 = transmute(vqshrnh_n_s16::<2>(transmute(a)));
25633 assert_eq!(r, e);
25634 }
25635
25636 #[simd_test(enable = "neon")]
25637 unsafe fn test_vqshrns_n_s32() {
25638 let a: i32 = 4;
25639 let e: i16 = 1;
25640 let r: i16 = transmute(vqshrns_n_s32::<2>(transmute(a)));
25641 assert_eq!(r, e);
25642 }
25643
25644 #[simd_test(enable = "neon")]
25645 unsafe fn test_vqshrn_high_n_s16() {
25646 let a: i8x8 = i8x8::new(0, 1, 8, 9, 8, 9, 10, 11);
25647 let b: i16x8 = i16x8::new(32, 36, 40, 44, 48, 52, 56, 60);
25648 let e: i8x16 = i8x16::new(0, 1, 8, 9, 8, 9, 10, 11, 8, 9, 10, 11, 12, 13, 14, 15);
25649 let r: i8x16 = transmute(vqshrn_high_n_s16::<2>(transmute(a), transmute(b)));
25650 assert_eq!(r, e);
25651 }
25652
25653 #[simd_test(enable = "neon")]
25654 unsafe fn test_vqshrn_high_n_s32() {
25655 let a: i16x4 = i16x4::new(0, 1, 8, 9);
25656 let b: i32x4 = i32x4::new(32, 36, 40, 44);
25657 let e: i16x8 = i16x8::new(0, 1, 8, 9, 8, 9, 10, 11);
25658 let r: i16x8 = transmute(vqshrn_high_n_s32::<2>(transmute(a), transmute(b)));
25659 assert_eq!(r, e);
25660 }
25661
25662 #[simd_test(enable = "neon")]
25663 unsafe fn test_vqshrn_high_n_s64() {
25664 let a: i32x2 = i32x2::new(0, 1);
25665 let b: i64x2 = i64x2::new(32, 36);
25666 let e: i32x4 = i32x4::new(0, 1, 8, 9);
25667 let r: i32x4 = transmute(vqshrn_high_n_s64::<2>(transmute(a), transmute(b)));
25668 assert_eq!(r, e);
25669 }
25670
25671 #[simd_test(enable = "neon")]
25672 unsafe fn test_vqshrnd_n_u64() {
25673 let a: u64 = 0;
25674 let e: u32 = 0;
25675 let r: u32 = transmute(vqshrnd_n_u64::<2>(transmute(a)));
25676 assert_eq!(r, e);
25677 }
25678
25679 #[simd_test(enable = "neon")]
25680 unsafe fn test_vqshrnh_n_u16() {
25681 let a: u16 = 4;
25682 let e: u8 = 1;
25683 let r: u8 = transmute(vqshrnh_n_u16::<2>(transmute(a)));
25684 assert_eq!(r, e);
25685 }
25686
25687 #[simd_test(enable = "neon")]
25688 unsafe fn test_vqshrns_n_u32() {
25689 let a: u32 = 4;
25690 let e: u16 = 1;
25691 let r: u16 = transmute(vqshrns_n_u32::<2>(transmute(a)));
25692 assert_eq!(r, e);
25693 }
25694
25695 #[simd_test(enable = "neon")]
25696 unsafe fn test_vqshrn_high_n_u16() {
25697 let a: u8x8 = u8x8::new(0, 1, 8, 9, 8, 9, 10, 11);
25698 let b: u16x8 = u16x8::new(32, 36, 40, 44, 48, 52, 56, 60);
25699 let e: u8x16 = u8x16::new(0, 1, 8, 9, 8, 9, 10, 11, 8, 9, 10, 11, 12, 13, 14, 15);
25700 let r: u8x16 = transmute(vqshrn_high_n_u16::<2>(transmute(a), transmute(b)));
25701 assert_eq!(r, e);
25702 }
25703
25704 #[simd_test(enable = "neon")]
25705 unsafe fn test_vqshrn_high_n_u32() {
25706 let a: u16x4 = u16x4::new(0, 1, 8, 9);
25707 let b: u32x4 = u32x4::new(32, 36, 40, 44);
25708 let e: u16x8 = u16x8::new(0, 1, 8, 9, 8, 9, 10, 11);
25709 let r: u16x8 = transmute(vqshrn_high_n_u32::<2>(transmute(a), transmute(b)));
25710 assert_eq!(r, e);
25711 }
25712
25713 #[simd_test(enable = "neon")]
25714 unsafe fn test_vqshrn_high_n_u64() {
25715 let a: u32x2 = u32x2::new(0, 1);
25716 let b: u64x2 = u64x2::new(32, 36);
25717 let e: u32x4 = u32x4::new(0, 1, 8, 9);
25718 let r: u32x4 = transmute(vqshrn_high_n_u64::<2>(transmute(a), transmute(b)));
25719 assert_eq!(r, e);
25720 }
25721
25722 #[simd_test(enable = "neon")]
25723 unsafe fn test_vqshrunh_n_s16() {
25724 let a: i16 = 4;
25725 let e: u8 = 1;
25726 let r: u8 = transmute(vqshrunh_n_s16::<2>(transmute(a)));
25727 assert_eq!(r, e);
25728 }
25729
25730 #[simd_test(enable = "neon")]
25731 unsafe fn test_vqshruns_n_s32() {
25732 let a: i32 = 4;
25733 let e: u16 = 1;
25734 let r: u16 = transmute(vqshruns_n_s32::<2>(transmute(a)));
25735 assert_eq!(r, e);
25736 }
25737
25738 #[simd_test(enable = "neon")]
25739 unsafe fn test_vqshrund_n_s64() {
25740 let a: i64 = 4;
25741 let e: u32 = 1;
25742 let r: u32 = transmute(vqshrund_n_s64::<2>(transmute(a)));
25743 assert_eq!(r, e);
25744 }
25745
25746 #[simd_test(enable = "neon")]
25747 unsafe fn test_vqshrun_high_n_s16() {
25748 let a: u8x8 = u8x8::new(0, 1, 8, 9, 8, 9, 10, 11);
25749 let b: i16x8 = i16x8::new(32, 36, 40, 44, 48, 52, 56, 60);
25750 let e: u8x16 = u8x16::new(0, 1, 8, 9, 8, 9, 10, 11, 8, 9, 10, 11, 12, 13, 14, 15);
25751 let r: u8x16 = transmute(vqshrun_high_n_s16::<2>(transmute(a), transmute(b)));
25752 assert_eq!(r, e);
25753 }
25754
25755 #[simd_test(enable = "neon")]
25756 unsafe fn test_vqshrun_high_n_s32() {
25757 let a: u16x4 = u16x4::new(0, 1, 8, 9);
25758 let b: i32x4 = i32x4::new(32, 36, 40, 44);
25759 let e: u16x8 = u16x8::new(0, 1, 8, 9, 8, 9, 10, 11);
25760 let r: u16x8 = transmute(vqshrun_high_n_s32::<2>(transmute(a), transmute(b)));
25761 assert_eq!(r, e);
25762 }
25763
25764 #[simd_test(enable = "neon")]
25765 unsafe fn test_vqshrun_high_n_s64() {
25766 let a: u32x2 = u32x2::new(0, 1);
25767 let b: i64x2 = i64x2::new(32, 36);
25768 let e: u32x4 = u32x4::new(0, 1, 8, 9);
25769 let r: u32x4 = transmute(vqshrun_high_n_s64::<2>(transmute(a), transmute(b)));
25770 assert_eq!(r, e);
25771 }
25772
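// Unsigned saturating accumulate of signed value tests (vsqaddb/vsqaddh/vsqadds/vsqaddd).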
25773 #[simd_test(enable = "neon")]
25774 unsafe fn test_vsqaddb_u8() {
25775 let a: u8 = 2;
25776 let b: i8 = 2;
25777 let e: u8 = 4;
25778 let r: u8 = transmute(vsqaddb_u8(transmute(a), transmute(b)));
25779 assert_eq!(r, e);
25780 }
25781
25782 #[simd_test(enable = "neon")]
25783 unsafe fn test_vsqaddh_u16() {
25784 let a: u16 = 2;
25785 let b: i16 = 2;
25786 let e: u16 = 4;
25787 let r: u16 = transmute(vsqaddh_u16(transmute(a), transmute(b)));
25788 assert_eq!(r, e);
25789 }
25790
25791 #[simd_test(enable = "neon")]
25792 unsafe fn test_vsqadds_u32() {
25793 let a: u32 = 2;
25794 let b: i32 = 2;
25795 let e: u32 = 4;
25796 let r: u32 = transmute(vsqadds_u32(transmute(a), transmute(b)));
25797 assert_eq!(r, e);
25798 }
25799
25800 #[simd_test(enable = "neon")]
25801 unsafe fn test_vsqaddd_u64() {
25802 let a: u64 = 2;
25803 let b: i64 = 2;
25804 let e: u64 = 4;
25805 let r: u64 = transmute(vsqaddd_u64(transmute(a), transmute(b)));
25806 assert_eq!(r, e);
25807 }
25808
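// Floating-point square root tests (vsqrt/vsqrtq).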
25809 #[simd_test(enable = "neon")]
25810 unsafe fn test_vsqrt_f32() {
25811 let a: f32x2 = f32x2::new(4.0, 9.0);
25812 let e: f32x2 = f32x2::new(2.0, 3.0);
25813 let r: f32x2 = transmute(vsqrt_f32(transmute(a)));
25814 assert_eq!(r, e);
25815 }
25816
25817 #[simd_test(enable = "neon")]
25818 unsafe fn test_vsqrtq_f32() {
25819 let a: f32x4 = f32x4::new(4.0, 9.0, 16.0, 25.0);
25820 let e: f32x4 = f32x4::new(2.0, 3.0, 4.0, 5.0);
25821 let r: f32x4 = transmute(vsqrtq_f32(transmute(a)));
25822 assert_eq!(r, e);
25823 }
25824
25825 #[simd_test(enable = "neon")]
25826 unsafe fn test_vsqrt_f64() {
25827 let a: f64 = 4.0;
25828 let e: f64 = 2.0;
25829 let r: f64 = transmute(vsqrt_f64(transmute(a)));
25830 assert_eq!(r, e);
25831 }
25832
25833 #[simd_test(enable = "neon")]
25834 unsafe fn test_vsqrtq_f64() {
25835 let a: f64x2 = f64x2::new(4.0, 9.0);
25836 let e: f64x2 = f64x2::new(2.0, 3.0);
25837 let r: f64x2 = transmute(vsqrtq_f64(transmute(a)));
25838 assert_eq!(r, e);
25839 }
25840
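// Reciprocal square-root estimate and step tests (vrsqrte*/vrsqrts*).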
25841 #[simd_test(enable = "neon")]
25842 unsafe fn test_vrsqrte_f64() {
25843 let a: f64 = 1.0;
25844 let e: f64 = 0.998046875;
25845 let r: f64 = transmute(vrsqrte_f64(transmute(a)));
25846 assert_eq!(r, e);
25847 }
25848
25849 #[simd_test(enable = "neon")]
25850 unsafe fn test_vrsqrteq_f64() {
25851 let a: f64x2 = f64x2::new(1.0, 2.0);
25852 let e: f64x2 = f64x2::new(0.998046875, 0.705078125);
25853 let r: f64x2 = transmute(vrsqrteq_f64(transmute(a)));
25854 assert_eq!(r, e);
25855 }
25856
25857 #[simd_test(enable = "neon")]
25858 unsafe fn test_vrsqrtes_f32() {
25859 let a: f32 = 1.0;
25860 let e: f32 = 0.998046875;
25861 let r: f32 = transmute(vrsqrtes_f32(transmute(a)));
25862 assert_eq!(r, e);
25863 }
25864
25865 #[simd_test(enable = "neon")]
25866 unsafe fn test_vrsqrted_f64() {
25867 let a: f64 = 1.0;
25868 let e: f64 = 0.998046875;
25869 let r: f64 = transmute(vrsqrted_f64(transmute(a)));
25870 assert_eq!(r, e);
25871 }
25872
25873 #[simd_test(enable = "neon")]
25874 unsafe fn test_vrsqrts_f64() {
25875 let a: f64 = 1.0;
25876 let b: f64 = 1.0;
25877 let e: f64 = 1.;
25878 let r: f64 = transmute(vrsqrts_f64(transmute(a), transmute(b)));
25879 assert_eq!(r, e);
25880 }
25881
25882 #[simd_test(enable = "neon")]
25883 unsafe fn test_vrsqrtsq_f64() {
25884 let a: f64x2 = f64x2::new(1.0, 2.0);
25885 let b: f64x2 = f64x2::new(1.0, 2.0);
25886 let e: f64x2 = f64x2::new(1., -0.5);
25887 let r: f64x2 = transmute(vrsqrtsq_f64(transmute(a), transmute(b)));
25888 assert_eq!(r, e);
25889 }
25890
25891 #[simd_test(enable = "neon")]
25892 unsafe fn test_vrsqrtss_f32() {
25893 let a: f32 = 1.0;
25894 let b: f32 = 1.0;
25895 let e: f32 = 1.;
25896 let r: f32 = transmute(vrsqrtss_f32(transmute(a), transmute(b)));
25897 assert_eq!(r, e);
25898 }
25899
25900 #[simd_test(enable = "neon")]
25901 unsafe fn test_vrsqrtsd_f64() {
25902 let a: f64 = 1.0;
25903 let b: f64 = 1.0;
25904 let e: f64 = 1.;
25905 let r: f64 = transmute(vrsqrtsd_f64(transmute(a), transmute(b)));
25906 assert_eq!(r, e);
25907 }
25908
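// Reciprocal estimate, step, and exponent tests (vrecpe*/vrecps*/vrecpx*).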
25909 #[simd_test(enable = "neon")]
25910 unsafe fn test_vrecpe_f64() {
25911 let a: f64 = 4.0;
25912 let e: f64 = 0.24951171875;
25913 let r: f64 = transmute(vrecpe_f64(transmute(a)));
25914 assert_eq!(r, e);
25915 }
25916
25917 #[simd_test(enable = "neon")]
25918 unsafe fn test_vrecpeq_f64() {
25919 let a: f64x2 = f64x2::new(4.0, 3.0);
25920 let e: f64x2 = f64x2::new(0.24951171875, 0.3330078125);
25921 let r: f64x2 = transmute(vrecpeq_f64(transmute(a)));
25922 assert_eq!(r, e);
25923 }
25924
25925 #[simd_test(enable = "neon")]
25926 unsafe fn test_vrecpes_f32() {
25927 let a: f32 = 4.0;
25928 let e: f32 = 0.24951171875;
25929 let r: f32 = transmute(vrecpes_f32(transmute(a)));
25930 assert_eq!(r, e);
25931 }
25932
25933 #[simd_test(enable = "neon")]
25934 unsafe fn test_vrecped_f64() {
25935 let a: f64 = 4.0;
25936 let e: f64 = 0.24951171875;
25937 let r: f64 = transmute(vrecped_f64(transmute(a)));
25938 assert_eq!(r, e);
25939 }
25940
25941 #[simd_test(enable = "neon")]
25942 unsafe fn test_vrecps_f64() {
25943 let a: f64 = 4.0;
25944 let b: f64 = 4.0;
25945 let e: f64 = -14.;
25946 let r: f64 = transmute(vrecps_f64(transmute(a), transmute(b)));
25947 assert_eq!(r, e);
25948 }
25949
25950 #[simd_test(enable = "neon")]
25951 unsafe fn test_vrecpsq_f64() {
25952 let a: f64x2 = f64x2::new(4.0, 3.0);
25953 let b: f64x2 = f64x2::new(4.0, 3.0);
25954 let e: f64x2 = f64x2::new(-14., -7.);
25955 let r: f64x2 = transmute(vrecpsq_f64(transmute(a), transmute(b)));
25956 assert_eq!(r, e);
25957 }
25958
25959 #[simd_test(enable = "neon")]
25960 unsafe fn test_vrecpss_f32() {
25961 let a: f32 = 4.0;
25962 let b: f32 = 4.0;
25963 let e: f32 = -14.;
25964 let r: f32 = transmute(vrecpss_f32(transmute(a), transmute(b)));
25965 assert_eq!(r, e);
25966 }
25967
25968 #[simd_test(enable = "neon")]
25969 unsafe fn test_vrecpsd_f64() {
25970 let a: f64 = 4.0;
25971 let b: f64 = 4.0;
25972 let e: f64 = -14.;
25973 let r: f64 = transmute(vrecpsd_f64(transmute(a), transmute(b)));
25974 assert_eq!(r, e);
25975 }
25976
25977 #[simd_test(enable = "neon")]
25978 unsafe fn test_vrecpxs_f32() {
25979 let a: f32 = 4.0;
25980 let e: f32 = 0.5;
25981 let r: f32 = transmute(vrecpxs_f32(transmute(a)));
25982 assert_eq!(r, e);
25983 }
25984
25985 #[simd_test(enable = "neon")]
25986 unsafe fn test_vrecpxd_f64() {
25987 let a: f64 = 4.0;
25988 let e: f64 = 0.5;
25989 let r: f64 = transmute(vrecpxd_f64(transmute(a)));
25990 assert_eq!(r, e);
25991 }
25992
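// Reinterpret (bit-cast) tests between 64-bit polynomial, integer, and floating-point vectors.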
25993 #[simd_test(enable = "neon")]
25994 unsafe fn test_vreinterpret_s64_p64() {
25995 let a: i64x1 = i64x1::new(0);
25996 let e: i64x1 = i64x1::new(0);
25997 let r: i64x1 = transmute(vreinterpret_s64_p64(transmute(a)));
25998 assert_eq!(r, e);
25999 }
26000
26001 #[simd_test(enable = "neon")]
26002 unsafe fn test_vreinterpret_u64_p64() {
26003 let a: i64x1 = i64x1::new(0);
26004 let e: u64x1 = u64x1::new(0);
26005 let r: u64x1 = transmute(vreinterpret_u64_p64(transmute(a)));
26006 assert_eq!(r, e);
26007 }
26008
26009 #[simd_test(enable = "neon")]
26010 unsafe fn test_vreinterpret_p64_s64() {
26011 let a: i64x1 = i64x1::new(0);
26012 let e: i64x1 = i64x1::new(0);
26013 let r: i64x1 = transmute(vreinterpret_p64_s64(transmute(a)));
26014 assert_eq!(r, e);
26015 }
26016
26017 #[simd_test(enable = "neon")]
26018 unsafe fn test_vreinterpret_p64_u64() {
26019 let a: u64x1 = u64x1::new(0);
26020 let e: i64x1 = i64x1::new(0);
26021 let r: i64x1 = transmute(vreinterpret_p64_u64(transmute(a)));
26022 assert_eq!(r, e);
26023 }
26024
26025 #[simd_test(enable = "neon")]
26026 unsafe fn test_vreinterpretq_s64_p64() {
26027 let a: i64x2 = i64x2::new(0, 1);
26028 let e: i64x2 = i64x2::new(0, 1);
26029 let r: i64x2 = transmute(vreinterpretq_s64_p64(transmute(a)));
26030 assert_eq!(r, e);
26031 }
26032
26033 #[simd_test(enable = "neon")]
26034 unsafe fn test_vreinterpretq_u64_p64() {
26035 let a: i64x2 = i64x2::new(0, 1);
26036 let e: u64x2 = u64x2::new(0, 1);
26037 let r: u64x2 = transmute(vreinterpretq_u64_p64(transmute(a)));
26038 assert_eq!(r, e);
26039 }
26040
26041 #[simd_test(enable = "neon")]
26042 unsafe fn test_vreinterpretq_p64_s64() {
26043 let a: i64x2 = i64x2::new(0, 1);
26044 let e: i64x2 = i64x2::new(0, 1);
26045 let r: i64x2 = transmute(vreinterpretq_p64_s64(transmute(a)));
26046 assert_eq!(r, e);
26047 }
26048
26049 #[simd_test(enable = "neon")]
26050 unsafe fn test_vreinterpretq_p64_u64() {
26051 let a: u64x2 = u64x2::new(0, 1);
26052 let e: i64x2 = i64x2::new(0, 1);
26053 let r: i64x2 = transmute(vreinterpretq_p64_u64(transmute(a)));
26054 assert_eq!(r, e);
26055 }
26056
26057 #[simd_test(enable = "neon")]
26058 unsafe fn test_vreinterpret_s8_f64() {
26059 let a: f64 = 0.;
26060 let e: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26061 let r: i8x8 = transmute(vreinterpret_s8_f64(transmute(a)));
26062 assert_eq!(r, e);
26063 }
26064
26065 #[simd_test(enable = "neon")]
26066 unsafe fn test_vreinterpret_s16_f64() {
26067 let a: f64 = 0.;
26068 let e: i16x4 = i16x4::new(0, 0, 0, 0);
26069 let r: i16x4 = transmute(vreinterpret_s16_f64(transmute(a)));
26070 assert_eq!(r, e);
26071 }
26072
26073 #[simd_test(enable = "neon")]
26074 unsafe fn test_vreinterpret_s32_f64() {
26075 let a: f64 = 0.;
26076 let e: i32x2 = i32x2::new(0, 0);
26077 let r: i32x2 = transmute(vreinterpret_s32_f64(transmute(a)));
26078 assert_eq!(r, e);
26079 }
26080
26081 #[simd_test(enable = "neon")]
26082 unsafe fn test_vreinterpret_s64_f64() {
26083 let a: f64 = 0.;
26084 let e: i64x1 = i64x1::new(0);
26085 let r: i64x1 = transmute(vreinterpret_s64_f64(transmute(a)));
26086 assert_eq!(r, e);
26087 }
26088
26089 #[simd_test(enable = "neon")]
26090 unsafe fn test_vreinterpretq_s8_f64() {
26091 let a: f64x2 = f64x2::new(0., 0.);
26092 let e: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
26093 let r: i8x16 = transmute(vreinterpretq_s8_f64(transmute(a)));
26094 assert_eq!(r, e);
26095 }
26096
26097 #[simd_test(enable = "neon")]
26098 unsafe fn test_vreinterpretq_s16_f64() {
26099 let a: f64x2 = f64x2::new(0., 0.);
26100 let e: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26101 let r: i16x8 = transmute(vreinterpretq_s16_f64(transmute(a)));
26102 assert_eq!(r, e);
26103 }
26104
26105 #[simd_test(enable = "neon")]
26106 unsafe fn test_vreinterpretq_s32_f64() {
26107 let a: f64x2 = f64x2::new(0., 0.);
26108 let e: i32x4 = i32x4::new(0, 0, 0, 0);
26109 let r: i32x4 = transmute(vreinterpretq_s32_f64(transmute(a)));
26110 assert_eq!(r, e);
26111 }
26112
26113 #[simd_test(enable = "neon")]
26114 unsafe fn test_vreinterpretq_s64_f64() {
26115 let a: f64x2 = f64x2::new(0., 0.);
26116 let e: i64x2 = i64x2::new(0, 0);
26117 let r: i64x2 = transmute(vreinterpretq_s64_f64(transmute(a)));
26118 assert_eq!(r, e);
26119 }
26120
26121 #[simd_test(enable = "neon")]
26122 unsafe fn test_vreinterpret_u8_f64() {
26123 let a: f64 = 0.;
26124 let e: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26125 let r: u8x8 = transmute(vreinterpret_u8_f64(transmute(a)));
26126 assert_eq!(r, e);
26127 }
26128
26129 #[simd_test(enable = "neon")]
26130 unsafe fn test_vreinterpret_u16_f64() {
26131 let a: f64 = 0.;
26132 let e: u16x4 = u16x4::new(0, 0, 0, 0);
26133 let r: u16x4 = transmute(vreinterpret_u16_f64(transmute(a)));
26134 assert_eq!(r, e);
26135 }
26136
26137 #[simd_test(enable = "neon")]
26138 unsafe fn test_vreinterpret_u32_f64() {
26139 let a: f64 = 0.;
26140 let e: u32x2 = u32x2::new(0, 0);
26141 let r: u32x2 = transmute(vreinterpret_u32_f64(transmute(a)));
26142 assert_eq!(r, e);
26143 }
26144
26145 #[simd_test(enable = "neon")]
26146 unsafe fn test_vreinterpret_u64_f64() {
26147 let a: f64 = 0.;
26148 let e: u64x1 = u64x1::new(0);
26149 let r: u64x1 = transmute(vreinterpret_u64_f64(transmute(a)));
26150 assert_eq!(r, e);
26151 }
26152
26153 #[simd_test(enable = "neon")]
26154 unsafe fn test_vreinterpretq_u8_f64() {
26155 let a: f64x2 = f64x2::new(0., 0.);
26156 let e: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
26157 let r: u8x16 = transmute(vreinterpretq_u8_f64(transmute(a)));
26158 assert_eq!(r, e);
26159 }
26160
26161 #[simd_test(enable = "neon")]
26162 unsafe fn test_vreinterpretq_u16_f64() {
26163 let a: f64x2 = f64x2::new(0., 0.);
26164 let e: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26165 let r: u16x8 = transmute(vreinterpretq_u16_f64(transmute(a)));
26166 assert_eq!(r, e);
26167 }
26168
26169 #[simd_test(enable = "neon")]
26170 unsafe fn test_vreinterpretq_u32_f64() {
26171 let a: f64x2 = f64x2::new(0., 0.);
26172 let e: u32x4 = u32x4::new(0, 0, 0, 0);
26173 let r: u32x4 = transmute(vreinterpretq_u32_f64(transmute(a)));
26174 assert_eq!(r, e);
26175 }
26176
26177 #[simd_test(enable = "neon")]
26178 unsafe fn test_vreinterpretq_u64_f64() {
26179 let a: f64x2 = f64x2::new(0., 0.);
26180 let e: u64x2 = u64x2::new(0, 0);
26181 let r: u64x2 = transmute(vreinterpretq_u64_f64(transmute(a)));
26182 assert_eq!(r, e);
26183 }
26184
26185 #[simd_test(enable = "neon")]
26186 unsafe fn test_vreinterpret_p8_f64() {
26187 let a: f64 = 0.;
26188 let e: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26189 let r: i8x8 = transmute(vreinterpret_p8_f64(transmute(a)));
26190 assert_eq!(r, e);
26191 }
26192
26193 #[simd_test(enable = "neon")]
26194 unsafe fn test_vreinterpret_p16_f64() {
26195 let a: f64 = 0.;
26196 let e: i16x4 = i16x4::new(0, 0, 0, 0);
26197 let r: i16x4 = transmute(vreinterpret_p16_f64(transmute(a)));
26198 assert_eq!(r, e);
26199 }
26200
26201 #[simd_test(enable = "neon")]
26202 unsafe fn test_vreinterpret_p64_f32() {
26203 let a: f32x2 = f32x2::new(0., 0.);
26204 let e: i64x1 = i64x1::new(0);
26205 let r: i64x1 = transmute(vreinterpret_p64_f32(transmute(a)));
26206 assert_eq!(r, e);
26207 }
26208
26209 #[simd_test(enable = "neon")]
26210 unsafe fn test_vreinterpret_p64_f64() {
26211 let a: f64 = 0.;
26212 let e: i64x1 = i64x1::new(0);
26213 let r: i64x1 = transmute(vreinterpret_p64_f64(transmute(a)));
26214 assert_eq!(r, e);
26215 }
26216
26217 #[simd_test(enable = "neon")]
26218 unsafe fn test_vreinterpretq_p8_f64() {
26219 let a: f64x2 = f64x2::new(0., 0.);
26220 let e: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
26221 let r: i8x16 = transmute(vreinterpretq_p8_f64(transmute(a)));
26222 assert_eq!(r, e);
26223 }
26224
26225 #[simd_test(enable = "neon")]
26226 unsafe fn test_vreinterpretq_p16_f64() {
26227 let a: f64x2 = f64x2::new(0., 0.);
26228 let e: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26229 let r: i16x8 = transmute(vreinterpretq_p16_f64(transmute(a)));
26230 assert_eq!(r, e);
26231 }
26232
26233 #[simd_test(enable = "neon")]
26234 unsafe fn test_vreinterpretq_p64_f32() {
26235 let a: f32x4 = f32x4::new(0., 0., 0., 0.);
26236 let e: i64x2 = i64x2::new(0, 0);
26237 let r: i64x2 = transmute(vreinterpretq_p64_f32(transmute(a)));
26238 assert_eq!(r, e);
26239 }
26240
26241 #[simd_test(enable = "neon")]
26242 unsafe fn test_vreinterpretq_p64_f64() {
26243 let a: f64x2 = f64x2::new(0., 0.);
26244 let e: i64x2 = i64x2::new(0, 0);
26245 let r: i64x2 = transmute(vreinterpretq_p64_f64(transmute(a)));
26246 assert_eq!(r, e);
26247 }
26248
26249 #[simd_test(enable = "neon")]
26250 unsafe fn test_vreinterpretq_p128_f64() {
26251 let a: f64x2 = f64x2::new(0., 0.);
26252 let e: p128 = 0;
26253 let r: p128 = transmute(vreinterpretq_p128_f64(transmute(a)));
26254 assert_eq!(r, e);
26255 }
26256
26257 #[simd_test(enable = "neon")]
26258 unsafe fn test_vreinterpret_f64_s8() {
26259 let a: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26260 let e: f64 = 0.;
26261 let r: f64 = transmute(vreinterpret_f64_s8(transmute(a)));
26262 assert_eq!(r, e);
26263 }
26264
26265 #[simd_test(enable = "neon")]
26266 unsafe fn test_vreinterpret_f64_s16() {
26267 let a: i16x4 = i16x4::new(0, 0, 0, 0);
26268 let e: f64 = 0.;
26269 let r: f64 = transmute(vreinterpret_f64_s16(transmute(a)));
26270 assert_eq!(r, e);
26271 }
26272
26273 #[simd_test(enable = "neon")]
26274 unsafe fn test_vreinterpret_f64_s32() {
26275 let a: i32x2 = i32x2::new(0, 0);
26276 let e: f64 = 0.;
26277 let r: f64 = transmute(vreinterpret_f64_s32(transmute(a)));
26278 assert_eq!(r, e);
26279 }
26280
26281 #[simd_test(enable = "neon")]
26282 unsafe fn test_vreinterpret_f64_s64() {
26283 let a: i64x1 = i64x1::new(0);
26284 let e: f64 = 0.;
26285 let r: f64 = transmute(vreinterpret_f64_s64(transmute(a)));
26286 assert_eq!(r, e);
26287 }
26288
26289 #[simd_test(enable = "neon")]
26290 unsafe fn test_vreinterpretq_f64_s8() {
26291 let a: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
26292 let e: f64x2 = f64x2::new(0., 0.);
26293 let r: f64x2 = transmute(vreinterpretq_f64_s8(transmute(a)));
26294 assert_eq!(r, e);
26295 }
26296
26297 #[simd_test(enable = "neon")]
26298 unsafe fn test_vreinterpretq_f64_s16() {
26299 let a: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26300 let e: f64x2 = f64x2::new(0., 0.);
26301 let r: f64x2 = transmute(vreinterpretq_f64_s16(transmute(a)));
26302 assert_eq!(r, e);
26303 }
26304
26305 #[simd_test(enable = "neon")]
26306 unsafe fn test_vreinterpretq_f64_s32() {
26307 let a: i32x4 = i32x4::new(0, 0, 0, 0);
26308 let e: f64x2 = f64x2::new(0., 0.);
26309 let r: f64x2 = transmute(vreinterpretq_f64_s32(transmute(a)));
26310 assert_eq!(r, e);
26311 }
26312
26313 #[simd_test(enable = "neon")]
26314 unsafe fn test_vreinterpretq_f64_s64() {
26315 let a: i64x2 = i64x2::new(0, 0);
26316 let e: f64x2 = f64x2::new(0., 0.);
26317 let r: f64x2 = transmute(vreinterpretq_f64_s64(transmute(a)));
26318 assert_eq!(r, e);
26319 }
26320
26321 #[simd_test(enable = "neon")]
26322 unsafe fn test_vreinterpret_f64_p8() {
26323 let a: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26324 let e: f64 = 0.;
26325 let r: f64 = transmute(vreinterpret_f64_p8(transmute(a)));
26326 assert_eq!(r, e);
26327 }
26328
26329 #[simd_test(enable = "neon")]
26330 unsafe fn test_vreinterpret_f64_u16() {
26331 let a: u16x4 = u16x4::new(0, 0, 0, 0);
26332 let e: f64 = 0.;
26333 let r: f64 = transmute(vreinterpret_f64_u16(transmute(a)));
26334 assert_eq!(r, e);
26335 }
26336
26337 #[simd_test(enable = "neon")]
26338 unsafe fn test_vreinterpret_f64_u32() {
26339 let a: u32x2 = u32x2::new(0, 0);
26340 let e: f64 = 0.;
26341 let r: f64 = transmute(vreinterpret_f64_u32(transmute(a)));
26342 assert_eq!(r, e);
26343 }
26344
26345 #[simd_test(enable = "neon")]
26346 unsafe fn test_vreinterpret_f64_u64() {
26347 let a: u64x1 = u64x1::new(0);
26348 let e: f64 = 0.;
26349 let r: f64 = transmute(vreinterpret_f64_u64(transmute(a)));
26350 assert_eq!(r, e);
26351 }
26352
26353 #[simd_test(enable = "neon")]
26354 unsafe fn test_vreinterpretq_f64_p8() {
26355 let a: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
26356 let e: f64x2 = f64x2::new(0., 0.);
26357 let r: f64x2 = transmute(vreinterpretq_f64_p8(transmute(a)));
26358 assert_eq!(r, e);
26359 }
26360
26361 #[simd_test(enable = "neon")]
26362 unsafe fn test_vreinterpretq_f64_u16() {
26363 let a: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26364 let e: f64x2 = f64x2::new(0., 0.);
26365 let r: f64x2 = transmute(vreinterpretq_f64_u16(transmute(a)));
26366 assert_eq!(r, e);
26367 }
26368
26369 #[simd_test(enable = "neon")]
26370 unsafe fn test_vreinterpretq_f64_u32() {
26371 let a: u32x4 = u32x4::new(0, 0, 0, 0);
26372 let e: f64x2 = f64x2::new(0., 0.);
26373 let r: f64x2 = transmute(vreinterpretq_f64_u32(transmute(a)));
26374 assert_eq!(r, e);
26375 }
26376
26377 #[simd_test(enable = "neon")]
26378 unsafe fn test_vreinterpretq_f64_u64() {
26379 let a: u64x2 = u64x2::new(0, 0);
26380 let e: f64x2 = f64x2::new(0., 0.);
26381 let r: f64x2 = transmute(vreinterpretq_f64_u64(transmute(a)));
26382 assert_eq!(r, e);
26383 }
26384
26385 #[simd_test(enable = "neon")]
26386 unsafe fn test_vreinterpret_f64_u8() {
26387 let a: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26388 let e: f64 = 0.;
26389 let r: f64 = transmute(vreinterpret_f64_u8(transmute(a)));
26390 assert_eq!(r, e);
26391 }
26392
26393 #[simd_test(enable = "neon")]
26394 unsafe fn test_vreinterpret_f64_p16() {
26395 let a: i16x4 = i16x4::new(0, 0, 0, 0);
26396 let e: f64 = 0.;
26397 let r: f64 = transmute(vreinterpret_f64_p16(transmute(a)));
26398 assert_eq!(r, e);
26399 }
26400
26401 #[simd_test(enable = "neon")]
26402 unsafe fn test_vreinterpret_f64_p64() {
26403 let a: i64x1 = i64x1::new(0);
26404 let e: f64 = 0.;
26405 let r: f64 = transmute(vreinterpret_f64_p64(transmute(a)));
26406 assert_eq!(r, e);
26407 }
26408
26409 #[simd_test(enable = "neon")]
26410 unsafe fn test_vreinterpret_f32_p64() {
26411 let a: i64x1 = i64x1::new(0);
26412 let e: f32x2 = f32x2::new(0., 0.);
26413 let r: f32x2 = transmute(vreinterpret_f32_p64(transmute(a)));
26414 assert_eq!(r, e);
26415 }
26416
26417 #[simd_test(enable = "neon")]
26418 unsafe fn test_vreinterpretq_f64_u8() {
26419 let a: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
26420 let e: f64x2 = f64x2::new(0., 0.);
26421 let r: f64x2 = transmute(vreinterpretq_f64_u8(transmute(a)));
26422 assert_eq!(r, e);
26423 }
26424
26425 #[simd_test(enable = "neon")]
26426 unsafe fn test_vreinterpretq_f64_p16() {
26427 let a: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26428 let e: f64x2 = f64x2::new(0., 0.);
26429 let r: f64x2 = transmute(vreinterpretq_f64_p16(transmute(a)));
26430 assert_eq!(r, e);
26431 }
26432
26433 #[simd_test(enable = "neon")]
26434 unsafe fn test_vreinterpretq_f64_p64() {
26435 let a: i64x2 = i64x2::new(0, 0);
26436 let e: f64x2 = f64x2::new(0., 0.);
26437 let r: f64x2 = transmute(vreinterpretq_f64_p64(transmute(a)));
26438 assert_eq!(r, e);
26439 }
26440
26441 #[simd_test(enable = "neon")]
26442 unsafe fn test_vreinterpretq_f32_p64() {
26443 let a: i64x2 = i64x2::new(0, 0);
26444 let e: f32x4 = f32x4::new(0., 0., 0., 0.);
26445 let r: f32x4 = transmute(vreinterpretq_f32_p64(transmute(a)));
26446 assert_eq!(r, e);
26447 }
26448
26449 #[simd_test(enable = "neon")]
26450 unsafe fn test_vreinterpretq_f64_p128() {
26451 let a: p128 = 0;
26452 let e: f64x2 = f64x2::new(0., 0.);
26453 let r: f64x2 = transmute(vreinterpretq_f64_p128(transmute(a)));
26454 assert_eq!(r, e);
26455 }
26456
26457 #[simd_test(enable = "neon")]
26458 unsafe fn test_vreinterpret_f64_f32() {
26459 let a: f32x2 = f32x2::new(0., 0.);
26460 let e: f64 = 0.;
26461 let r: f64 = transmute(vreinterpret_f64_f32(transmute(a)));
26462 assert_eq!(r, e);
26463 }
26464
26465 #[simd_test(enable = "neon")]
26466 unsafe fn test_vreinterpret_f32_f64() {
26467 let a: f64 = 0.;
26468 let e: f32x2 = f32x2::new(0., 0.);
26469 let r: f32x2 = transmute(vreinterpret_f32_f64(transmute(a)));
26470 assert_eq!(r, e);
26471 }
26472
26473 #[simd_test(enable = "neon")]
26474 unsafe fn test_vreinterpretq_f64_f32() {
26475 let a: f32x4 = f32x4::new(0., 0., 0., 0.);
26476 let e: f64x2 = f64x2::new(0., 0.);
26477 let r: f64x2 = transmute(vreinterpretq_f64_f32(transmute(a)));
26478 assert_eq!(r, e);
26479 }
26480
26481 #[simd_test(enable = "neon")]
26482 unsafe fn test_vreinterpretq_f32_f64() {
26483 let a: f64x2 = f64x2::new(0., 0.);
26484 let e: f32x4 = f32x4::new(0., 0., 0., 0.);
26485 let r: f32x4 = transmute(vreinterpretq_f32_f64(transmute(a)));
26486 assert_eq!(r, e);
26487 }
26488
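// Rounding shift left, rounding shift right narrow, and rounding shift-right-accumulate tests (vrshl*/vrshr*/vrshrn_high*/vrsra*).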
26489 #[simd_test(enable = "neon")]
26490 unsafe fn test_vrshld_s64() {
26491 let a: i64 = 1;
26492 let b: i64 = 2;
26493 let e: i64 = 4;
26494 let r: i64 = transmute(vrshld_s64(transmute(a), transmute(b)));
26495 assert_eq!(r, e);
26496 }
26497
26498 #[simd_test(enable = "neon")]
26499 unsafe fn test_vrshld_u64() {
26500 let a: u64 = 1;
26501 let b: i64 = 2;
26502 let e: u64 = 4;
26503 let r: u64 = transmute(vrshld_u64(transmute(a), transmute(b)));
26504 assert_eq!(r, e);
26505 }
26506
26507 #[simd_test(enable = "neon")]
26508 unsafe fn test_vrshrd_n_s64() {
26509 let a: i64 = 4;
26510 let e: i64 = 1;
26511 let r: i64 = transmute(vrshrd_n_s64::<2>(transmute(a)));
26512 assert_eq!(r, e);
26513 }
26514
26515 #[simd_test(enable = "neon")]
26516 unsafe fn test_vrshrd_n_u64() {
26517 let a: u64 = 4;
26518 let e: u64 = 1;
26519 let r: u64 = transmute(vrshrd_n_u64::<2>(transmute(a)));
26520 assert_eq!(r, e);
26521 }
26522
26523 #[simd_test(enable = "neon")]
26524 unsafe fn test_vrshrn_high_n_s16() {
26525 let a: i8x8 = i8x8::new(0, 1, 8, 9, 8, 9, 10, 11);
26526 let b: i16x8 = i16x8::new(32, 36, 40, 44, 48, 52, 56, 60);
26527 let e: i8x16 = i8x16::new(0, 1, 8, 9, 8, 9, 10, 11, 8, 9, 10, 11, 12, 13, 14, 15);
26528 let r: i8x16 = transmute(vrshrn_high_n_s16::<2>(transmute(a), transmute(b)));
26529 assert_eq!(r, e);
26530 }
26531
26532 #[simd_test(enable = "neon")]
26533 unsafe fn test_vrshrn_high_n_s32() {
26534 let a: i16x4 = i16x4::new(0, 1, 8, 9);
26535 let b: i32x4 = i32x4::new(32, 36, 40, 44);
26536 let e: i16x8 = i16x8::new(0, 1, 8, 9, 8, 9, 10, 11);
26537 let r: i16x8 = transmute(vrshrn_high_n_s32::<2>(transmute(a), transmute(b)));
26538 assert_eq!(r, e);
26539 }
26540
26541 #[simd_test(enable = "neon")]
26542 unsafe fn test_vrshrn_high_n_s64() {
26543 let a: i32x2 = i32x2::new(0, 1);
26544 let b: i64x2 = i64x2::new(32, 36);
26545 let e: i32x4 = i32x4::new(0, 1, 8, 9);
26546 let r: i32x4 = transmute(vrshrn_high_n_s64::<2>(transmute(a), transmute(b)));
26547 assert_eq!(r, e);
26548 }
26549
26550 #[simd_test(enable = "neon")]
26551 unsafe fn test_vrshrn_high_n_u16() {
26552 let a: u8x8 = u8x8::new(0, 1, 8, 9, 8, 9, 10, 11);
26553 let b: u16x8 = u16x8::new(32, 36, 40, 44, 48, 52, 56, 60);
26554 let e: u8x16 = u8x16::new(0, 1, 8, 9, 8, 9, 10, 11, 8, 9, 10, 11, 12, 13, 14, 15);
26555 let r: u8x16 = transmute(vrshrn_high_n_u16::<2>(transmute(a), transmute(b)));
26556 assert_eq!(r, e);
26557 }
26558
26559 #[simd_test(enable = "neon")]
26560 unsafe fn test_vrshrn_high_n_u32() {
26561 let a: u16x4 = u16x4::new(0, 1, 8, 9);
26562 let b: u32x4 = u32x4::new(32, 36, 40, 44);
26563 let e: u16x8 = u16x8::new(0, 1, 8, 9, 8, 9, 10, 11);
26564 let r: u16x8 = transmute(vrshrn_high_n_u32::<2>(transmute(a), transmute(b)));
26565 assert_eq!(r, e);
26566 }
26567
26568 #[simd_test(enable = "neon")]
26569 unsafe fn test_vrshrn_high_n_u64() {
26570 let a: u32x2 = u32x2::new(0, 1);
26571 let b: u64x2 = u64x2::new(32, 36);
26572 let e: u32x4 = u32x4::new(0, 1, 8, 9);
26573 let r: u32x4 = transmute(vrshrn_high_n_u64::<2>(transmute(a), transmute(b)));
26574 assert_eq!(r, e);
26575 }
26576
26577 #[simd_test(enable = "neon")]
26578 unsafe fn test_vrsrad_n_s64() {
26579 let a: i64 = 1;
26580 let b: i64 = 4;
26581 let e: i64 = 2;
26582 let r: i64 = transmute(vrsrad_n_s64::<2>(transmute(a), transmute(b)));
26583 assert_eq!(r, e);
26584 }
26585
26586 #[simd_test(enable = "neon")]
26587 unsafe fn test_vrsrad_n_u64() {
26588 let a: u64 = 1;
26589 let b: u64 = 4;
26590 let e: u64 = 2;
26591 let r: u64 = transmute(vrsrad_n_u64::<2>(transmute(a), transmute(b)));
26592 assert_eq!(r, e);
26593 }
26594
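// Rounding subtract returning high narrow, high-half tests (vrsubhn_high_*).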
26595 #[simd_test(enable = "neon")]
26596 unsafe fn test_vrsubhn_high_s16() {
26597 let a: i8x8 = i8x8::new(1, 2, 0, 0, 0, 0, 0, 0);
26598 let b: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
26599 let c: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
26600 let e: i8x16 = i8x16::new(1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
26601 let r: i8x16 = transmute(vrsubhn_high_s16(transmute(a), transmute(b), transmute(c)));
26602 assert_eq!(r, e);
26603 }
26604
26605 #[simd_test(enable = "neon")]
26606 unsafe fn test_vrsubhn_high_s32() {
26607 let a: i16x4 = i16x4::new(1, 2, 0, 0);
26608 let b: i32x4 = i32x4::new(1, 2, 3, 4);
26609 let c: i32x4 = i32x4::new(1, 2, 3, 4);
26610 let e: i16x8 = i16x8::new(1, 2, 0, 0, 0, 0, 0, 0);
26611 let r: i16x8 = transmute(vrsubhn_high_s32(transmute(a), transmute(b), transmute(c)));
26612 assert_eq!(r, e);
26613 }
26614
26615 #[simd_test(enable = "neon")]
26616 unsafe fn test_vrsubhn_high_s64() {
26617 let a: i32x2 = i32x2::new(1, 2);
26618 let b: i64x2 = i64x2::new(1, 2);
26619 let c: i64x2 = i64x2::new(1, 2);
26620 let e: i32x4 = i32x4::new(1, 2, 0, 0);
26621 let r: i32x4 = transmute(vrsubhn_high_s64(transmute(a), transmute(b), transmute(c)));
26622 assert_eq!(r, e);
26623 }
26624
26625 #[simd_test(enable = "neon")]
26626 unsafe fn test_vrsubhn_high_u16() {
26627 let a: u8x8 = u8x8::new(1, 2, 0, 0, 0, 0, 0, 0);
26628 let b: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
26629 let c: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
26630 let e: u8x16 = u8x16::new(1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
26631 let r: u8x16 = transmute(vrsubhn_high_u16(transmute(a), transmute(b), transmute(c)));
26632 assert_eq!(r, e);
26633 }
26634
26635 #[simd_test(enable = "neon")]
26636 unsafe fn test_vrsubhn_high_u32() {
26637 let a: u16x4 = u16x4::new(1, 2, 0, 0);
26638 let b: u32x4 = u32x4::new(1, 2, 3, 4);
26639 let c: u32x4 = u32x4::new(1, 2, 3, 4);
26640 let e: u16x8 = u16x8::new(1, 2, 0, 0, 0, 0, 0, 0);
26641 let r: u16x8 = transmute(vrsubhn_high_u32(transmute(a), transmute(b), transmute(c)));
26642 assert_eq!(r, e);
26643 }
26644
26645 #[simd_test(enable = "neon")]
26646 unsafe fn test_vrsubhn_high_u64() {
26647 let a: u32x2 = u32x2::new(1, 2);
26648 let b: u64x2 = u64x2::new(1, 2);
26649 let c: u64x2 = u64x2::new(1, 2);
26650 let e: u32x4 = u32x4::new(1, 2, 0, 0);
26651 let r: u32x4 = transmute(vrsubhn_high_u64(transmute(a), transmute(b), transmute(c)));
26652 assert_eq!(r, e);
26653 }
26654
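// Lane insertion and shift tests (vset_lane, vshl, vshll_high_n, vshrn_high_n).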
26655 #[simd_test(enable = "neon")]
26656 unsafe fn test_vset_lane_f64() {
26657 let a: f64 = 1.;
26658 let b: f64 = 0.;
26659 let e: f64 = 1.;
26660 let r: f64 = transmute(vset_lane_f64::<0>(transmute(a), transmute(b)));
26661 assert_eq!(r, e);
26662 }
26663
26664 #[simd_test(enable = "neon")]
26665 unsafe fn test_vsetq_lane_f64() {
26666 let a: f64 = 1.;
26667 let b: f64x2 = f64x2::new(0., 2.);
26668 let e: f64x2 = f64x2::new(1., 2.);
26669 let r: f64x2 = transmute(vsetq_lane_f64::<0>(transmute(a), transmute(b)));
26670 assert_eq!(r, e);
26671 }
26672
26673 #[simd_test(enable = "neon")]
26674 unsafe fn test_vshld_s64() {
26675 let a: i64 = 1;
26676 let b: i64 = 2;
26677 let e: i64 = 4;
26678 let r: i64 = transmute(vshld_s64(transmute(a), transmute(b)));
26679 assert_eq!(r, e);
26680 }
26681
26682 #[simd_test(enable = "neon")]
26683 unsafe fn test_vshld_u64() {
26684 let a: u64 = 1;
26685 let b: i64 = 2;
26686 let e: u64 = 4;
26687 let r: u64 = transmute(vshld_u64(transmute(a), transmute(b)));
26688 assert_eq!(r, e);
26689 }
26690
26691 #[simd_test(enable = "neon")]
26692 unsafe fn test_vshll_high_n_s8() {
26693 let a: i8x16 = i8x16::new(0, 0, 1, 2, 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8);
26694 let e: i16x8 = i16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
26695 let r: i16x8 = transmute(vshll_high_n_s8::<2>(transmute(a)));
26696 assert_eq!(r, e);
26697 }
26698
26699 #[simd_test(enable = "neon")]
26700 unsafe fn test_vshll_high_n_s16() {
26701 let a: i16x8 = i16x8::new(0, 0, 1, 2, 1, 2, 3, 4);
26702 let e: i32x4 = i32x4::new(4, 8, 12, 16);
26703 let r: i32x4 = transmute(vshll_high_n_s16::<2>(transmute(a)));
26704 assert_eq!(r, e);
26705 }
26706
26707 #[simd_test(enable = "neon")]
26708 unsafe fn test_vshll_high_n_s32() {
26709 let a: i32x4 = i32x4::new(0, 0, 1, 2);
26710 let e: i64x2 = i64x2::new(4, 8);
26711 let r: i64x2 = transmute(vshll_high_n_s32::<2>(transmute(a)));
26712 assert_eq!(r, e);
26713 }
26714
26715 #[simd_test(enable = "neon")]
26716 unsafe fn test_vshll_high_n_u8() {
26717 let a: u8x16 = u8x16::new(0, 0, 1, 2, 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8);
26718 let e: u16x8 = u16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
26719 let r: u16x8 = transmute(vshll_high_n_u8::<2>(transmute(a)));
26720 assert_eq!(r, e);
26721 }
26722
26723 #[simd_test(enable = "neon")]
26724 unsafe fn test_vshll_high_n_u16() {
26725 let a: u16x8 = u16x8::new(0, 0, 1, 2, 1, 2, 3, 4);
26726 let e: u32x4 = u32x4::new(4, 8, 12, 16);
26727 let r: u32x4 = transmute(vshll_high_n_u16::<2>(transmute(a)));
26728 assert_eq!(r, e);
26729 }
26730
26731 #[simd_test(enable = "neon")]
26732 unsafe fn test_vshll_high_n_u32() {
26733 let a: u32x4 = u32x4::new(0, 0, 1, 2);
26734 let e: u64x2 = u64x2::new(4, 8);
26735 let r: u64x2 = transmute(vshll_high_n_u32::<2>(transmute(a)));
26736 assert_eq!(r, e);
26737 }
26738
26739 #[simd_test(enable = "neon")]
26740 unsafe fn test_vshrn_high_n_s16() {
26741 let a: i8x8 = i8x8::new(1, 2, 5, 6, 5, 6, 7, 8);
26742 let b: i16x8 = i16x8::new(20, 24, 28, 32, 52, 56, 60, 64);
26743 let e: i8x16 = i8x16::new(1, 2, 5, 6, 5, 6, 7, 8, 5, 6, 7, 8, 13, 14, 15, 16);
26744 let r: i8x16 = transmute(vshrn_high_n_s16::<2>(transmute(a), transmute(b)));
26745 assert_eq!(r, e);
26746 }
26747
26748 #[simd_test(enable = "neon")]
26749 unsafe fn test_vshrn_high_n_s32() {
26750 let a: i16x4 = i16x4::new(1, 2, 5, 6);
26751 let b: i32x4 = i32x4::new(20, 24, 28, 32);
26752 let e: i16x8 = i16x8::new(1, 2, 5, 6, 5, 6, 7, 8);
26753 let r: i16x8 = transmute(vshrn_high_n_s32::<2>(transmute(a), transmute(b)));
26754 assert_eq!(r, e);
26755 }
26756
26757 #[simd_test(enable = "neon")]
26758 unsafe fn test_vshrn_high_n_s64() {
26759 let a: i32x2 = i32x2::new(1, 2);
26760 let b: i64x2 = i64x2::new(20, 24);
26761 let e: i32x4 = i32x4::new(1, 2, 5, 6);
26762 let r: i32x4 = transmute(vshrn_high_n_s64::<2>(transmute(a), transmute(b)));
26763 assert_eq!(r, e);
26764 }
26765
26766 #[simd_test(enable = "neon")]
26767 unsafe fn test_vshrn_high_n_u16() {
26768 let a: u8x8 = u8x8::new(1, 2, 5, 6, 5, 6, 7, 8);
26769 let b: u16x8 = u16x8::new(20, 24, 28, 32, 52, 56, 60, 64);
26770 let e: u8x16 = u8x16::new(1, 2, 5, 6, 5, 6, 7, 8, 5, 6, 7, 8, 13, 14, 15, 16);
26771 let r: u8x16 = transmute(vshrn_high_n_u16::<2>(transmute(a), transmute(b)));
26772 assert_eq!(r, e);
26773 }
26774
26775 #[simd_test(enable = "neon")]
26776 unsafe fn test_vshrn_high_n_u32() {
26777 let a: u16x4 = u16x4::new(1, 2, 5, 6);
26778 let b: u32x4 = u32x4::new(20, 24, 28, 32);
26779 let e: u16x8 = u16x8::new(1, 2, 5, 6, 5, 6, 7, 8);
26780 let r: u16x8 = transmute(vshrn_high_n_u32::<2>(transmute(a), transmute(b)));
26781 assert_eq!(r, e);
26782 }
26783
26784 #[simd_test(enable = "neon")]
26785 unsafe fn test_vshrn_high_n_u64() {
26786 let a: u32x2 = u32x2::new(1, 2);
26787 let b: u64x2 = u64x2::new(20, 24);
26788 let e: u32x4 = u32x4::new(1, 2, 5, 6);
26789 let r: u32x4 = transmute(vshrn_high_n_u64::<2>(transmute(a), transmute(b)));
26790 assert_eq!(r, e);
26791 }
26792
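// SM3/SM4 cryptographic extension tests.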
26793 #[simd_test(enable = "neon,sm4")]
26794 unsafe fn test_vsm3partw1q_u32() {
26795 let a: u32x4 = u32x4::new(1, 2, 3, 4);
26796 let b: u32x4 = u32x4::new(1, 2, 3, 4);
26797 let c: u32x4 = u32x4::new(1, 2, 3, 4);
26798 let e: u32x4 = u32x4::new(2147549312, 3221323968, 131329, 2684362752);
26799 let r: u32x4 = transmute(vsm3partw1q_u32(transmute(a), transmute(b), transmute(c)));
26800 assert_eq!(r, e);
26801 }
26802
26803 #[simd_test(enable = "neon,sm4")]
26804 unsafe fn test_vsm3partw2q_u32() {
26805 let a: u32x4 = u32x4::new(1, 2, 3, 4);
26806 let b: u32x4 = u32x4::new(1, 2, 3, 4);
26807 let c: u32x4 = u32x4::new(1, 2, 3, 4);
26808 let e: u32x4 = u32x4::new(128, 256, 384, 1077977696);
26809 let r: u32x4 = transmute(vsm3partw2q_u32(transmute(a), transmute(b), transmute(c)));
26810 assert_eq!(r, e);
26811 }
26812
26813 #[simd_test(enable = "neon,sm4")]
26814 unsafe fn test_vsm3ss1q_u32() {
26815 let a: u32x4 = u32x4::new(1, 2, 3, 4);
26816 let b: u32x4 = u32x4::new(1, 2, 3, 4);
26817 let c: u32x4 = u32x4::new(1, 2, 3, 4);
26818 let e: u32x4 = u32x4::new(0, 0, 0, 2098176);
26819 let r: u32x4 = transmute(vsm3ss1q_u32(transmute(a), transmute(b), transmute(c)));
26820 assert_eq!(r, e);
26821 }
26822
26823 #[simd_test(enable = "neon,sm4")]
26824 unsafe fn test_vsm4ekeyq_u32() {
26825 let a: u32x4 = u32x4::new(1, 2, 3, 4);
26826 let b: u32x4 = u32x4::new(1, 2, 3, 4);
26827 let e: u32x4 = u32x4::new(1784948604, 136020997, 2940231695, 3789947679);
26828 let r: u32x4 = transmute(vsm4ekeyq_u32(transmute(a), transmute(b)));
26829 assert_eq!(r, e);
26830 }
26831
26832 #[simd_test(enable = "neon,sm4")]
26833 unsafe fn test_vsm4eq_u32() {
26834 let a: u32x4 = u32x4::new(1, 2, 3, 4);
26835 let b: u32x4 = u32x4::new(1, 2, 3, 4);
26836 let e: u32x4 = u32x4::new(1093874472, 3616769504, 3878330411, 2765298765);
26837 let r: u32x4 = transmute(vsm4eq_u32(transmute(a), transmute(b)));
26838 assert_eq!(r, e);
26839 }
26840
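// SHA-3 and SHA-512 extension tests (vrax1q, vsha512*).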
26841 #[simd_test(enable = "neon,sha3")]
26842 unsafe fn test_vrax1q_u64() {
26843 let a: u64x2 = u64x2::new(1, 2);
26844 let b: u64x2 = u64x2::new(3, 4);
26845 let e: u64x2 = u64x2::new(7, 10);
26846 let r: u64x2 = transmute(vrax1q_u64(transmute(a), transmute(b)));
26847 assert_eq!(r, e);
26848 }
26849
26850 #[simd_test(enable = "neon,sha3")]
26851 unsafe fn test_vsha512hq_u64() {
26852 let a: u64x2 = u64x2::new(1, 2);
26853 let b: u64x2 = u64x2::new(3, 4);
26854 let c: u64x2 = u64x2::new(5, 6);
26855 let e: u64x2 = u64x2::new(11189044327219203, 7177611956453380);
26856 let r: u64x2 = transmute(vsha512hq_u64(transmute(a), transmute(b), transmute(c)));
26857 assert_eq!(r, e);
26858 }
26859
26860 #[simd_test(enable = "neon,sha3")]
26861 unsafe fn test_vsha512h2q_u64() {
26862 let a: u64x2 = u64x2::new(1, 2);
26863 let b: u64x2 = u64x2::new(3, 4);
26864 let c: u64x2 = u64x2::new(5, 6);
26865 let e: u64x2 = u64x2::new(5770237651009406214, 349133864969);
26866 let r: u64x2 = transmute(vsha512h2q_u64(transmute(a), transmute(b), transmute(c)));
26867 assert_eq!(r, e);
26868 }
26869
26870 #[simd_test(enable = "neon,sha3")]
26871 unsafe fn test_vsha512su0q_u64() {
26872 let a: u64x2 = u64x2::new(1, 2);
26873 let b: u64x2 = u64x2::new(3, 4);
26874 let e: u64x2 = u64x2::new(144115188075855874, 9439544818968559619);
26875 let r: u64x2 = transmute(vsha512su0q_u64(transmute(a), transmute(b)));
26876 assert_eq!(r, e);
26877 }
26878
26879 #[simd_test(enable = "neon,sha3")]
26880 unsafe fn test_vsha512su1q_u64() {
26881 let a: u64x2 = u64x2::new(1, 2);
26882 let b: u64x2 = u64x2::new(3, 4);
26883 let c: u64x2 = u64x2::new(5, 6);
26884 let e: u64x2 = u64x2::new(105553116266526, 140737488355368);
26885 let r: u64x2 = transmute(vsha512su1q_u64(transmute(a), transmute(b), transmute(c)));
26886 assert_eq!(r, e);
26887 }
26888
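// Round to integral within 32-bit/64-bit range tests (frintts extension: vrnd32x/vrnd32z/vrnd64x/vrnd64z).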
26889 #[simd_test(enable = "neon,frintts")]
26890 unsafe fn test_vrnd32x_f32() {
26891 let a: f32x2 = f32x2::new(1.1, 1.9);
26892 let e: f32x2 = f32x2::new(1.0, 2.0);
26893 let r: f32x2 = transmute(vrnd32x_f32(transmute(a)));
26894 assert_eq!(r, e);
26895 }
26896
26897 #[simd_test(enable = "neon,frintts")]
26898 unsafe fn test_vrnd32xq_f32() {
26899 let a: f32x4 = f32x4::new(1.1, 1.9, -1.7, -2.3);
26900 let e: f32x4 = f32x4::new(1.0, 2.0, -2.0, -2.0);
26901 let r: f32x4 = transmute(vrnd32xq_f32(transmute(a)));
26902 assert_eq!(r, e);
26903 }
26904
26905 #[simd_test(enable = "neon,frintts")]
26906 unsafe fn test_vrnd32z_f32() {
26907 let a: f32x2 = f32x2::new(1.1, 1.9);
26908 let e: f32x2 = f32x2::new(1.0, 1.0);
26909 let r: f32x2 = transmute(vrnd32z_f32(transmute(a)));
26910 assert_eq!(r, e);
26911 }
26912
26913 #[simd_test(enable = "neon,frintts")]
26914 unsafe fn test_vrnd32zq_f32() {
26915 let a: f32x4 = f32x4::new(1.1, 1.9, -1.7, -2.3);
26916 let e: f32x4 = f32x4::new(1.0, 1.0, -1.0, -2.0);
26917 let r: f32x4 = transmute(vrnd32zq_f32(transmute(a)));
26918 assert_eq!(r, e);
26919 }
26920
26921 #[simd_test(enable = "neon,frintts")]
26922 unsafe fn test_vrnd64x_f32() {
26923 let a: f32x2 = f32x2::new(1.1, 1.9);
26924 let e: f32x2 = f32x2::new(1.0, 2.0);
26925 let r: f32x2 = transmute(vrnd64x_f32(transmute(a)));
26926 assert_eq!(r, e);
26927 }
26928
26929 #[simd_test(enable = "neon,frintts")]
26930 unsafe fn test_vrnd64xq_f32() {
26931 let a: f32x4 = f32x4::new(1.1, 1.9, -1.7, -2.3);
26932 let e: f32x4 = f32x4::new(1.0, 2.0, -2.0, -2.0);
26933 let r: f32x4 = transmute(vrnd64xq_f32(transmute(a)));
26934 assert_eq!(r, e);
26935 }
26936
26937 #[simd_test(enable = "neon,frintts")]
26938 unsafe fn test_vrnd64z_f32() {
26939 let a: f32x2 = f32x2::new(1.1, 1.9);
26940 let e: f32x2 = f32x2::new(1.0, 1.0);
26941 let r: f32x2 = transmute(vrnd64z_f32(transmute(a)));
26942 assert_eq!(r, e);
26943 }
26944
26945 #[simd_test(enable = "neon,frintts")]
26946 unsafe fn test_vrnd64zq_f32() {
26947 let a: f32x4 = f32x4::new(1.1, 1.9, -1.7, -2.3);
26948 let e: f32x4 = f32x4::new(1.0, 1.0, -1.0, -2.0);
26949 let r: f32x4 = transmute(vrnd64zq_f32(transmute(a)));
26950 assert_eq!(r, e);
26951 }
26952
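// Transpose-vector tests (vtrn1/vtrn2).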
26953 #[simd_test(enable = "neon")]
26954 unsafe fn test_vtrn1_s8() {
26955 let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
26956 let b: i8x8 = i8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
26957 let e: i8x8 = i8x8::new(0, 1, 4, 5, 8, 9, 12, 13);
26958 let r: i8x8 = transmute(vtrn1_s8(transmute(a), transmute(b)));
26959 assert_eq!(r, e);
26960 }
26961
26962 #[simd_test(enable = "neon")]
26963 unsafe fn test_vtrn1q_s8() {
26964 let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
26965 let b: i8x16 = i8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
26966 let e: i8x16 = i8x16::new(0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29);
26967 let r: i8x16 = transmute(vtrn1q_s8(transmute(a), transmute(b)));
26968 assert_eq!(r, e);
26969 }
26970
26971 #[simd_test(enable = "neon")]
26972 unsafe fn test_vtrn1_s16() {
26973 let a: i16x4 = i16x4::new(0, 2, 4, 6);
26974 let b: i16x4 = i16x4::new(1, 3, 5, 7);
26975 let e: i16x4 = i16x4::new(0, 1, 4, 5);
26976 let r: i16x4 = transmute(vtrn1_s16(transmute(a), transmute(b)));
26977 assert_eq!(r, e);
26978 }
26979
26980 #[simd_test(enable = "neon")]
26981 unsafe fn test_vtrn1q_s16() {
26982 let a: i16x8 = i16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
26983 let b: i16x8 = i16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
26984 let e: i16x8 = i16x8::new(0, 1, 4, 5, 8, 9, 12, 13);
26985 let r: i16x8 = transmute(vtrn1q_s16(transmute(a), transmute(b)));
26986 assert_eq!(r, e);
26987 }
26988
26989 #[simd_test(enable = "neon")]
26990 unsafe fn test_vtrn1q_s32() {
26991 let a: i32x4 = i32x4::new(0, 2, 4, 6);
26992 let b: i32x4 = i32x4::new(1, 3, 5, 7);
26993 let e: i32x4 = i32x4::new(0, 1, 4, 5);
26994 let r: i32x4 = transmute(vtrn1q_s32(transmute(a), transmute(b)));
26995 assert_eq!(r, e);
26996 }
26997
26998 #[simd_test(enable = "neon")]
26999 unsafe fn test_vtrn1_u8() {
27000 let a: u8x8 = u8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27001 let b: u8x8 = u8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27002 let e: u8x8 = u8x8::new(0, 1, 4, 5, 8, 9, 12, 13);
27003 let r: u8x8 = transmute(vtrn1_u8(transmute(a), transmute(b)));
27004 assert_eq!(r, e);
27005 }
27006
27007 #[simd_test(enable = "neon")]
27008 unsafe fn test_vtrn1q_u8() {
27009 let a: u8x16 = u8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
27010 let b: u8x16 = u8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
27011 let e: u8x16 = u8x16::new(0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29);
27012 let r: u8x16 = transmute(vtrn1q_u8(transmute(a), transmute(b)));
27013 assert_eq!(r, e);
27014 }
27015
27016 #[simd_test(enable = "neon")]
27017 unsafe fn test_vtrn1_u16() {
27018 let a: u16x4 = u16x4::new(0, 2, 4, 6);
27019 let b: u16x4 = u16x4::new(1, 3, 5, 7);
27020 let e: u16x4 = u16x4::new(0, 1, 4, 5);
27021 let r: u16x4 = transmute(vtrn1_u16(transmute(a), transmute(b)));
27022 assert_eq!(r, e);
27023 }
27024
27025 #[simd_test(enable = "neon")]
27026 unsafe fn test_vtrn1q_u16() {
27027 let a: u16x8 = u16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27028 let b: u16x8 = u16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27029 let e: u16x8 = u16x8::new(0, 1, 4, 5, 8, 9, 12, 13);
27030 let r: u16x8 = transmute(vtrn1q_u16(transmute(a), transmute(b)));
27031 assert_eq!(r, e);
27032 }
27033
27034 #[simd_test(enable = "neon")]
27035 unsafe fn test_vtrn1q_u32() {
27036 let a: u32x4 = u32x4::new(0, 2, 4, 6);
27037 let b: u32x4 = u32x4::new(1, 3, 5, 7);
27038 let e: u32x4 = u32x4::new(0, 1, 4, 5);
27039 let r: u32x4 = transmute(vtrn1q_u32(transmute(a), transmute(b)));
27040 assert_eq!(r, e);
27041 }
27042
27043 #[simd_test(enable = "neon")]
27044 unsafe fn test_vtrn1_p8() {
27045 let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27046 let b: i8x8 = i8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27047 let e: i8x8 = i8x8::new(0, 1, 4, 5, 8, 9, 12, 13);
27048 let r: i8x8 = transmute(vtrn1_p8(transmute(a), transmute(b)));
27049 assert_eq!(r, e);
27050 }
27051
27052 #[simd_test(enable = "neon")]
27053 unsafe fn test_vtrn1q_p8() {
27054 let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
27055 let b: i8x16 = i8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
27056 let e: i8x16 = i8x16::new(0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29);
27057 let r: i8x16 = transmute(vtrn1q_p8(transmute(a), transmute(b)));
27058 assert_eq!(r, e);
27059 }
27060
27061 #[simd_test(enable = "neon")]
27062 unsafe fn test_vtrn1_p16() {
27063 let a: i16x4 = i16x4::new(0, 2, 4, 6);
27064 let b: i16x4 = i16x4::new(1, 3, 5, 7);
27065 let e: i16x4 = i16x4::new(0, 1, 4, 5);
27066 let r: i16x4 = transmute(vtrn1_p16(transmute(a), transmute(b)));
27067 assert_eq!(r, e);
27068 }
27069
27070 #[simd_test(enable = "neon")]
27071 unsafe fn test_vtrn1q_p16() {
27072 let a: i16x8 = i16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27073 let b: i16x8 = i16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27074 let e: i16x8 = i16x8::new(0, 1, 4, 5, 8, 9, 12, 13);
27075 let r: i16x8 = transmute(vtrn1q_p16(transmute(a), transmute(b)));
27076 assert_eq!(r, e);
27077 }
27078
27079 #[simd_test(enable = "neon")]
27080 unsafe fn test_vtrn1_s32() {
27081 let a: i32x2 = i32x2::new(0, 2);
27082 let b: i32x2 = i32x2::new(1, 3);
27083 let e: i32x2 = i32x2::new(0, 1);
27084 let r: i32x2 = transmute(vtrn1_s32(transmute(a), transmute(b)));
27085 assert_eq!(r, e);
27086 }
27087
27088 #[simd_test(enable = "neon")]
27089 unsafe fn test_vtrn1q_s64() {
27090 let a: i64x2 = i64x2::new(0, 2);
27091 let b: i64x2 = i64x2::new(1, 3);
27092 let e: i64x2 = i64x2::new(0, 1);
27093 let r: i64x2 = transmute(vtrn1q_s64(transmute(a), transmute(b)));
27094 assert_eq!(r, e);
27095 }
27096
27097 #[simd_test(enable = "neon")]
27098 unsafe fn test_vtrn1_u32() {
27099 let a: u32x2 = u32x2::new(0, 2);
27100 let b: u32x2 = u32x2::new(1, 3);
27101 let e: u32x2 = u32x2::new(0, 1);
27102 let r: u32x2 = transmute(vtrn1_u32(transmute(a), transmute(b)));
27103 assert_eq!(r, e);
27104 }
27105
27106 #[simd_test(enable = "neon")]
27107 unsafe fn test_vtrn1q_u64() {
27108 let a: u64x2 = u64x2::new(0, 2);
27109 let b: u64x2 = u64x2::new(1, 3);
27110 let e: u64x2 = u64x2::new(0, 1);
27111 let r: u64x2 = transmute(vtrn1q_u64(transmute(a), transmute(b)));
27112 assert_eq!(r, e);
27113 }
27114
27115 #[simd_test(enable = "neon")]
27116 unsafe fn test_vtrn1q_p64() {
27117 let a: i64x2 = i64x2::new(0, 2);
27118 let b: i64x2 = i64x2::new(1, 3);
27119 let e: i64x2 = i64x2::new(0, 1);
27120 let r: i64x2 = transmute(vtrn1q_p64(transmute(a), transmute(b)));
27121 assert_eq!(r, e);
27122 }
27123
27124 #[simd_test(enable = "neon")]
27125 unsafe fn test_vtrn1q_f32() {
27126 let a: f32x4 = f32x4::new(0., 2., 4., 6.);
27127 let b: f32x4 = f32x4::new(1., 3., 5., 7.);
27128 let e: f32x4 = f32x4::new(0., 1., 4., 5.);
27129 let r: f32x4 = transmute(vtrn1q_f32(transmute(a), transmute(b)));
27130 assert_eq!(r, e);
27131 }
27132
27133 #[simd_test(enable = "neon")]
27134 unsafe fn test_vtrn1_f32() {
27135 let a: f32x2 = f32x2::new(0., 2.);
27136 let b: f32x2 = f32x2::new(1., 3.);
27137 let e: f32x2 = f32x2::new(0., 1.);
27138 let r: f32x2 = transmute(vtrn1_f32(transmute(a), transmute(b)));
27139 assert_eq!(r, e);
27140 }
27141
27142 #[simd_test(enable = "neon")]
27143 unsafe fn test_vtrn1q_f64() {
27144 let a: f64x2 = f64x2::new(0., 2.);
27145 let b: f64x2 = f64x2::new(1., 3.);
27146 let e: f64x2 = f64x2::new(0., 1.);
27147 let r: f64x2 = transmute(vtrn1q_f64(transmute(a), transmute(b)));
27148 assert_eq!(r, e);
27149 }
27150
27151 #[simd_test(enable = "neon")]
27152 unsafe fn test_vtrn2_s8() {
27153 let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27154 let b: i8x8 = i8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27155 let e: i8x8 = i8x8::new(2, 3, 6, 7, 10, 11, 14, 15);
27156 let r: i8x8 = transmute(vtrn2_s8(transmute(a), transmute(b)));
27157 assert_eq!(r, e);
27158 }
27159
27160 #[simd_test(enable = "neon")]
27161 unsafe fn test_vtrn2q_s8() {
27162 let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
27163 let b: i8x16 = i8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
27164 let e: i8x16 = i8x16::new(2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31);
27165 let r: i8x16 = transmute(vtrn2q_s8(transmute(a), transmute(b)));
27166 assert_eq!(r, e);
27167 }
27168
27169 #[simd_test(enable = "neon")]
27170 unsafe fn test_vtrn2_s16() {
27171 let a: i16x4 = i16x4::new(0, 2, 4, 6);
27172 let b: i16x4 = i16x4::new(1, 3, 5, 7);
27173 let e: i16x4 = i16x4::new(2, 3, 6, 7);
27174 let r: i16x4 = transmute(vtrn2_s16(transmute(a), transmute(b)));
27175 assert_eq!(r, e);
27176 }
27177
27178 #[simd_test(enable = "neon")]
27179 unsafe fn test_vtrn2q_s16() {
27180 let a: i16x8 = i16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27181 let b: i16x8 = i16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27182 let e: i16x8 = i16x8::new(2, 3, 6, 7, 10, 11, 14, 15);
27183 let r: i16x8 = transmute(vtrn2q_s16(transmute(a), transmute(b)));
27184 assert_eq!(r, e);
27185 }
27186
27187 #[simd_test(enable = "neon")]
27188 unsafe fn test_vtrn2q_s32() {
27189 let a: i32x4 = i32x4::new(0, 2, 4, 6);
27190 let b: i32x4 = i32x4::new(1, 3, 5, 7);
27191 let e: i32x4 = i32x4::new(2, 3, 6, 7);
27192 let r: i32x4 = transmute(vtrn2q_s32(transmute(a), transmute(b)));
27193 assert_eq!(r, e);
27194 }
27195
27196 #[simd_test(enable = "neon")]
27197 unsafe fn test_vtrn2_u8() {
27198 let a: u8x8 = u8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27199 let b: u8x8 = u8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27200 let e: u8x8 = u8x8::new(2, 3, 6, 7, 10, 11, 14, 15);
27201 let r: u8x8 = transmute(vtrn2_u8(transmute(a), transmute(b)));
27202 assert_eq!(r, e);
27203 }
27204
27205 #[simd_test(enable = "neon")]
27206 unsafe fn test_vtrn2q_u8() {
27207 let a: u8x16 = u8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
27208 let b: u8x16 = u8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
27209 let e: u8x16 = u8x16::new(2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31);
27210 let r: u8x16 = transmute(vtrn2q_u8(transmute(a), transmute(b)));
27211 assert_eq!(r, e);
27212 }
27213
27214 #[simd_test(enable = "neon")]
27215 unsafe fn test_vtrn2_u16() {
27216 let a: u16x4 = u16x4::new(0, 2, 4, 6);
27217 let b: u16x4 = u16x4::new(1, 3, 5, 7);
27218 let e: u16x4 = u16x4::new(2, 3, 6, 7);
27219 let r: u16x4 = transmute(vtrn2_u16(transmute(a), transmute(b)));
27220 assert_eq!(r, e);
27221 }
27222
27223 #[simd_test(enable = "neon")]
27224 unsafe fn test_vtrn2q_u16() {
27225 let a: u16x8 = u16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27226 let b: u16x8 = u16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27227 let e: u16x8 = u16x8::new(2, 3, 6, 7, 10, 11, 14, 15);
27228 let r: u16x8 = transmute(vtrn2q_u16(transmute(a), transmute(b)));
27229 assert_eq!(r, e);
27230 }
27231
27232 #[simd_test(enable = "neon")]
27233 unsafe fn test_vtrn2q_u32() {
27234 let a: u32x4 = u32x4::new(0, 2, 4, 6);
27235 let b: u32x4 = u32x4::new(1, 3, 5, 7);
27236 let e: u32x4 = u32x4::new(2, 3, 6, 7);
27237 let r: u32x4 = transmute(vtrn2q_u32(transmute(a), transmute(b)));
27238 assert_eq!(r, e);
27239 }
27240
27241 #[simd_test(enable = "neon")]
27242 unsafe fn test_vtrn2_p8() {
27243 let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27244 let b: i8x8 = i8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27245 let e: i8x8 = i8x8::new(2, 3, 6, 7, 10, 11, 14, 15);
27246 let r: i8x8 = transmute(vtrn2_p8(transmute(a), transmute(b)));
27247 assert_eq!(r, e);
27248 }
27249
27250 #[simd_test(enable = "neon")]
27251 unsafe fn test_vtrn2q_p8() {
27252 let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
27253 let b: i8x16 = i8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
27254 let e: i8x16 = i8x16::new(2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31);
27255 let r: i8x16 = transmute(vtrn2q_p8(transmute(a), transmute(b)));
27256 assert_eq!(r, e);
27257 }
27258
27259 #[simd_test(enable = "neon")]
27260 unsafe fn test_vtrn2_p16() {
27261 let a: i16x4 = i16x4::new(0, 2, 4, 6);
27262 let b: i16x4 = i16x4::new(1, 3, 5, 7);
27263 let e: i16x4 = i16x4::new(2, 3, 6, 7);
27264 let r: i16x4 = transmute(vtrn2_p16(transmute(a), transmute(b)));
27265 assert_eq!(r, e);
27266 }
27267
27268 #[simd_test(enable = "neon")]
27269 unsafe fn test_vtrn2q_p16() {
27270 let a: i16x8 = i16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27271 let b: i16x8 = i16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27272 let e: i16x8 = i16x8::new(2, 3, 6, 7, 10, 11, 14, 15);
27273 let r: i16x8 = transmute(vtrn2q_p16(transmute(a), transmute(b)));
27274 assert_eq!(r, e);
27275 }
27276
27277 #[simd_test(enable = "neon")]
27278 unsafe fn test_vtrn2_s32() {
27279 let a: i32x2 = i32x2::new(0, 2);
27280 let b: i32x2 = i32x2::new(1, 3);
27281 let e: i32x2 = i32x2::new(2, 3);
27282 let r: i32x2 = transmute(vtrn2_s32(transmute(a), transmute(b)));
27283 assert_eq!(r, e);
27284 }
27285
27286 #[simd_test(enable = "neon")]
27287 unsafe fn test_vtrn2q_s64() {
27288 let a: i64x2 = i64x2::new(0, 2);
27289 let b: i64x2 = i64x2::new(1, 3);
27290 let e: i64x2 = i64x2::new(2, 3);
27291 let r: i64x2 = transmute(vtrn2q_s64(transmute(a), transmute(b)));
27292 assert_eq!(r, e);
27293 }
27294
27295 #[simd_test(enable = "neon")]
27296 unsafe fn test_vtrn2_u32() {
27297 let a: u32x2 = u32x2::new(0, 2);
27298 let b: u32x2 = u32x2::new(1, 3);
27299 let e: u32x2 = u32x2::new(2, 3);
27300 let r: u32x2 = transmute(vtrn2_u32(transmute(a), transmute(b)));
27301 assert_eq!(r, e);
27302 }
27303
27304 #[simd_test(enable = "neon")]
27305 unsafe fn test_vtrn2q_u64() {
27306 let a: u64x2 = u64x2::new(0, 2);
27307 let b: u64x2 = u64x2::new(1, 3);
27308 let e: u64x2 = u64x2::new(2, 3);
27309 let r: u64x2 = transmute(vtrn2q_u64(transmute(a), transmute(b)));
27310 assert_eq!(r, e);
27311 }
27312
27313 #[simd_test(enable = "neon")]
27314 unsafe fn test_vtrn2q_p64() {
27315 let a: i64x2 = i64x2::new(0, 2);
27316 let b: i64x2 = i64x2::new(1, 3);
27317 let e: i64x2 = i64x2::new(2, 3);
27318 let r: i64x2 = transmute(vtrn2q_p64(transmute(a), transmute(b)));
27319 assert_eq!(r, e);
27320 }
27321
27322 #[simd_test(enable = "neon")]
27323 unsafe fn test_vtrn2q_f32() {
27324 let a: f32x4 = f32x4::new(0., 2., 4., 6.);
27325 let b: f32x4 = f32x4::new(1., 3., 5., 7.);
27326 let e: f32x4 = f32x4::new(2., 3., 6., 7.);
27327 let r: f32x4 = transmute(vtrn2q_f32(transmute(a), transmute(b)));
27328 assert_eq!(r, e);
27329 }
27330
27331 #[simd_test(enable = "neon")]
27332 unsafe fn test_vtrn2_f32() {
27333 let a: f32x2 = f32x2::new(0., 2.);
27334 let b: f32x2 = f32x2::new(1., 3.);
27335 let e: f32x2 = f32x2::new(2., 3.);
27336 let r: f32x2 = transmute(vtrn2_f32(transmute(a), transmute(b)));
27337 assert_eq!(r, e);
27338 }
27339
27340 #[simd_test(enable = "neon")]
27341 unsafe fn test_vtrn2q_f64() {
27342 let a: f64x2 = f64x2::new(0., 2.);
27343 let b: f64x2 = f64x2::new(1., 3.);
27344 let e: f64x2 = f64x2::new(2., 3.);
27345 let r: f64x2 = transmute(vtrn2q_f64(transmute(a), transmute(b)));
27346 assert_eq!(r, e);
27347 }
27348
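// The vzip1* tests below exercise ZIP1: the low halves of the two inputs are
// interleaved, so r[2*i] = a[i] and r[2*i+1] = b[i].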
27349 #[simd_test(enable = "neon")]
27350 unsafe fn test_vzip1_s8() {
27351 let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27352 let b: i8x8 = i8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27353 let e: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
27354 let r: i8x8 = transmute(vzip1_s8(transmute(a), transmute(b)));
27355 assert_eq!(r, e);
27356 }
27357
27358 #[simd_test(enable = "neon")]
27359 unsafe fn test_vzip1q_s8() {
27360 let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
27361 let b: i8x16 = i8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
27362 let e: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
27363 let r: i8x16 = transmute(vzip1q_s8(transmute(a), transmute(b)));
27364 assert_eq!(r, e);
27365 }
27366
27367 #[simd_test(enable = "neon")]
27368 unsafe fn test_vzip1_s16() {
27369 let a: i16x4 = i16x4::new(0, 2, 4, 6);
27370 let b: i16x4 = i16x4::new(1, 3, 5, 7);
27371 let e: i16x4 = i16x4::new(0, 1, 2, 3);
27372 let r: i16x4 = transmute(vzip1_s16(transmute(a), transmute(b)));
27373 assert_eq!(r, e);
27374 }
27375
27376 #[simd_test(enable = "neon")]
27377 unsafe fn test_vzip1q_s16() {
27378 let a: i16x8 = i16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27379 let b: i16x8 = i16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27380 let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
27381 let r: i16x8 = transmute(vzip1q_s16(transmute(a), transmute(b)));
27382 assert_eq!(r, e);
27383 }
27384
27385 #[simd_test(enable = "neon")]
27386 unsafe fn test_vzip1_s32() {
27387 let a: i32x2 = i32x2::new(0, 2);
27388 let b: i32x2 = i32x2::new(1, 3);
27389 let e: i32x2 = i32x2::new(0, 1);
27390 let r: i32x2 = transmute(vzip1_s32(transmute(a), transmute(b)));
27391 assert_eq!(r, e);
27392 }
27393
27394 #[simd_test(enable = "neon")]
27395 unsafe fn test_vzip1q_s32() {
27396 let a: i32x4 = i32x4::new(0, 2, 4, 6);
27397 let b: i32x4 = i32x4::new(1, 3, 5, 7);
27398 let e: i32x4 = i32x4::new(0, 1, 2, 3);
27399 let r: i32x4 = transmute(vzip1q_s32(transmute(a), transmute(b)));
27400 assert_eq!(r, e);
27401 }
27402
27403 #[simd_test(enable = "neon")]
27404 unsafe fn test_vzip1q_s64() {
27405 let a: i64x2 = i64x2::new(0, 2);
27406 let b: i64x2 = i64x2::new(1, 3);
27407 let e: i64x2 = i64x2::new(0, 1);
27408 let r: i64x2 = transmute(vzip1q_s64(transmute(a), transmute(b)));
27409 assert_eq!(r, e);
27410 }
27411
27412 #[simd_test(enable = "neon")]
27413 unsafe fn test_vzip1_u8() {
27414 let a: u8x8 = u8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27415 let b: u8x8 = u8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27416 let e: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
27417 let r: u8x8 = transmute(vzip1_u8(transmute(a), transmute(b)));
27418 assert_eq!(r, e);
27419 }
27420
27421 #[simd_test(enable = "neon")]
27422 unsafe fn test_vzip1q_u8() {
27423 let a: u8x16 = u8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
27424 let b: u8x16 = u8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
27425 let e: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
27426 let r: u8x16 = transmute(vzip1q_u8(transmute(a), transmute(b)));
27427 assert_eq!(r, e);
27428 }
27429
27430 #[simd_test(enable = "neon")]
27431 unsafe fn test_vzip1_u16() {
27432 let a: u16x4 = u16x4::new(0, 2, 4, 6);
27433 let b: u16x4 = u16x4::new(1, 3, 5, 7);
27434 let e: u16x4 = u16x4::new(0, 1, 2, 3);
27435 let r: u16x4 = transmute(vzip1_u16(transmute(a), transmute(b)));
27436 assert_eq!(r, e);
27437 }
27438
27439 #[simd_test(enable = "neon")]
27440 unsafe fn test_vzip1q_u16() {
27441 let a: u16x8 = u16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27442 let b: u16x8 = u16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27443 let e: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
27444 let r: u16x8 = transmute(vzip1q_u16(transmute(a), transmute(b)));
27445 assert_eq!(r, e);
27446 }
27447
27448 #[simd_test(enable = "neon")]
27449 unsafe fn test_vzip1_u32() {
27450 let a: u32x2 = u32x2::new(0, 2);
27451 let b: u32x2 = u32x2::new(1, 3);
27452 let e: u32x2 = u32x2::new(0, 1);
27453 let r: u32x2 = transmute(vzip1_u32(transmute(a), transmute(b)));
27454 assert_eq!(r, e);
27455 }
27456
27457 #[simd_test(enable = "neon")]
27458 unsafe fn test_vzip1q_u32() {
27459 let a: u32x4 = u32x4::new(0, 2, 4, 6);
27460 let b: u32x4 = u32x4::new(1, 3, 5, 7);
27461 let e: u32x4 = u32x4::new(0, 1, 2, 3);
27462 let r: u32x4 = transmute(vzip1q_u32(transmute(a), transmute(b)));
27463 assert_eq!(r, e);
27464 }
27465
27466 #[simd_test(enable = "neon")]
27467 unsafe fn test_vzip1q_u64() {
27468 let a: u64x2 = u64x2::new(0, 2);
27469 let b: u64x2 = u64x2::new(1, 3);
27470 let e: u64x2 = u64x2::new(0, 1);
27471 let r: u64x2 = transmute(vzip1q_u64(transmute(a), transmute(b)));
27472 assert_eq!(r, e);
27473 }
27474
27475 #[simd_test(enable = "neon")]
27476 unsafe fn test_vzip1_p8() {
27477 let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27478 let b: i8x8 = i8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27479 let e: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
27480 let r: i8x8 = transmute(vzip1_p8(transmute(a), transmute(b)));
27481 assert_eq!(r, e);
27482 }
27483
27484 #[simd_test(enable = "neon")]
27485 unsafe fn test_vzip1q_p8() {
27486 let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
27487 let b: i8x16 = i8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
27488 let e: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
27489 let r: i8x16 = transmute(vzip1q_p8(transmute(a), transmute(b)));
27490 assert_eq!(r, e);
27491 }
27492
27493 #[simd_test(enable = "neon")]
27494 unsafe fn test_vzip1_p16() {
27495 let a: i16x4 = i16x4::new(0, 2, 4, 6);
27496 let b: i16x4 = i16x4::new(1, 3, 5, 7);
27497 let e: i16x4 = i16x4::new(0, 1, 2, 3);
27498 let r: i16x4 = transmute(vzip1_p16(transmute(a), transmute(b)));
27499 assert_eq!(r, e);
27500 }
27501
27502 #[simd_test(enable = "neon")]
27503 unsafe fn test_vzip1q_p16() {
27504 let a: i16x8 = i16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27505 let b: i16x8 = i16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27506 let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
27507 let r: i16x8 = transmute(vzip1q_p16(transmute(a), transmute(b)));
27508 assert_eq!(r, e);
27509 }
27510
27511 #[simd_test(enable = "neon")]
27512 unsafe fn test_vzip1q_p64() {
27513 let a: i64x2 = i64x2::new(0, 2);
27514 let b: i64x2 = i64x2::new(1, 3);
27515 let e: i64x2 = i64x2::new(0, 1);
27516 let r: i64x2 = transmute(vzip1q_p64(transmute(a), transmute(b)));
27517 assert_eq!(r, e);
27518 }
27519
27520 #[simd_test(enable = "neon")]
27521 unsafe fn test_vzip1_f32() {
27522 let a: f32x2 = f32x2::new(0., 2.);
27523 let b: f32x2 = f32x2::new(1., 3.);
27524 let e: f32x2 = f32x2::new(0., 1.);
27525 let r: f32x2 = transmute(vzip1_f32(transmute(a), transmute(b)));
27526 assert_eq!(r, e);
27527 }
27528
27529 #[simd_test(enable = "neon")]
27530 unsafe fn test_vzip1q_f32() {
27531 let a: f32x4 = f32x4::new(0., 2., 4., 6.);
27532 let b: f32x4 = f32x4::new(1., 3., 5., 7.);
27533 let e: f32x4 = f32x4::new(0., 1., 2., 3.);
27534 let r: f32x4 = transmute(vzip1q_f32(transmute(a), transmute(b)));
27535 assert_eq!(r, e);
27536 }
27537
27538 #[simd_test(enable = "neon")]
27539 unsafe fn test_vzip1q_f64() {
27540 let a: f64x2 = f64x2::new(0., 2.);
27541 let b: f64x2 = f64x2::new(1., 3.);
27542 let e: f64x2 = f64x2::new(0., 1.);
27543 let r: f64x2 = transmute(vzip1q_f64(transmute(a), transmute(b)));
27544 assert_eq!(r, e);
27545 }
27546
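// The vzip2* tests below exercise ZIP2: the high halves of the two inputs are
// interleaved, mirroring ZIP1 for the upper elements.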
27547 #[simd_test(enable = "neon")]
27548 unsafe fn test_vzip2_s8() {
27549 let a: i8x8 = i8x8::new(0, 16, 16, 18, 16, 18, 20, 22);
27550 let b: i8x8 = i8x8::new(1, 17, 17, 19, 17, 19, 21, 23);
27551 let e: i8x8 = i8x8::new(16, 17, 18, 19, 20, 21, 22, 23);
27552 let r: i8x8 = transmute(vzip2_s8(transmute(a), transmute(b)));
27553 assert_eq!(r, e);
27554 }
27555
27556 #[simd_test(enable = "neon")]
27557 unsafe fn test_vzip2q_s8() {
27558 let a: i8x16 = i8x16::new(0, 16, 16, 18, 16, 18, 20, 22, 16, 18, 20, 22, 24, 26, 28, 30);
27559 let b: i8x16 = i8x16::new(1, 17, 17, 19, 17, 19, 21, 23, 17, 19, 21, 23, 25, 27, 29, 31);
27560 let e: i8x16 = i8x16::new(16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
27561 let r: i8x16 = transmute(vzip2q_s8(transmute(a), transmute(b)));
27562 assert_eq!(r, e);
27563 }
27564
27565 #[simd_test(enable = "neon")]
27566 unsafe fn test_vzip2_s16() {
27567 let a: i16x4 = i16x4::new(0, 16, 16, 18);
27568 let b: i16x4 = i16x4::new(1, 17, 17, 19);
27569 let e: i16x4 = i16x4::new(16, 17, 18, 19);
27570 let r: i16x4 = transmute(vzip2_s16(transmute(a), transmute(b)));
27571 assert_eq!(r, e);
27572 }
27573
27574 #[simd_test(enable = "neon")]
27575 unsafe fn test_vzip2q_s16() {
27576 let a: i16x8 = i16x8::new(0, 16, 16, 18, 16, 18, 20, 22);
27577 let b: i16x8 = i16x8::new(1, 17, 17, 19, 17, 19, 21, 23);
27578 let e: i16x8 = i16x8::new(16, 17, 18, 19, 20, 21, 22, 23);
27579 let r: i16x8 = transmute(vzip2q_s16(transmute(a), transmute(b)));
27580 assert_eq!(r, e);
27581 }
27582
27583 #[simd_test(enable = "neon")]
27584 unsafe fn test_vzip2_s32() {
27585 let a: i32x2 = i32x2::new(0, 16);
27586 let b: i32x2 = i32x2::new(1, 17);
27587 let e: i32x2 = i32x2::new(16, 17);
27588 let r: i32x2 = transmute(vzip2_s32(transmute(a), transmute(b)));
27589 assert_eq!(r, e);
27590 }
27591
27592 #[simd_test(enable = "neon")]
27593 unsafe fn test_vzip2q_s32() {
27594 let a: i32x4 = i32x4::new(0, 16, 16, 18);
27595 let b: i32x4 = i32x4::new(1, 17, 17, 19);
27596 let e: i32x4 = i32x4::new(16, 17, 18, 19);
27597 let r: i32x4 = transmute(vzip2q_s32(transmute(a), transmute(b)));
27598 assert_eq!(r, e);
27599 }
27600
27601 #[simd_test(enable = "neon")]
27602 unsafe fn test_vzip2q_s64() {
27603 let a: i64x2 = i64x2::new(0, 16);
27604 let b: i64x2 = i64x2::new(1, 17);
27605 let e: i64x2 = i64x2::new(16, 17);
27606 let r: i64x2 = transmute(vzip2q_s64(transmute(a), transmute(b)));
27607 assert_eq!(r, e);
27608 }
27609
27610 #[simd_test(enable = "neon")]
27611 unsafe fn test_vzip2_u8() {
27612 let a: u8x8 = u8x8::new(0, 16, 16, 18, 16, 18, 20, 22);
27613 let b: u8x8 = u8x8::new(1, 17, 17, 19, 17, 19, 21, 23);
27614 let e: u8x8 = u8x8::new(16, 17, 18, 19, 20, 21, 22, 23);
27615 let r: u8x8 = transmute(vzip2_u8(transmute(a), transmute(b)));
27616 assert_eq!(r, e);
27617 }
27618
27619 #[simd_test(enable = "neon")]
27620 unsafe fn test_vzip2q_u8() {
27621 let a: u8x16 = u8x16::new(0, 16, 16, 18, 16, 18, 20, 22, 16, 18, 20, 22, 24, 26, 28, 30);
27622 let b: u8x16 = u8x16::new(1, 17, 17, 19, 17, 19, 21, 23, 17, 19, 21, 23, 25, 27, 29, 31);
27623 let e: u8x16 = u8x16::new(16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
27624 let r: u8x16 = transmute(vzip2q_u8(transmute(a), transmute(b)));
27625 assert_eq!(r, e);
27626 }
27627
27628 #[simd_test(enable = "neon")]
27629 unsafe fn test_vzip2_u16() {
27630 let a: u16x4 = u16x4::new(0, 16, 16, 18);
27631 let b: u16x4 = u16x4::new(1, 17, 17, 19);
27632 let e: u16x4 = u16x4::new(16, 17, 18, 19);
27633 let r: u16x4 = transmute(vzip2_u16(transmute(a), transmute(b)));
27634 assert_eq!(r, e);
27635 }
27636
27637 #[simd_test(enable = "neon")]
27638 unsafe fn test_vzip2q_u16() {
27639 let a: u16x8 = u16x8::new(0, 16, 16, 18, 16, 18, 20, 22);
27640 let b: u16x8 = u16x8::new(1, 17, 17, 19, 17, 19, 21, 23);
27641 let e: u16x8 = u16x8::new(16, 17, 18, 19, 20, 21, 22, 23);
27642 let r: u16x8 = transmute(vzip2q_u16(transmute(a), transmute(b)));
27643 assert_eq!(r, e);
27644 }
27645
27646 #[simd_test(enable = "neon")]
27647 unsafe fn test_vzip2_u32() {
27648 let a: u32x2 = u32x2::new(0, 16);
27649 let b: u32x2 = u32x2::new(1, 17);
27650 let e: u32x2 = u32x2::new(16, 17);
27651 let r: u32x2 = transmute(vzip2_u32(transmute(a), transmute(b)));
27652 assert_eq!(r, e);
27653 }
27654
27655 #[simd_test(enable = "neon")]
27656 unsafe fn test_vzip2q_u32() {
27657 let a: u32x4 = u32x4::new(0, 16, 16, 18);
27658 let b: u32x4 = u32x4::new(1, 17, 17, 19);
27659 let e: u32x4 = u32x4::new(16, 17, 18, 19);
27660 let r: u32x4 = transmute(vzip2q_u32(transmute(a), transmute(b)));
27661 assert_eq!(r, e);
27662 }
27663
27664 #[simd_test(enable = "neon")]
27665 unsafe fn test_vzip2q_u64() {
27666 let a: u64x2 = u64x2::new(0, 16);
27667 let b: u64x2 = u64x2::new(1, 17);
27668 let e: u64x2 = u64x2::new(16, 17);
27669 let r: u64x2 = transmute(vzip2q_u64(transmute(a), transmute(b)));
27670 assert_eq!(r, e);
27671 }
27672
27673 #[simd_test(enable = "neon")]
27674 unsafe fn test_vzip2_p8() {
27675 let a: i8x8 = i8x8::new(0, 16, 16, 18, 16, 18, 20, 22);
27676 let b: i8x8 = i8x8::new(1, 17, 17, 19, 17, 19, 21, 23);
27677 let e: i8x8 = i8x8::new(16, 17, 18, 19, 20, 21, 22, 23);
27678 let r: i8x8 = transmute(vzip2_p8(transmute(a), transmute(b)));
27679 assert_eq!(r, e);
27680 }
27681
27682 #[simd_test(enable = "neon")]
27683 unsafe fn test_vzip2q_p8() {
27684 let a: i8x16 = i8x16::new(0, 16, 16, 18, 16, 18, 20, 22, 16, 18, 20, 22, 24, 26, 28, 30);
27685 let b: i8x16 = i8x16::new(1, 17, 17, 19, 17, 19, 21, 23, 17, 19, 21, 23, 25, 27, 29, 31);
27686 let e: i8x16 = i8x16::new(16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
27687 let r: i8x16 = transmute(vzip2q_p8(transmute(a), transmute(b)));
27688 assert_eq!(r, e);
27689 }
27690
27691 #[simd_test(enable = "neon")]
27692 unsafe fn test_vzip2_p16() {
27693 let a: i16x4 = i16x4::new(0, 16, 16, 18);
27694 let b: i16x4 = i16x4::new(1, 17, 17, 19);
27695 let e: i16x4 = i16x4::new(16, 17, 18, 19);
27696 let r: i16x4 = transmute(vzip2_p16(transmute(a), transmute(b)));
27697 assert_eq!(r, e);
27698 }
27699
27700 #[simd_test(enable = "neon")]
27701 unsafe fn test_vzip2q_p16() {
27702 let a: i16x8 = i16x8::new(0, 16, 16, 18, 16, 18, 20, 22);
27703 let b: i16x8 = i16x8::new(1, 17, 17, 19, 17, 19, 21, 23);
27704 let e: i16x8 = i16x8::new(16, 17, 18, 19, 20, 21, 22, 23);
27705 let r: i16x8 = transmute(vzip2q_p16(transmute(a), transmute(b)));
27706 assert_eq!(r, e);
27707 }
27708
27709 #[simd_test(enable = "neon")]
27710 unsafe fn test_vzip2q_p64() {
27711 let a: i64x2 = i64x2::new(0, 16);
27712 let b: i64x2 = i64x2::new(1, 17);
27713 let e: i64x2 = i64x2::new(16, 17);
27714 let r: i64x2 = transmute(vzip2q_p64(transmute(a), transmute(b)));
27715 assert_eq!(r, e);
27716 }
27717
27718 #[simd_test(enable = "neon")]
27719 unsafe fn test_vzip2_f32() {
27720 let a: f32x2 = f32x2::new(0., 8.);
27721 let b: f32x2 = f32x2::new(1., 9.);
27722 let e: f32x2 = f32x2::new(8., 9.);
27723 let r: f32x2 = transmute(vzip2_f32(transmute(a), transmute(b)));
27724 assert_eq!(r, e);
27725 }
27726
27727 #[simd_test(enable = "neon")]
27728 unsafe fn test_vzip2q_f32() {
27729 let a: f32x4 = f32x4::new(0., 8., 8., 10.);
27730 let b: f32x4 = f32x4::new(1., 9., 9., 11.);
27731 let e: f32x4 = f32x4::new(8., 9., 10., 11.);
27732 let r: f32x4 = transmute(vzip2q_f32(transmute(a), transmute(b)));
27733 assert_eq!(r, e);
27734 }
27735
27736 #[simd_test(enable = "neon")]
27737 unsafe fn test_vzip2q_f64() {
27738 let a: f64x2 = f64x2::new(0., 8.);
27739 let b: f64x2 = f64x2::new(1., 9.);
27740 let e: f64x2 = f64x2::new(8., 9.);
27741 let r: f64x2 = transmute(vzip2q_f64(transmute(a), transmute(b)));
27742 assert_eq!(r, e);
27743 }
27744
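// The vuzp1* tests below exercise UZP1: the even-indexed elements of a and b
// are concatenated (the first half of a de-interleave).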
27745 #[simd_test(enable = "neon")]
27746 unsafe fn test_vuzp1_s8() {
27747 let a: i8x8 = i8x8::new(1, 0, 2, 0, 2, 0, 3, 0);
27748 let b: i8x8 = i8x8::new(2, 0, 3, 0, 7, 0, 8, 0);
27749 let e: i8x8 = i8x8::new(1, 2, 2, 3, 2, 3, 7, 8);
27750 let r: i8x8 = transmute(vuzp1_s8(transmute(a), transmute(b)));
27751 assert_eq!(r, e);
27752 }
27753
27754 #[simd_test(enable = "neon")]
27755 unsafe fn test_vuzp1q_s8() {
27756 let a: i8x16 = i8x16::new(1, 0, 2, 0, 2, 0, 3, 0, 2, 0, 3, 0, 7, 0, 8, 0);
27757 let b: i8x16 = i8x16::new(2, 0, 3, 0, 7, 0, 8, 0, 13, 0, 14, 0, 15, 0, 16, 0);
27758 let e: i8x16 = i8x16::new(1, 2, 2, 3, 2, 3, 7, 8, 2, 3, 7, 8, 13, 14, 15, 16);
27759 let r: i8x16 = transmute(vuzp1q_s8(transmute(a), transmute(b)));
27760 assert_eq!(r, e);
27761 }
27762
27763 #[simd_test(enable = "neon")]
27764 unsafe fn test_vuzp1_s16() {
27765 let a: i16x4 = i16x4::new(1, 0, 2, 0);
27766 let b: i16x4 = i16x4::new(2, 0, 3, 0);
27767 let e: i16x4 = i16x4::new(1, 2, 2, 3);
27768 let r: i16x4 = transmute(vuzp1_s16(transmute(a), transmute(b)));
27769 assert_eq!(r, e);
27770 }
27771
27772 #[simd_test(enable = "neon")]
27773 unsafe fn test_vuzp1q_s16() {
27774 let a: i16x8 = i16x8::new(1, 0, 2, 0, 2, 0, 3, 0);
27775 let b: i16x8 = i16x8::new(2, 0, 3, 0, 7, 0, 8, 0);
27776 let e: i16x8 = i16x8::new(1, 2, 2, 3, 2, 3, 7, 8);
27777 let r: i16x8 = transmute(vuzp1q_s16(transmute(a), transmute(b)));
27778 assert_eq!(r, e);
27779 }
27780
27781 #[simd_test(enable = "neon")]
27782 unsafe fn test_vuzp1q_s32() {
27783 let a: i32x4 = i32x4::new(1, 0, 2, 0);
27784 let b: i32x4 = i32x4::new(2, 0, 3, 0);
27785 let e: i32x4 = i32x4::new(1, 2, 2, 3);
27786 let r: i32x4 = transmute(vuzp1q_s32(transmute(a), transmute(b)));
27787 assert_eq!(r, e);
27788 }
27789
27790 #[simd_test(enable = "neon")]
27791 unsafe fn test_vuzp1_u8() {
27792 let a: u8x8 = u8x8::new(1, 0, 2, 0, 2, 0, 3, 0);
27793 let b: u8x8 = u8x8::new(2, 0, 3, 0, 7, 0, 8, 0);
27794 let e: u8x8 = u8x8::new(1, 2, 2, 3, 2, 3, 7, 8);
27795 let r: u8x8 = transmute(vuzp1_u8(transmute(a), transmute(b)));
27796 assert_eq!(r, e);
27797 }
27798
27799 #[simd_test(enable = "neon")]
27800 unsafe fn test_vuzp1q_u8() {
27801 let a: u8x16 = u8x16::new(1, 0, 2, 0, 2, 0, 3, 0, 2, 0, 3, 0, 7, 0, 8, 0);
27802 let b: u8x16 = u8x16::new(2, 0, 3, 0, 7, 0, 8, 0, 13, 0, 14, 0, 15, 0, 16, 0);
27803 let e: u8x16 = u8x16::new(1, 2, 2, 3, 2, 3, 7, 8, 2, 3, 7, 8, 13, 14, 15, 16);
27804 let r: u8x16 = transmute(vuzp1q_u8(transmute(a), transmute(b)));
27805 assert_eq!(r, e);
27806 }
27807
27808 #[simd_test(enable = "neon")]
27809 unsafe fn test_vuzp1_u16() {
27810 let a: u16x4 = u16x4::new(1, 0, 2, 0);
27811 let b: u16x4 = u16x4::new(2, 0, 3, 0);
27812 let e: u16x4 = u16x4::new(1, 2, 2, 3);
27813 let r: u16x4 = transmute(vuzp1_u16(transmute(a), transmute(b)));
27814 assert_eq!(r, e);
27815 }
27816
27817 #[simd_test(enable = "neon")]
27818 unsafe fn test_vuzp1q_u16() {
27819 let a: u16x8 = u16x8::new(1, 0, 2, 0, 2, 0, 3, 0);
27820 let b: u16x8 = u16x8::new(2, 0, 3, 0, 7, 0, 8, 0);
27821 let e: u16x8 = u16x8::new(1, 2, 2, 3, 2, 3, 7, 8);
27822 let r: u16x8 = transmute(vuzp1q_u16(transmute(a), transmute(b)));
27823 assert_eq!(r, e);
27824 }
27825
27826 #[simd_test(enable = "neon")]
27827 unsafe fn test_vuzp1q_u32() {
27828 let a: u32x4 = u32x4::new(1, 0, 2, 0);
27829 let b: u32x4 = u32x4::new(2, 0, 3, 0);
27830 let e: u32x4 = u32x4::new(1, 2, 2, 3);
27831 let r: u32x4 = transmute(vuzp1q_u32(transmute(a), transmute(b)));
27832 assert_eq!(r, e);
27833 }
27834
27835 #[simd_test(enable = "neon")]
27836 unsafe fn test_vuzp1_p8() {
27837 let a: i8x8 = i8x8::new(1, 0, 2, 0, 2, 0, 3, 0);
27838 let b: i8x8 = i8x8::new(2, 0, 3, 0, 7, 0, 8, 0);
27839 let e: i8x8 = i8x8::new(1, 2, 2, 3, 2, 3, 7, 8);
27840 let r: i8x8 = transmute(vuzp1_p8(transmute(a), transmute(b)));
27841 assert_eq!(r, e);
27842 }
27843
27844 #[simd_test(enable = "neon")]
27845 unsafe fn test_vuzp1q_p8() {
27846 let a: i8x16 = i8x16::new(1, 0, 2, 0, 2, 0, 3, 0, 2, 0, 3, 0, 7, 0, 8, 0);
27847 let b: i8x16 = i8x16::new(2, 0, 3, 0, 7, 0, 8, 0, 13, 0, 14, 0, 15, 0, 16, 0);
27848 let e: i8x16 = i8x16::new(1, 2, 2, 3, 2, 3, 7, 8, 2, 3, 7, 8, 13, 14, 15, 16);
27849 let r: i8x16 = transmute(vuzp1q_p8(transmute(a), transmute(b)));
27850 assert_eq!(r, e);
27851 }
27852
27853 #[simd_test(enable = "neon")]
27854 unsafe fn test_vuzp1_p16() {
27855 let a: i16x4 = i16x4::new(1, 0, 2, 0);
27856 let b: i16x4 = i16x4::new(2, 0, 3, 0);
27857 let e: i16x4 = i16x4::new(1, 2, 2, 3);
27858 let r: i16x4 = transmute(vuzp1_p16(transmute(a), transmute(b)));
27859 assert_eq!(r, e);
27860 }
27861
27862 #[simd_test(enable = "neon")]
27863 unsafe fn test_vuzp1q_p16() {
27864 let a: i16x8 = i16x8::new(1, 0, 2, 0, 2, 0, 3, 0);
27865 let b: i16x8 = i16x8::new(2, 0, 3, 0, 7, 0, 8, 0);
27866 let e: i16x8 = i16x8::new(1, 2, 2, 3, 2, 3, 7, 8);
27867 let r: i16x8 = transmute(vuzp1q_p16(transmute(a), transmute(b)));
27868 assert_eq!(r, e);
27869 }
27870
27871 #[simd_test(enable = "neon")]
27872 unsafe fn test_vuzp1_s32() {
27873 let a: i32x2 = i32x2::new(1, 0);
27874 let b: i32x2 = i32x2::new(2, 0);
27875 let e: i32x2 = i32x2::new(1, 2);
27876 let r: i32x2 = transmute(vuzp1_s32(transmute(a), transmute(b)));
27877 assert_eq!(r, e);
27878 }
27879
27880 #[simd_test(enable = "neon")]
27881 unsafe fn test_vuzp1q_s64() {
27882 let a: i64x2 = i64x2::new(1, 0);
27883 let b: i64x2 = i64x2::new(2, 0);
27884 let e: i64x2 = i64x2::new(1, 2);
27885 let r: i64x2 = transmute(vuzp1q_s64(transmute(a), transmute(b)));
27886 assert_eq!(r, e);
27887 }
27888
27889 #[simd_test(enable = "neon")]
27890 unsafe fn test_vuzp1_u32() {
27891 let a: u32x2 = u32x2::new(1, 0);
27892 let b: u32x2 = u32x2::new(2, 0);
27893 let e: u32x2 = u32x2::new(1, 2);
27894 let r: u32x2 = transmute(vuzp1_u32(transmute(a), transmute(b)));
27895 assert_eq!(r, e);
27896 }
27897
27898 #[simd_test(enable = "neon")]
27899 unsafe fn test_vuzp1q_u64() {
27900 let a: u64x2 = u64x2::new(1, 0);
27901 let b: u64x2 = u64x2::new(2, 0);
27902 let e: u64x2 = u64x2::new(1, 2);
27903 let r: u64x2 = transmute(vuzp1q_u64(transmute(a), transmute(b)));
27904 assert_eq!(r, e);
27905 }
27906
27907 #[simd_test(enable = "neon")]
27908 unsafe fn test_vuzp1q_p64() {
27909 let a: i64x2 = i64x2::new(1, 0);
27910 let b: i64x2 = i64x2::new(2, 0);
27911 let e: i64x2 = i64x2::new(1, 2);
27912 let r: i64x2 = transmute(vuzp1q_p64(transmute(a), transmute(b)));
27913 assert_eq!(r, e);
27914 }
27915
27916 #[simd_test(enable = "neon")]
27917 unsafe fn test_vuzp1q_f32() {
27918 let a: f32x4 = f32x4::new(0., 8., 1., 9.);
27919 let b: f32x4 = f32x4::new(1., 10., 3., 11.);
27920 let e: f32x4 = f32x4::new(0., 1., 1., 3.);
27921 let r: f32x4 = transmute(vuzp1q_f32(transmute(a), transmute(b)));
27922 assert_eq!(r, e);
27923 }
27924
27925 #[simd_test(enable = "neon")]
27926 unsafe fn test_vuzp1_f32() {
27927 let a: f32x2 = f32x2::new(0., 8.);
27928 let b: f32x2 = f32x2::new(1., 10.);
27929 let e: f32x2 = f32x2::new(0., 1.);
27930 let r: f32x2 = transmute(vuzp1_f32(transmute(a), transmute(b)));
27931 assert_eq!(r, e);
27932 }
27933
27934 #[simd_test(enable = "neon")]
27935 unsafe fn test_vuzp1q_f64() {
27936 let a: f64x2 = f64x2::new(0., 8.);
27937 let b: f64x2 = f64x2::new(1., 10.);
27938 let e: f64x2 = f64x2::new(0., 1.);
27939 let r: f64x2 = transmute(vuzp1q_f64(transmute(a), transmute(b)));
27940 assert_eq!(r, e);
27941 }
27942
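// The vuzp2* tests below exercise UZP2: the odd-indexed elements of a and b
// are concatenated (the second half of a de-interleave).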
27943 #[simd_test(enable = "neon")]
27944 unsafe fn test_vuzp2_s8() {
27945 let a: i8x8 = i8x8::new(0, 17, 0, 18, 0, 18, 0, 19);
27946 let b: i8x8 = i8x8::new(0, 18, 0, 19, 0, 23, 0, 24);
27947 let e: i8x8 = i8x8::new(17, 18, 18, 19, 18, 19, 23, 24);
27948 let r: i8x8 = transmute(vuzp2_s8(transmute(a), transmute(b)));
27949 assert_eq!(r, e);
27950 }
27951
27952 #[simd_test(enable = "neon")]
27953 unsafe fn test_vuzp2q_s8() {
27954 let a: i8x16 = i8x16::new(0, 17, 0, 18, 0, 18, 0, 19, 0, 18, 0, 19, 0, 23, 0, 24);
27955 let b: i8x16 = i8x16::new(0, 18, 0, 19, 0, 23, 0, 24, 0, 29, 0, 30, 0, 31, 0, 32);
27956 let e: i8x16 = i8x16::new(17, 18, 18, 19, 18, 19, 23, 24, 18, 19, 23, 24, 29, 30, 31, 32);
27957 let r: i8x16 = transmute(vuzp2q_s8(transmute(a), transmute(b)));
27958 assert_eq!(r, e);
27959 }
27960
27961 #[simd_test(enable = "neon")]
27962 unsafe fn test_vuzp2_s16() {
27963 let a: i16x4 = i16x4::new(0, 17, 0, 18);
27964 let b: i16x4 = i16x4::new(0, 18, 0, 19);
27965 let e: i16x4 = i16x4::new(17, 18, 18, 19);
27966 let r: i16x4 = transmute(vuzp2_s16(transmute(a), transmute(b)));
27967 assert_eq!(r, e);
27968 }
27969
27970 #[simd_test(enable = "neon")]
27971 unsafe fn test_vuzp2q_s16() {
27972 let a: i16x8 = i16x8::new(0, 17, 0, 18, 0, 18, 0, 19);
27973 let b: i16x8 = i16x8::new(0, 18, 0, 19, 0, 23, 0, 24);
27974 let e: i16x8 = i16x8::new(17, 18, 18, 19, 18, 19, 23, 24);
27975 let r: i16x8 = transmute(vuzp2q_s16(transmute(a), transmute(b)));
27976 assert_eq!(r, e);
27977 }
27978
27979 #[simd_test(enable = "neon")]
27980 unsafe fn test_vuzp2q_s32() {
27981 let a: i32x4 = i32x4::new(0, 17, 0, 18);
27982 let b: i32x4 = i32x4::new(0, 18, 0, 19);
27983 let e: i32x4 = i32x4::new(17, 18, 18, 19);
27984 let r: i32x4 = transmute(vuzp2q_s32(transmute(a), transmute(b)));
27985 assert_eq!(r, e);
27986 }
27987
27988 #[simd_test(enable = "neon")]
27989 unsafe fn test_vuzp2_u8() {
27990 let a: u8x8 = u8x8::new(0, 17, 0, 18, 0, 18, 0, 19);
27991 let b: u8x8 = u8x8::new(0, 18, 0, 19, 0, 23, 0, 24);
27992 let e: u8x8 = u8x8::new(17, 18, 18, 19, 18, 19, 23, 24);
27993 let r: u8x8 = transmute(vuzp2_u8(transmute(a), transmute(b)));
27994 assert_eq!(r, e);
27995 }
27996
27997 #[simd_test(enable = "neon")]
27998 unsafe fn test_vuzp2q_u8() {
27999 let a: u8x16 = u8x16::new(0, 17, 0, 18, 0, 18, 0, 19, 0, 18, 0, 19, 0, 23, 0, 24);
28000 let b: u8x16 = u8x16::new(0, 18, 0, 19, 0, 23, 0, 24, 0, 29, 0, 30, 0, 31, 0, 32);
28001 let e: u8x16 = u8x16::new(17, 18, 18, 19, 18, 19, 23, 24, 18, 19, 23, 24, 29, 30, 31, 32);
28002 let r: u8x16 = transmute(vuzp2q_u8(transmute(a), transmute(b)));
28003 assert_eq!(r, e);
28004 }
28005
28006 #[simd_test(enable = "neon")]
28007 unsafe fn test_vuzp2_u16() {
28008 let a: u16x4 = u16x4::new(0, 17, 0, 18);
28009 let b: u16x4 = u16x4::new(0, 18, 0, 19);
28010 let e: u16x4 = u16x4::new(17, 18, 18, 19);
28011 let r: u16x4 = transmute(vuzp2_u16(transmute(a), transmute(b)));
28012 assert_eq!(r, e);
28013 }
28014
28015 #[simd_test(enable = "neon")]
28016 unsafe fn test_vuzp2q_u16() {
28017 let a: u16x8 = u16x8::new(0, 17, 0, 18, 0, 18, 0, 19);
28018 let b: u16x8 = u16x8::new(0, 18, 0, 19, 0, 23, 0, 24);
28019 let e: u16x8 = u16x8::new(17, 18, 18, 19, 18, 19, 23, 24);
28020 let r: u16x8 = transmute(vuzp2q_u16(transmute(a), transmute(b)));
28021 assert_eq!(r, e);
28022 }
28023
28024 #[simd_test(enable = "neon")]
28025 unsafe fn test_vuzp2q_u32() {
28026 let a: u32x4 = u32x4::new(0, 17, 0, 18);
28027 let b: u32x4 = u32x4::new(0, 18, 0, 19);
28028 let e: u32x4 = u32x4::new(17, 18, 18, 19);
28029 let r: u32x4 = transmute(vuzp2q_u32(transmute(a), transmute(b)));
28030 assert_eq!(r, e);
28031 }
28032
28033 #[simd_test(enable = "neon")]
28034 unsafe fn test_vuzp2_p8() {
28035 let a: i8x8 = i8x8::new(0, 17, 0, 18, 0, 18, 0, 19);
28036 let b: i8x8 = i8x8::new(0, 18, 0, 19, 0, 23, 0, 24);
28037 let e: i8x8 = i8x8::new(17, 18, 18, 19, 18, 19, 23, 24);
28038 let r: i8x8 = transmute(vuzp2_p8(transmute(a), transmute(b)));
28039 assert_eq!(r, e);
28040 }
28041
28042 #[simd_test(enable = "neon")]
28043 unsafe fn test_vuzp2q_p8() {
28044 let a: i8x16 = i8x16::new(0, 17, 0, 18, 0, 18, 0, 19, 0, 18, 0, 19, 0, 23, 0, 24);
28045 let b: i8x16 = i8x16::new(0, 18, 0, 19, 0, 23, 0, 24, 0, 29, 0, 30, 0, 31, 0, 32);
28046 let e: i8x16 = i8x16::new(17, 18, 18, 19, 18, 19, 23, 24, 18, 19, 23, 24, 29, 30, 31, 32);
28047 let r: i8x16 = transmute(vuzp2q_p8(transmute(a), transmute(b)));
28048 assert_eq!(r, e);
28049 }
28050
28051 #[simd_test(enable = "neon")]
28052 unsafe fn test_vuzp2_p16() {
28053 let a: i16x4 = i16x4::new(0, 17, 0, 18);
28054 let b: i16x4 = i16x4::new(0, 18, 0, 19);
28055 let e: i16x4 = i16x4::new(17, 18, 18, 19);
28056 let r: i16x4 = transmute(vuzp2_p16(transmute(a), transmute(b)));
28057 assert_eq!(r, e);
28058 }
28059
28060 #[simd_test(enable = "neon")]
28061 unsafe fn test_vuzp2q_p16() {
28062 let a: i16x8 = i16x8::new(0, 17, 0, 18, 0, 18, 0, 19);
28063 let b: i16x8 = i16x8::new(0, 18, 0, 19, 0, 23, 0, 24);
28064 let e: i16x8 = i16x8::new(17, 18, 18, 19, 18, 19, 23, 24);
28065 let r: i16x8 = transmute(vuzp2q_p16(transmute(a), transmute(b)));
28066 assert_eq!(r, e);
28067 }
28068
28069 #[simd_test(enable = "neon")]
28070 unsafe fn test_vuzp2_s32() {
28071 let a: i32x2 = i32x2::new(0, 17);
28072 let b: i32x2 = i32x2::new(0, 18);
28073 let e: i32x2 = i32x2::new(17, 18);
28074 let r: i32x2 = transmute(vuzp2_s32(transmute(a), transmute(b)));
28075 assert_eq!(r, e);
28076 }
28077
28078 #[simd_test(enable = "neon")]
28079 unsafe fn test_vuzp2q_s64() {
28080 let a: i64x2 = i64x2::new(0, 17);
28081 let b: i64x2 = i64x2::new(0, 18);
28082 let e: i64x2 = i64x2::new(17, 18);
28083 let r: i64x2 = transmute(vuzp2q_s64(transmute(a), transmute(b)));
28084 assert_eq!(r, e);
28085 }
28086
28087 #[simd_test(enable = "neon")]
28088 unsafe fn test_vuzp2_u32() {
28089 let a: u32x2 = u32x2::new(0, 17);
28090 let b: u32x2 = u32x2::new(0, 18);
28091 let e: u32x2 = u32x2::new(17, 18);
28092 let r: u32x2 = transmute(vuzp2_u32(transmute(a), transmute(b)));
28093 assert_eq!(r, e);
28094 }
28095
28096 #[simd_test(enable = "neon")]
28097 unsafe fn test_vuzp2q_u64() {
28098 let a: u64x2 = u64x2::new(0, 17);
28099 let b: u64x2 = u64x2::new(0, 18);
28100 let e: u64x2 = u64x2::new(17, 18);
28101 let r: u64x2 = transmute(vuzp2q_u64(transmute(a), transmute(b)));
28102 assert_eq!(r, e);
28103 }
28104
28105 #[simd_test(enable = "neon")]
28106 unsafe fn test_vuzp2q_p64() {
28107 let a: i64x2 = i64x2::new(0, 17);
28108 let b: i64x2 = i64x2::new(0, 18);
28109 let e: i64x2 = i64x2::new(17, 18);
28110 let r: i64x2 = transmute(vuzp2q_p64(transmute(a), transmute(b)));
28111 assert_eq!(r, e);
28112 }
28113
28114 #[simd_test(enable = "neon")]
28115 unsafe fn test_vuzp2q_f32() {
28116 let a: f32x4 = f32x4::new(0., 8., 1., 9.);
28117 let b: f32x4 = f32x4::new(2., 9., 3., 11.);
28118 let e: f32x4 = f32x4::new(8., 9., 9., 11.);
28119 let r: f32x4 = transmute(vuzp2q_f32(transmute(a), transmute(b)));
28120 assert_eq!(r, e);
28121 }
28122
28123 #[simd_test(enable = "neon")]
28124 unsafe fn test_vuzp2_f32() {
28125 let a: f32x2 = f32x2::new(0., 8.);
28126 let b: f32x2 = f32x2::new(2., 9.);
28127 let e: f32x2 = f32x2::new(8., 9.);
28128 let r: f32x2 = transmute(vuzp2_f32(transmute(a), transmute(b)));
28129 assert_eq!(r, e);
28130 }
28131
28132 #[simd_test(enable = "neon")]
28133 unsafe fn test_vuzp2q_f64() {
28134 let a: f64x2 = f64x2::new(0., 8.);
28135 let b: f64x2 = f64x2::new(2., 9.);
28136 let e: f64x2 = f64x2::new(8., 9.);
28137 let r: f64x2 = transmute(vuzp2q_f64(transmute(a), transmute(b)));
28138 assert_eq!(r, e);
28139 }
28140
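// The vabal_high_* tests below exercise UABAL2/SABAL2: the absolute difference
// of the high halves of b and c is widened and accumulated onto a.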
28141 #[simd_test(enable = "neon")]
28142 unsafe fn test_vabal_high_u8() {
28143 let a: u16x8 = u16x8::new(9, 10, 11, 12, 13, 14, 15, 16);
28144 let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
28145 let c: u8x16 = u8x16::new(10, 10, 10, 10, 10, 10, 10, 10, 20, 0, 2, 4, 6, 8, 10, 12);
28146 let e: u16x8 = u16x8::new(20, 20, 20, 20, 20, 20, 20, 20);
28147 let r: u16x8 = transmute(vabal_high_u8(transmute(a), transmute(b), transmute(c)));
28148 assert_eq!(r, e);
28149 }
28150
28151 #[simd_test(enable = "neon")]
28152 unsafe fn test_vabal_high_u16() {
28153 let a: u32x4 = u32x4::new(9, 10, 11, 12);
28154 let b: u16x8 = u16x8::new(1, 2, 3, 4, 9, 10, 11, 12);
28155 let c: u16x8 = u16x8::new(10, 10, 10, 10, 20, 0, 2, 4);
28156 let e: u32x4 = u32x4::new(20, 20, 20, 20);
28157 let r: u32x4 = transmute(vabal_high_u16(transmute(a), transmute(b), transmute(c)));
28158 assert_eq!(r, e);
28159 }
28160
28161 #[simd_test(enable = "neon")]
28162 unsafe fn test_vabal_high_u32() {
28163 let a: u64x2 = u64x2::new(15, 16);
28164 let b: u32x4 = u32x4::new(1, 2, 15, 16);
28165 let c: u32x4 = u32x4::new(10, 10, 10, 12);
28166 let e: u64x2 = u64x2::new(20, 20);
28167 let r: u64x2 = transmute(vabal_high_u32(transmute(a), transmute(b), transmute(c)));
28168 assert_eq!(r, e);
28169 }
28170
28171 #[simd_test(enable = "neon")]
28172 unsafe fn test_vabal_high_s8() {
28173 let a: i16x8 = i16x8::new(9, 10, 11, 12, 13, 14, 15, 16);
28174 let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
28175 let c: i8x16 = i8x16::new(10, 10, 10, 10, 10, 10, 10, 10, 20, 0, 2, 4, 6, 8, 10, 12);
28176 let e: i16x8 = i16x8::new(20, 20, 20, 20, 20, 20, 20, 20);
28177 let r: i16x8 = transmute(vabal_high_s8(transmute(a), transmute(b), transmute(c)));
28178 assert_eq!(r, e);
28179 }
28180
28181 #[simd_test(enable = "neon")]
28182 unsafe fn test_vabal_high_s16() {
28183 let a: i32x4 = i32x4::new(9, 10, 11, 12);
28184 let b: i16x8 = i16x8::new(1, 2, 3, 4, 9, 10, 11, 12);
28185 let c: i16x8 = i16x8::new(10, 10, 10, 10, 20, 0, 2, 4);
28186 let e: i32x4 = i32x4::new(20, 20, 20, 20);
28187 let r: i32x4 = transmute(vabal_high_s16(transmute(a), transmute(b), transmute(c)));
28188 assert_eq!(r, e);
28189 }
28190
28191 #[simd_test(enable = "neon")]
28192 unsafe fn test_vabal_high_s32() {
28193 let a: i64x2 = i64x2::new(15, 16);
28194 let b: i32x4 = i32x4::new(1, 2, 15, 16);
28195 let c: i32x4 = i32x4::new(10, 10, 10, 12);
28196 let e: i64x2 = i64x2::new(20, 20);
28197 let r: i64x2 = transmute(vabal_high_s32(transmute(a), transmute(b), transmute(c)));
28198 assert_eq!(r, e);
28199 }
28200
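// The vqabs* tests below exercise SQABS, the saturating absolute value:
// i64::MIN saturates to i64::MAX instead of wrapping.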
28201 #[simd_test(enable = "neon")]
28202 unsafe fn test_vqabs_s64() {
28203 let a: i64x1 = i64x1::new(-9223372036854775808);
28204 let e: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
28205 let r: i64x1 = transmute(vqabs_s64(transmute(a)));
28206 assert_eq!(r, e);
28207 }
28208
28209 #[simd_test(enable = "neon")]
28210 unsafe fn test_vqabsq_s64() {
28211 let a: i64x2 = i64x2::new(-9223372036854775808, -7);
28212 let e: i64x2 = i64x2::new(0x7F_FF_FF_FF_FF_FF_FF_FF, 7);
28213 let r: i64x2 = transmute(vqabsq_s64(transmute(a)));
28214 assert_eq!(r, e);
28215 }
28216
28217 #[simd_test(enable = "neon")]
28218 unsafe fn test_vqabsb_s8() {
28219 let a: i8 = -7;
28220 let e: i8 = 7;
28221 let r: i8 = transmute(vqabsb_s8(transmute(a)));
28222 assert_eq!(r, e);
28223 }
28224
28225 #[simd_test(enable = "neon")]
28226 unsafe fn test_vqabsh_s16() {
28227 let a: i16 = -7;
28228 let e: i16 = 7;
28229 let r: i16 = transmute(vqabsh_s16(transmute(a)));
28230 assert_eq!(r, e);
28231 }
28232
28233 #[simd_test(enable = "neon")]
28234 unsafe fn test_vqabss_s32() {
28235 let a: i32 = -7;
28236 let e: i32 = 7;
28237 let r: i32 = transmute(vqabss_s32(transmute(a)));
28238 assert_eq!(r, e);
28239 }
28240
28241 #[simd_test(enable = "neon")]
28242 unsafe fn test_vqabsd_s64() {
28243 let a: i64 = -7;
28244 let e: i64 = 7;
28245 let r: i64 = transmute(vqabsd_s64(transmute(a)));
28246 assert_eq!(r, e);
28247 }
28248
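// The vslid_n/vsrid_n tests below exercise SLI/SRI: b is shifted by N and
// inserted into a, preserving the bits of a that the shifted value leaves vacant.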
28249 #[simd_test(enable = "neon")]
28250 unsafe fn test_vslid_n_s64() {
28251 let a: i64 = 333;
28252 let b: i64 = 2042;
28253 let e: i64 = 8169;
28254 let r: i64 = transmute(vslid_n_s64::<2>(transmute(a), transmute(b)));
28255 assert_eq!(r, e);
28256 }
28257
28258 #[simd_test(enable = "neon")]
28259 unsafe fn test_vslid_n_u64() {
28260 let a: u64 = 333;
28261 let b: u64 = 2042;
28262 let e: u64 = 8169;
28263 let r: u64 = transmute(vslid_n_u64::<2>(transmute(a), transmute(b)));
28264 assert_eq!(r, e);
28265 }
28266
28267 #[simd_test(enable = "neon")]
28268 unsafe fn test_vsrid_n_s64() {
28269 let a: i64 = 333;
28270 let b: i64 = 2042;
28271 let e: i64 = 510;
28272 let r: i64 = transmute(vsrid_n_s64::<2>(transmute(a), transmute(b)));
28273 assert_eq!(r, e);
28274 }
28275
28276 #[simd_test(enable = "neon")]
28277 unsafe fn test_vsrid_n_u64() {
28278 let a: u64 = 333;
28279 let b: u64 = 2042;
28280 let e: u64 = 510;
28281 let r: u64 = transmute(vsrid_n_u64::<2>(transmute(a), transmute(b)));
28282 assert_eq!(r, e);
28283 }
28284 }